removes pubkey from LegacyContactInfo public interface (#31375)
Working towards the LegacyContactInfo => ContactInfo migration, this commit adds more API parity between the two.
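In practice the change swaps direct reads of the public `id` field for the borrowed `pubkey()` accessor (and `set_pubkey()` for writes). Below is a minimal sketch of the new call pattern, not part of the commit: `stake_of` is a hypothetical helper, and the `solana_gossip`/`solana_sdk` import paths are assumptions based on the crates touched in this diff.

use solana_gossip::legacy_contact_info::LegacyContactInfo;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;

// Hypothetical helper: `node.id` becomes `node.pubkey()`, which returns
// `&Pubkey`, so comparisons take references and owned copies use `*`.
fn stake_of(node: &LegacyContactInfo, stakes: &HashMap<Pubkey, u64>, me: &Pubkey) -> u64 {
    if node.pubkey() == me {
        return 0; // skip our own node
    }
    stakes.get(node.pubkey()).copied().unwrap_or_default()
}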
parent 95806c3295
commit aafcac27d8
@@ -156,7 +156,7 @@ fn create_client(
 info!("Searching for target_node: {:?}", target_node);
 let mut target_client = None;
 for node in nodes {
-if node.id == target_node {
+if node.pubkey() == &target_node {
 target_client = Some(get_client(
 &[node],
 &SocketAddrSpace::Unspecified,

@@ -63,7 +63,7 @@ fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: O
 let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
 let bank = Bank::new_for_benches(&genesis_config);
 let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
-let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
+let slot_leader = *nodes[1..].choose(&mut rng).unwrap().pubkey();
 let slot = rand::random::<u64>();
 b.iter(|| {
 get_retransmit_peers_deterministic(

@@ -314,10 +314,10 @@ impl BroadcastRun for BroadcastDuplicatesRun {
 .unwrap()
 .remove(shred.signature())
 {
-if cluster_partition.contains(&node.id) {
+if cluster_partition.contains(node.pubkey()) {
 info!(
 "skipping node {} for original shred index {}, slot {}",
-node.id,
+node.pubkey(),
 shred.index(),
 shred.slot()
 );

@@ -93,7 +93,7 @@ impl Node {
 fn pubkey(&self) -> Pubkey {
 match &self.node {
 NodeId::Pubkey(pubkey) => *pubkey,
-NodeId::ContactInfo(node) => node.id,
+NodeId::ContactInfo(node) => *node.pubkey(),
 }
 }

@@ -245,11 +245,11 @@ impl ClusterNodes<RetransmitStage> {
 .inspect(|node| {
 if let Some(node) = node.contact_info() {
 if let Ok(addr) = node.tvu() {
-addrs.entry(addr).or_insert(node.id);
+addrs.entry(addr).or_insert(*node.pubkey());
 }
 if !drop_redundant_turbine_path {
 if let Ok(addr) = node.tvu_forwards() {
-frwds.entry(addr).or_insert(node.id);
+frwds.entry(addr).or_insert(*node.pubkey());
 }
 }
 }
@@ -341,7 +341,7 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<N
 })
 // All known tvu-peers from gossip.
 .chain(cluster_info.tvu_peers().into_iter().map(|node| {
-let stake = stakes.get(&node.id).copied().unwrap_or_default();
+let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
 let node = NodeId::from(node);
 Node { node, stake }
 }))

@@ -603,7 +603,13 @@ mod tests {
 .map(|node| (node.pubkey(), node))
 .collect();
 for node in &nodes {
-assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
+assert_eq!(
+cluster_nodes[node.pubkey()]
+.contact_info()
+.unwrap()
+.pubkey(),
+node.pubkey()
+);
 }
 for (pubkey, stake) in &stakes {
 if *stake > 0 {

@@ -633,7 +639,13 @@ mod tests {
 .map(|node| (node.pubkey(), node))
 .collect();
 for node in &nodes {
-assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
+assert_eq!(
+cluster_nodes[node.pubkey()]
+.contact_info()
+.unwrap()
+.pubkey(),
+node.pubkey()
+);
 }
 for (pubkey, stake) in &stakes {
 if *stake > 0 {
@@ -179,7 +179,7 @@ impl ClusterSlots {
 .iter()
 .map(|peer| {
 validator_stakes
-.get(&peer.id)
+.get(peer.pubkey())
 .map(|node| node.total_stake)
 .unwrap_or(0)
 + 1

@@ -193,7 +193,7 @@ impl ClusterSlots {
 let slot_peers = slot_peers.read().unwrap();
 repair_peers
 .iter()
-.map(|peer| slot_peers.get(&peer.id).cloned().unwrap_or(0))
+.map(|peer| slot_peers.get(peer.pubkey()).cloned().unwrap_or(0))
 .zip(stakes)
 .map(|(a, b)| (a / 2 + b / 2).max(1u64))
 .collect()

@@ -210,7 +210,7 @@ impl ClusterSlots {
 repair_peers
 .iter()
 .enumerate()
-.filter_map(|(i, ci)| Some((slot_peers.get(&ci.id)? + 1, i)))
+.filter_map(|(i, ci)| Some((slot_peers.get(ci.pubkey())? + 1, i)))
 .collect()
 })
 .unwrap_or_default()

@@ -291,8 +291,8 @@ mod tests {
 .write()
 .unwrap()
 .insert(0, Arc::new(RwLock::new(map)));
-c1.id = k1;
-c2.id = k2;
+c1.set_pubkey(k1);
+c2.set_pubkey(k2);
 assert_eq!(cs.compute_weights(0, &[c1, c2]), vec![std::u64::MAX / 4, 1]);
 }

@@ -320,8 +320,8 @@ mod tests {
 .into_iter()
 .collect();
 *cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes);
-c1.id = k1;
-c2.id = k2;
+c1.set_pubkey(k1);
+c2.set_pubkey(k2);
 assert_eq!(
 cs.compute_weights(0, &[c1, c2]),
 vec![std::u64::MAX / 4 + 1, 1]
@@ -333,7 +333,7 @@ mod tests {
 let cs = ClusterSlots::default();
 let mut contact_infos = vec![ContactInfo::default(); 2];
 for ci in contact_infos.iter_mut() {
-ci.id = solana_sdk::pubkey::new_rand();
+ci.set_pubkey(solana_sdk::pubkey::new_rand());
 }
 let slot = 9;

@@ -345,7 +345,7 @@ mod tests {
 // Give second validator max stake
 let validator_stakes: HashMap<_, _> = vec![(
-*Arc::new(contact_infos[1].id),
+*contact_infos[1].pubkey(),
 NodeVoteAccounts {
 total_stake: std::u64::MAX / 2,
 vote_accounts: vec![Pubkey::default()],

@@ -358,7 +358,7 @@ mod tests {
 // Mark the first validator as completed slot 9, should pick that validator,
 // even though it only has default stake, while the other validator has
 // max stake
-cs.insert_node_id(slot, contact_infos[0].id);
+cs.insert_node_id(slot, *contact_infos[0].pubkey());
 assert_eq!(
 cs.compute_weights_exclude_nonfrozen(slot, &contact_infos),
 vec![(1, 0)]
@@ -267,13 +267,13 @@ pub(crate) enum RepairResponse {
 impl RepairProtocol {
 fn sender(&self) -> &Pubkey {
 match self {
-Self::LegacyWindowIndex(ci, _, _) => &ci.id,
-Self::LegacyHighestWindowIndex(ci, _, _) => &ci.id,
-Self::LegacyOrphan(ci, _) => &ci.id,
-Self::LegacyWindowIndexWithNonce(ci, _, _, _) => &ci.id,
-Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => &ci.id,
-Self::LegacyOrphanWithNonce(ci, _, _) => &ci.id,
-Self::LegacyAncestorHashes(ci, _, _) => &ci.id,
+Self::LegacyWindowIndex(ci, _, _) => ci.pubkey(),
+Self::LegacyHighestWindowIndex(ci, _, _) => ci.pubkey(),
+Self::LegacyOrphan(ci, _) => ci.pubkey(),
+Self::LegacyWindowIndexWithNonce(ci, _, _, _) => ci.pubkey(),
+Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => ci.pubkey(),
+Self::LegacyOrphanWithNonce(ci, _, _) => ci.pubkey(),
+Self::LegacyAncestorHashes(ci, _, _) => ci.pubkey(),
 Self::Pong(pong) => pong.from(),
 Self::WindowIndex { header, .. } => &header.sender,
 Self::HighestWindowIndex { header, .. } => &header.sender,

@@ -346,7 +346,7 @@ impl RepairPeers {
 .zip(weights)
 .filter_map(|(peer, &weight)| {
 let addr = peer.serve_repair().ok()?;
-Some(((peer.id, addr), weight))
+Some(((*peer.pubkey(), addr), weight))
 })
 .unzip();
 if peers.is_empty() {

@@ -1077,7 +1077,7 @@ impl ServeRepair {
 .map(|i| index[i])
 .filter_map(|i| {
 let addr = repair_peers[i].serve_repair().ok()?;
-Some((repair_peers[i].id, addr))
+Some((*repair_peers[i].pubkey(), addr))
 })
 .take(ANCESTOR_HASH_REPAIR_SAMPLE_SIZE)
 .collect();

@@ -1100,7 +1100,7 @@ impl ServeRepair {
 .unzip();
 let k = WeightedIndex::new(weights)?.sample(&mut rand::thread_rng());
 let n = index[k];
-Ok((repair_peers[n].id, repair_peers[n].serve_repair()?))
+Ok((*repair_peers[n].pubkey(), repair_peers[n].serve_repair()?))
 }

 pub(crate) fn map_repair_request(
@@ -2172,7 +2172,7 @@ mod tests {
 let known_validators = Some(vec![*contact_info2.pubkey()].into_iter().collect());
 let repair_peers = serve_repair.repair_peers(&known_validators, 1);
 assert_eq!(repair_peers.len(), 1);
-assert_eq!(&repair_peers[0].id, contact_info2.pubkey());
+assert_eq!(repair_peers[0].pubkey(), contact_info2.pubkey());
 assert!(serve_repair
 .repair_request(
 &cluster_slots,

@@ -2190,7 +2190,7 @@ mod tests {
 let repair_peers: HashSet<Pubkey> = serve_repair
 .repair_peers(&None, 1)
 .into_iter()
-.map(|c| c.id)
+.map(|node| *node.pubkey())
 .collect();
 assert_eq!(repair_peers.len(), 2);
 assert!(repair_peers.contains(contact_info2.pubkey()));

@@ -2104,7 +2104,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
 // Contact infos are refreshed twice during this period.
 age < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
 })
-.map(|node| (node.id, node))
+.map(|node| (*node.pubkey(), node))
 .collect();
 let my_shred_version = cluster_info.my_shred_version();
 let my_id = cluster_info.id();
@@ -430,13 +430,13 @@ fn get_target(
 if node.gossip().ok() == Some(entrypoint_addr) {
 info!("{:?}", node.gossip());
 target = match mode {
-Mode::Gossip => Some((node.id, node.gossip().unwrap())),
-Mode::Tvu => Some((node.id, node.tvu().unwrap())),
-Mode::TvuForwards => Some((node.id, node.tvu_forwards().unwrap())),
-Mode::Tpu => Some((node.id, node.tpu().unwrap())),
-Mode::TpuForwards => Some((node.id, node.tpu_forwards().unwrap())),
-Mode::Repair => Some((node.id, node.repair().unwrap())),
-Mode::ServeRepair => Some((node.id, node.serve_repair().unwrap())),
+Mode::Gossip => Some((*node.pubkey(), node.gossip().unwrap())),
+Mode::Tvu => Some((*node.pubkey(), node.tvu().unwrap())),
+Mode::TvuForwards => Some((*node.pubkey(), node.tvu_forwards().unwrap())),
+Mode::Tpu => Some((*node.pubkey(), node.tpu().unwrap())),
+Mode::TpuForwards => Some((*node.pubkey(), node.tpu_forwards().unwrap())),
+Mode::Repair => Some((*node.pubkey(), node.repair().unwrap())),
+Mode::ServeRepair => Some((*node.pubkey(), node.serve_repair().unwrap())),
 Mode::Rpc => None,
 };
 break;

@@ -563,7 +563,7 @@ impl ClusterInfo {
 // there's not much point in saving entrypoint ContactInfo since by
 // definition that information is already available
 let contact_info = v.value.contact_info().unwrap();
-if contact_info.id != self_pubkey
+if contact_info.pubkey() != &self_pubkey
 && contact_info
 .gossip()
 .map(|addr| !entrypoint_gossip_addrs.contains(&addr))

@@ -759,7 +759,7 @@ impl ClusterInfo {
 .rpc()
 .ok()
 .filter(|addr| self.socket_addr_space.check(addr))?;
-let node_version = self.get_node_version(&node.id);
+let node_version = self.get_node_version(node.pubkey());
 if my_shred_version != 0
 && (node.shred_version() != 0 && node.shred_version() != my_shred_version)
 {
@@ -769,9 +769,13 @@ impl ClusterInfo {
 Some(format!(
 "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {}\n",
 rpc_addr.to_string(),
-if node.id == my_pubkey { "me" } else { "" },
+if node.pubkey() == &my_pubkey {
+"me"
+} else {
+""
+},
 now.saturating_sub(last_updated),
-node.id,
+node.pubkey(),
 if let Some(node_version) = node_version {
 node_version.to_string()
 } else {

@@ -812,7 +816,7 @@ impl ClusterInfo {
 total_spy_nodes = total_spy_nodes.saturating_add(1);
 }

-let node_version = self.get_node_version(&node.id);
+let node_version = self.get_node_version(node.pubkey());
 if my_shred_version != 0 && (node.shred_version() != 0 && node.shred_version() != my_shred_version) {
 different_shred_nodes = different_shred_nodes.saturating_add(1);
 None

@@ -831,9 +835,9 @@ impl ClusterInfo {
 .as_ref()
 .map(IpAddr::to_string)
 .unwrap_or_else(|| String::from("none")),
-if node.id == my_pubkey { "me" } else { "" },
+if node.pubkey() == &my_pubkey { "me" } else { "" },
 now.saturating_sub(last_updated),
-node.id,
+node.pubkey(),
 if let Some(node_version) = node_version {
 node_version.to_string()
 } else {
@@ -1277,7 +1281,9 @@ impl ClusterInfo {
 let gossip_crds = self.gossip.crds.read().unwrap();
 gossip_crds
 .get_nodes_contact_info()
-.filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.rpc()))
+.filter(|node| {
+node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.rpc())
+})
 .cloned()
 .collect()
 }

@@ -1297,7 +1303,7 @@ impl ClusterInfo {
 gossip_crds
 .get_nodes_contact_info()
 // shred_version not considered for gossip peers (ie, spy nodes do not set shred_version)
-.filter(|node| node.id != me && self.check_socket_addr_space(&node.gossip()))
+.filter(|node| node.pubkey() != &me && self.check_socket_addr_space(&node.gossip()))
 .cloned()
 .collect()
 }

@@ -1307,7 +1313,9 @@ impl ClusterInfo {
 let self_pubkey = self.id();
 self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
 .get_nodes_contact_info()
-.filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tvu()))
+.filter(|node| {
+node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.tvu())
+})
 .cloned()
 .collect()
 }

@@ -1319,7 +1327,7 @@ impl ClusterInfo {
 self.time_gossip_read_lock("tvu_peers", &self.stats.tvu_peers)
 .get_nodes_contact_info()
 .filter(|node| {
-node.id != self_pubkey
+node.pubkey() != &self_pubkey
 && node.shred_version() == self_shred_version
 && self.check_socket_addr_space(&node.tvu())
 })
@@ -1336,11 +1344,11 @@ impl ClusterInfo {
 gossip_crds
 .get_nodes_contact_info()
 .filter(|node| {
-node.id != self_pubkey
+node.pubkey() != &self_pubkey
 && node.shred_version() == self_shred_version
 && self.check_socket_addr_space(&node.tvu())
 && self.check_socket_addr_space(&node.serve_repair())
-&& match gossip_crds.get::<&LowestSlot>(node.id) {
+&& match gossip_crds.get::<&LowestSlot>(*node.pubkey()) {
 None => true, // fallback to legacy behavior
 Some(lowest_slot) => lowest_slot.lowest <= slot,
 }

@@ -1364,7 +1372,9 @@ impl ClusterInfo {
 let gossip_crds = self.gossip.crds.read().unwrap();
 gossip_crds
 .get_nodes_contact_info()
-.filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tpu()))
+.filter(|node| {
+node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.tpu())
+})
 .cloned()
 .collect()
 }

@@ -1660,7 +1670,7 @@ impl ClusterInfo {
 return true;
 }
 for entrypoint in entrypoints.iter_mut() {
-if entrypoint.id == Pubkey::default() {
+if entrypoint.pubkey() == &Pubkey::default() {
 // If a pull from the entrypoint was successful it should exist in the CRDS table
 if let Some(entrypoint_from_gossip) = entrypoint
 .gossip()

@@ -1681,7 +1691,7 @@ impl ClusterInfo {
 info!(
 "Setting shred version to {:?} from entrypoint {:?}",
 entrypoint.shred_version(),
-entrypoint.id
+entrypoint.pubkey()
 );
 self.my_contact_info
 .write()

@@ -1692,7 +1702,7 @@ impl ClusterInfo {
 self.my_shred_version() != 0
 && entrypoints
 .iter()
-.all(|entrypoint| entrypoint.id != Pubkey::default())
+.all(|entrypoint| entrypoint.pubkey() != &Pubkey::default())
 }

 fn handle_purge(
@@ -1725,7 +1735,8 @@ impl ClusterInfo {
 .read()
 .unwrap()
 .iter()
-.map(|k| k.id)
+.map(LegacyContactInfo::pubkey)
+.copied()
 .chain(std::iter::once(self.id()))
 .collect();
 self.stats.trim_crds_table.add_relaxed(1);

@@ -1913,7 +1924,7 @@ impl ClusterInfo {
 .with_min_len(1024)
 .filter(|(_, _, caller)| match caller.contact_info() {
 None => false,
-Some(caller) if caller.id == self_pubkey => {
+Some(caller) if caller.pubkey() == &self_pubkey => {
 warn!("PullRequest ignored, I'm talking to myself");
 self.stats.window_request_loopback.add_relaxed(1);
 false

@@ -3146,7 +3157,7 @@ fn filter_on_shred_version(
 values.retain(|value| match &value.data {
 // Allow node to update its own contact info in case their
 // shred-version changes
-CrdsData::LegacyContactInfo(node) => node.id == *from,
+CrdsData::LegacyContactInfo(node) => node.pubkey() == from,
 CrdsData::NodeInstance(_) => true,
 _ => false,
 })
@@ -348,7 +348,7 @@ pub(crate) fn get_gossip_nodes<R: Rng>(
 if value.local_timestamp < active_cutoff {
 // In order to mitigate eclipse attack, for staked nodes
 // continue retrying periodically.
-let stake = stakes.get(&node.id).copied().unwrap_or_default();
+let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
 if stake == 0u64 || !rng.gen_ratio(1, 16) {
 return None;
 }

@@ -356,14 +356,14 @@ pub(crate) fn get_gossip_nodes<R: Rng>(
 Some(node)
 })
 .filter(|node| {
-&node.id != pubkey
+node.pubkey() != pubkey
 && verify_shred_version(node.shred_version())
 && node
 .gossip()
 .map(|addr| socket_addr_space.check(&addr))
 .unwrap_or_default()
 && match gossip_validators {
-Some(nodes) => nodes.contains(&node.id),
+Some(nodes) => nodes.contains(node.pubkey()),
 None => true,
 }
 })

@@ -381,7 +381,7 @@ pub(crate) fn dedup_gossip_addresses(
 .filter_map(|node| Some((node.gossip().ok()?, node)))
 .into_grouping_map()
 .aggregate(|acc, _node_gossip, node| {
-let stake = stakes.get(&node.id).copied().unwrap_or_default();
+let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
 match acc {
 Some((ref s, _)) if s >= &stake => acc,
 Some(_) | None => Some((stake, node)),

@@ -410,7 +410,7 @@ pub(crate) fn maybe_ping_gossip_addresses<R: Rng + CryptoRng>(
 Ok(addr) => addr,
 };
 let (check, ping) = {
-let node = (node.id, node_gossip);
+let node = (*node.pubkey(), node_gossip);
 ping_cache.check(now, node, &mut pingf)
 };
 if let Some(ping) = ping {
@@ -465,7 +465,7 @@ mod test {
 //incorrect dest
 let mut res = crds_gossip.process_prune_msg(
 &id,
-&ci.id,
+ci.pubkey(),
 &Pubkey::from(hash(&[1; 32]).to_bytes()),
 &[prune_pubkey],
 now,

@@ -476,7 +476,7 @@ mod test {
 //correct dest
 res = crds_gossip.process_prune_msg(
 &id, // self_pubkey
-&ci.id, // peer
+ci.pubkey(), // peer
 &id, // destination
 &[prune_pubkey], // origins
 now,

@@ -488,7 +488,7 @@ mod test {
 let timeout = now + crds_gossip.push.prune_timeout * 2;
 res = crds_gossip.process_prune_msg(
 &id, // self_pubkey
-&ci.id, // peer
+ci.pubkey(), // peer
 &id, // destination
 &[prune_pubkey], // origins
 now,
@@ -829,7 +829,7 @@ pub(crate) mod tests {
 ping_cache
 .lock()
 .unwrap()
-.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
 let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
 crds.write()
 .unwrap()

@@ -893,12 +893,12 @@ pub(crate) mod tests {
 let node = CrdsGossipPull::default();
 crds.insert(entry, now, GossipRoute::LocalMessage).unwrap();
 let old = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-ping_cache.mock_pong(old.id, old.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*old.pubkey(), old.gossip().unwrap(), Instant::now());
 let old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(old));
 crds.insert(old.clone(), now, GossipRoute::LocalMessage)
 .unwrap();
 let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
 let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
 crds.insert(new, now, GossipRoute::LocalMessage).unwrap();
 let crds = RwLock::new(crds);

@@ -956,7 +956,7 @@ pub(crate) mod tests {
 .insert(entry, now, GossipRoute::LocalMessage)
 .unwrap();
 let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
-ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
 let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
 node_crds
 .insert(new, now, GossipRoute::LocalMessage)

@@ -1058,7 +1058,7 @@ pub(crate) mod tests {
 128, // capacity
 );
 let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
 let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
 node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();
 let node_crds = RwLock::new(node_crds);
@@ -1118,14 +1118,14 @@ pub(crate) mod tests {
 128, // capacity
 );
 let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 1);
-ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
 let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
 node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();

 let mut dest_crds = Crds::default();
 let new_id = solana_sdk::pubkey::new_rand();
 let new = ContactInfo::new_localhost(&new_id, 1);
-ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
 let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
 dest_crds
 .insert(new.clone(), 0, GossipRoute::LocalMessage)

@@ -1134,7 +1134,11 @@ pub(crate) mod tests {
 // node contains a key from the dest node, but at an older local timestamp
 let same_key = ContactInfo::new_localhost(&new_id, 0);
-ping_cache.mock_pong(same_key.id, same_key.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(
+*same_key.pubkey(),
+same_key.gossip().unwrap(),
+Instant::now(),
+);
 let same_key = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(same_key));
 assert_eq!(same_key.label(), new.label());
 assert!(same_key.wallclock() < new.wallclock());
@@ -272,7 +272,7 @@ impl CrdsGossipPush {
 );
 let nodes = crds_gossip::dedup_gossip_addresses(nodes, stakes)
 .into_values()
-.map(|(_stake, node)| node.id)
+.map(|(_stake, node)| *node.pubkey())
 .collect::<Vec<_>>();
 if nodes.is_empty() {
 return;

@@ -336,7 +336,7 @@ mod tests {
 // push a new message
 assert_eq!(
 push.process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0),
-[ci.id].into_iter().collect()
+[*ci.pubkey()].into_iter().collect()
 );

 // push an old version

@@ -372,7 +372,7 @@ mod tests {
 let crds = RwLock::<Crds>::default();
 let push = CrdsGossipPush::default();
 let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-let origin = ci.id;
+let origin = *ci.pubkey();
 ci.set_wallclock(0);
 let value_old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ci.clone()));

@@ -398,7 +398,7 @@ mod tests {
 let push = CrdsGossipPush::default();
 let mut ping_cache = new_ping_cache();
 let peer = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*peer.pubkey(), peer.gossip().unwrap(), Instant::now());
 let peer = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer));
 assert_eq!(
 crds.insert(peer.clone(), now, GossipRoute::LocalMessage),

@@ -450,7 +450,7 @@ mod tests {
 .map(|wallclock| {
 let mut peer = ContactInfo::new_rand(&mut rng, /*pubkey=*/ None);
 peer.set_wallclock(wallclock);
-ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
+ping_cache.mock_pong(*peer.pubkey(), peer.gossip().unwrap(), Instant::now());
 CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer))
 })
 .collect();
@@ -598,7 +598,7 @@ impl CrdsValue {
 }
 pub fn pubkey(&self) -> Pubkey {
 match &self.data {
-CrdsData::LegacyContactInfo(contact_info) => contact_info.id,
+CrdsData::LegacyContactInfo(contact_info) => *contact_info.pubkey(),
 CrdsData::Vote(_, vote) => vote.from,
 CrdsData::LowestSlot(_, slots) => slots.from,
 CrdsData::LegacySnapshotHashes(hash) => hash.from,

@@ -719,7 +719,7 @@ mod test {
 let mut rng = rand::thread_rng();
 let v = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(LegacyContactInfo::default()));
 assert_eq!(v.wallclock(), 0);
-let key = v.contact_info().unwrap().id;
+let key = *v.contact_info().unwrap().pubkey();
 assert_eq!(v.label(), CrdsValueLabel::LegacyContactInfo(key));

 let v = Vote::new(Pubkey::default(), new_test_vote_tx(&mut rng), 0).unwrap();

@@ -254,7 +254,7 @@ fn spy(
 tvu_peers = spy_ref.all_tvu_peers();

 let found_node_by_pubkey = if let Some(pubkey) = find_node_by_pubkey {
-all_peers.iter().any(|x| x.id == pubkey)
+all_peers.iter().any(|node| node.pubkey() == &pubkey)
 } else {
 false
 };
@@ -20,7 +20,7 @@ use {
 Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, AbiExample, Deserialize, Serialize,
 )]
 pub struct LegacyContactInfo {
-pub id: Pubkey,
+id: Pubkey,
 /// gossip address
 gossip: SocketAddr,
 /// address to connect to for replication

@@ -156,6 +156,11 @@ impl LegacyContactInfo {
 }
 }

+#[inline]
+pub fn pubkey(&self) -> &Pubkey {
+&self.id
+}
+
 #[inline]
 pub fn wallclock(&self) -> u64 {
 self.wallclock

@@ -166,6 +171,10 @@ impl LegacyContactInfo {
 self.shred_version
 }

+pub fn set_pubkey(&mut self, pubkey: Pubkey) {
+self.id = pubkey
+}
+
 pub fn set_wallclock(&mut self, wallclock: u64) {
 self.wallclock = wallclock;
 }
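Taken together with the hunks above, these accessors replace direct access to the now-private `id` field. A minimal usage sketch follows; the `solana_gossip` module path, the `new_localhost` constructor, and `solana_sdk::pubkey::new_rand` are assumed from their use elsewhere in this diff.

use solana_gossip::legacy_contact_info::LegacyContactInfo;
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Throw-away contact info for a random node identity (wallclock = 0).
    let mut node = LegacyContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);

    // Reads go through the accessor; it returns &Pubkey, so copy out with `*`.
    let id: Pubkey = *node.pubkey();
    assert_eq!(node.pubkey(), &id);

    // Writes go through the new setter instead of assigning the field.
    node.set_pubkey(Pubkey::default());
    assert_eq!(node.pubkey(), &Pubkey::default());
}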
@@ -197,7 +197,7 @@ fn process_spy_results(
 }
 }
 if let Some(node) = pubkey {
-if !validators.iter().any(|x| x.id == node) {
+if !validators.iter().any(|x| x.pubkey() == &node) {
 eprintln!("Error: Could not find node {node:?}");
 exit(1);
 }

@@ -518,7 +518,7 @@ fn network_run_pull(
 let self_info = gossip_crds.get::<&CrdsValue>(&label).unwrap().clone();
 requests
 .into_iter()
-.map(move |(peer, filters)| (peer.id, filters, self_info.clone()))
+.map(move |(peer, filters)| (*peer.pubkey(), filters, self_info.clone()))
 })
 .collect()
 };

@@ -764,7 +764,7 @@ fn test_prune_errors() {
 //incorrect dest
 let mut res = crds_gossip.process_prune_msg(
 &id, // self_pubkey
-&ci.id, // peer
+ci.pubkey(), // peer
 &Pubkey::from(hash(&[1; 32]).to_bytes()), // destination
 &[prune_pubkey], // origins
 now,

@@ -775,7 +775,7 @@ fn test_prune_errors() {
 //correct dest
 res = crds_gossip.process_prune_msg(
 &id, // self_pubkey
-&ci.id, // peer
+ci.pubkey(), // peer
 &id, // destination
 &[prune_pubkey], // origins
 now,

@@ -787,7 +787,7 @@ fn test_prune_errors() {
 let timeout = now + crds_gossip.push.prune_timeout * 2;
 res = crds_gossip.process_prune_msg(
 &id, // self_pubkey
-&ci.id, // peer
+ci.pubkey(), // peer
 &id, // destination
 &[prune_pubkey], // origins
 now,
@@ -214,12 +214,12 @@ fn gossip_rstar() {
 let xv = &listen[0].0;
 xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
 };
-trace!("rstar leader {}", xd.id);
+trace!("rstar leader {}", xd.pubkey());
 for n in 0..(num - 1) {
 let y = (n + 1) % listen.len();
 let yv = &listen[y].0;
 yv.insert_legacy_info(xd.clone());
-trace!("rstar insert {} into {}", xd.id, yv.id());
+trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
 }
 });
 }

@@ -78,7 +78,7 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
 assert!(cluster_nodes.len() >= nodes);
 let ignore_nodes = Arc::new(ignore_nodes);
 cluster_nodes.par_iter().for_each(|ingress_node| {
-if ignore_nodes.contains(&ingress_node.id) {
+if ignore_nodes.contains(ingress_node.pubkey()) {
 return;
 }
 let random_keypair = Keypair::new();

@@ -101,7 +101,7 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
 .retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs)
 .unwrap();
 for validator in &cluster_nodes {
-if ignore_nodes.contains(&validator.id) {
+if ignore_nodes.contains(validator.pubkey()) {
 continue;
 }
 let (rpc, tpu) = get_client_facing_addr(validator);
@@ -242,8 +242,8 @@ pub fn kill_entry_and_spend_and_verify_rest(
 for ingress_node in &cluster_nodes {
 client
-.poll_get_balance_with_commitment(&ingress_node.id, CommitmentConfig::processed())
-.unwrap_or_else(|err| panic!("Node {} has no balance: {}", ingress_node.id, err));
+.poll_get_balance_with_commitment(ingress_node.pubkey(), CommitmentConfig::processed())
+.unwrap_or_else(|err| panic!("Node {} has no balance: {}", ingress_node.pubkey(), err));
 }

 info!("sleeping for 2 leader fortnights");

@@ -257,7 +257,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
 ));
 info!("done sleeping for 2 fortnights");
 for ingress_node in &cluster_nodes {
-if &ingress_node.id == entry_point_info.pubkey() {
+if ingress_node.pubkey() == entry_point_info.pubkey() {
 info!("ingress_node.id == entry_point_info.id, continuing...");
 continue;
 }

@@ -384,11 +384,11 @@ pub fn check_no_new_roots(
 let client = ThinClient::new(rpc, tpu, connection_cache.clone());
 let initial_root = client
 .get_slot()
-.unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id));
+.unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey()));
 roots[i] = initial_root;
 client
 .get_slot_with_commitment(CommitmentConfig::processed())
-.unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id))
+.unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey()))
 })
 .max()
 .unwrap();

@@ -403,7 +403,7 @@ pub fn check_no_new_roots(
 let client = ThinClient::new(rpc, tpu, connection_cache.clone());
 current_slot = client
 .get_slot_with_commitment(CommitmentConfig::processed())
-.unwrap_or_else(|_| panic!("get_slot for {} failed", contact_infos[0].id));
+.unwrap_or_else(|_| panic!("get_slot for {} failed", contact_infos[0].pubkey()));
 if current_slot > end_slot {
 reached_end_slot = true;
 break;

@@ -411,7 +411,10 @@ pub fn check_no_new_roots(
 if last_print.elapsed().as_secs() > 3 {
 info!(
 "{} current slot: {} on validator: {}, waiting for any validator with slot: {}",
-test_name, current_slot, contact_info.id, end_slot
+test_name,
+current_slot,
+contact_info.pubkey(),
+end_slot
 );
 last_print = Instant::now();
 }
@@ -427,7 +430,7 @@ pub fn check_no_new_roots(
 assert_eq!(
 client
 .get_slot()
-.unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id)),
+.unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey())),
 roots[i]
 );
 }

@@ -441,7 +444,7 @@ fn poll_all_nodes_for_signature(
 confs: usize,
 ) -> Result<(), TransportError> {
 for validator in cluster_nodes {
-if &validator.id == entry_point_info.pubkey() {
+if validator.pubkey() == entry_point_info.pubkey() {
 continue;
 }
 let (rpc, tpu) = get_client_facing_addr(validator);

@@ -352,7 +352,7 @@ fn test_forwarding() {
 let validator_info = cluster_nodes
 .iter()
-.find(|c| c.id != leader_pubkey)
+.find(|c| c.pubkey() != &leader_pubkey)
 .unwrap();

 // Confirm that transactions were forwarded to and processed by the leader.

@@ -1359,7 +1359,7 @@ fn test_snapshots_blockstore_floor() {
 )
 .unwrap();
 let mut known_validators = HashSet::new();
-known_validators.insert(cluster_nodes[0].id);
+known_validators.insert(*cluster_nodes[0].pubkey());
 validator_snapshot_test_config
 .validator_config
 .known_validators = Some(known_validators);
@@ -115,7 +115,7 @@ fn test_consistency_halt() {
 setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);

 let mut known_validators = HashSet::new();
-known_validators.insert(cluster_nodes[0].id);
+known_validators.insert(*cluster_nodes[0].pubkey());

 validator_snapshot_test_config
 .validator_config

@@ -33,7 +33,7 @@ impl TpuInfo for ClusterTpuInfo {
 .cluster_info
 .tpu_peers()
 .into_iter()
-.filter_map(|node| Some((node.id, node.tpu().ok()?)))
+.filter_map(|node| Some((*node.pubkey(), node.tpu().ok()?)))
 .collect();
 }

@@ -3459,14 +3459,14 @@ pub mod rpc_full {
 .unwrap_or_default()
 {
 let (version, feature_set) = if let Some(version) =
-cluster_info.get_node_version(&contact_info.id)
+cluster_info.get_node_version(contact_info.pubkey())
 {
 (Some(version.to_string()), Some(version.feature_set))
 } else {
 (None, None)
 };
 Some(RpcContactInfo {
-pubkey: contact_info.id.to_string(),
+pubkey: contact_info.pubkey().to_string(),
 gossip: contact_info.gossip().ok(),
 tpu: contact_info
 .tpu()
@@ -220,7 +220,7 @@ fn get_rpc_peers(
 if bootstrap_config.only_known_rpc {
 rpc_peers.retain(|rpc_peer| {
-is_known_validator(&rpc_peer.id, &validator_config.known_validators)
+is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators)
 });
 }

@@ -229,12 +229,14 @@ fn get_rpc_peers(
 // Filter out blacklisted nodes
 let rpc_peers: Vec<_> = rpc_peers
 .into_iter()
-.filter(|rpc_peer| !blacklisted_rpc_nodes.contains(&rpc_peer.id))
+.filter(|rpc_peer| !blacklisted_rpc_nodes.contains(rpc_peer.pubkey()))
 .collect();
 let rpc_peers_blacklisted = rpc_peers_total - rpc_peers.len();
 let rpc_known_peers = rpc_peers
 .iter()
-.filter(|rpc_peer| is_known_validator(&rpc_peer.id, &validator_config.known_validators))
+.filter(|rpc_peer| {
+is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators)
+})
 .count();

 info!("Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, {rpc_peers_blacklisted} blacklisted");

@@ -507,7 +509,7 @@ fn get_vetted_rpc_nodes(
 info!(
 "Using RPC service from node {}: {:?}",
-rpc_contact_info.id,
+rpc_contact_info.pubkey(),
 rpc_contact_info.rpc()
 );

@@ -535,7 +537,7 @@ fn get_vetted_rpc_nodes(
 fail_rpc_node(
 "Failed to ping RPC".to_string(),
 &validator_config.known_validators,
-&rpc_contact_info.id,
+rpc_contact_info.pubkey(),
 &mut newly_blacklisted_rpc_nodes.write().unwrap(),
 );
 false

@@ -545,7 +547,7 @@ fn get_vetted_rpc_nodes(
 fail_rpc_node(
 format!("Failed to get RPC node version: {err}"),
 &validator_config.known_validators,
-&rpc_contact_info.id,
+rpc_contact_info.pubkey(),
 &mut newly_blacklisted_rpc_nodes.write().unwrap(),
 );
 false
@@ -676,7 +678,7 @@ pub fn rpc_bootstrap(
 fail_rpc_node(
 err,
 &validator_config.known_validators,
-&rpc_contact_info.id,
+rpc_contact_info.pubkey(),
 &mut blacklisted_rpc_nodes,
 );
 }

@@ -781,7 +783,7 @@ fn get_rpc_nodes(
 } else {
 let rpc_peers = peer_snapshot_hashes
 .iter()
-.map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.id)
+.map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.pubkey())
 .collect::<Vec<_>>();
 let final_snapshot_hash = peer_snapshot_hashes[0].snapshot_hash;
 info!(

@@ -1034,7 +1036,7 @@ fn get_eligible_peer_snapshot_hashes(
 let peer_snapshot_hashes = rpc_peers
 .iter()
 .flat_map(|rpc_peer| {
-get_snapshot_hashes_for_node(cluster_info, &rpc_peer.id).map(|snapshot_hash| {
+get_snapshot_hashes_for_node(cluster_info, rpc_peer.pubkey()).map(|snapshot_hash| {
 PeerSnapshotHash {
 rpc_contact_info: rpc_peer.clone(),
 snapshot_hash,

@@ -1262,7 +1264,7 @@ fn download_snapshot(
 && *download_abort_count < maximum_snapshot_download_abort
 {
 if let Some(ref known_validators) = validator_config.known_validators {
-if known_validators.contains(&rpc_contact_info.id)
+if known_validators.contains(rpc_contact_info.pubkey())
 && known_validators.len() == 1
 && bootstrap_config.only_known_rpc
 {