removes pubkey from LegacyContactInfo public interface (#31375)
Working towards the LegacyContactInfo => ContactInfo migration, this commit adds more API parity between the two (a minimal sketch of the resulting call-site pattern follows the commit metadata below).
This commit is contained in: parent 95806c3295, commit aafcac27d8
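
The API change itself is small: LegacyContactInfo::id becomes a private field reached through a pubkey() getter that returns &Pubkey and a set_pubkey() setter, so call sites move from node.id to node.pubkey() and add a dereference wherever an owned Pubkey is needed. Below is a minimal standalone sketch of that pattern, not the real gossip crate: the struct is reduced to its key field and is_known is a hypothetical helper standing in for the many call sites in the diff (it assumes the solana-sdk crate for the Pubkey type).

use {solana_sdk::pubkey::Pubkey, std::collections::HashSet};

// Simplified stand-in for the gossip struct: only the key field is kept here.
pub struct LegacyContactInfo {
    id: Pubkey, // no longer `pub`; access goes through the accessors below
}

impl LegacyContactInfo {
    #[inline]
    pub fn pubkey(&self) -> &Pubkey {
        &self.id
    }

    pub fn set_pubkey(&mut self, pubkey: Pubkey) {
        self.id = pubkey
    }
}

// Hypothetical call site illustrating the migration pattern.
fn is_known(node: &LegacyContactInfo, known_validators: &HashSet<Pubkey>) -> bool {
    // Before the change this read `known_validators.contains(&node.id)`.
    known_validators.contains(node.pubkey())
}

fn main() {
    let mut node = LegacyContactInfo {
        id: Pubkey::default(),
    };
    node.set_pubkey(Pubkey::new_unique());
    // Owned copies now need an explicit dereference of the getter.
    let owned: Pubkey = *node.pubkey();
    assert_eq!(&owned, node.pubkey());
    assert!(!is_known(&node, &HashSet::new()));
}

Because the getter returns a reference rather than a copy, call sites that need an owned key pick up the explicit *node.pubkey() spelling seen throughout the hunks below.
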
@@ -156,7 +156,7 @@ fn create_client(
     info!("Searching for target_node: {:?}", target_node);
     let mut target_client = None;
     for node in nodes {
-        if node.id == target_node {
+        if node.pubkey() == &target_node {
             target_client = Some(get_client(
                 &[node],
                 &SocketAddrSpace::Unspecified,

@@ -63,7 +63,7 @@ fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: O
     let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
     let bank = Bank::new_for_benches(&genesis_config);
     let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
-    let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
+    let slot_leader = *nodes[1..].choose(&mut rng).unwrap().pubkey();
     let slot = rand::random::<u64>();
     b.iter(|| {
         get_retransmit_peers_deterministic(

@@ -314,10 +314,10 @@ impl BroadcastRun for BroadcastDuplicatesRun {
                     .unwrap()
                     .remove(shred.signature())
                 {
-                    if cluster_partition.contains(&node.id) {
+                    if cluster_partition.contains(node.pubkey()) {
                         info!(
                             "skipping node {} for original shred index {}, slot {}",
-                            node.id,
+                            node.pubkey(),
                             shred.index(),
                             shred.slot()
                         );

@@ -93,7 +93,7 @@ impl Node {
     fn pubkey(&self) -> Pubkey {
         match &self.node {
             NodeId::Pubkey(pubkey) => *pubkey,
-            NodeId::ContactInfo(node) => node.id,
+            NodeId::ContactInfo(node) => *node.pubkey(),
         }
     }
 
@@ -245,11 +245,11 @@ impl ClusterNodes<RetransmitStage> {
             .inspect(|node| {
                 if let Some(node) = node.contact_info() {
                     if let Ok(addr) = node.tvu() {
-                        addrs.entry(addr).or_insert(node.id);
+                        addrs.entry(addr).or_insert(*node.pubkey());
                     }
                     if !drop_redundant_turbine_path {
                         if let Ok(addr) = node.tvu_forwards() {
-                            frwds.entry(addr).or_insert(node.id);
+                            frwds.entry(addr).or_insert(*node.pubkey());
                         }
                     }
                 }

@@ -341,7 +341,7 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<N
         })
         // All known tvu-peers from gossip.
         .chain(cluster_info.tvu_peers().into_iter().map(|node| {
-            let stake = stakes.get(&node.id).copied().unwrap_or_default();
+            let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
             let node = NodeId::from(node);
             Node { node, stake }
         }))

@@ -603,7 +603,13 @@ mod tests {
             .map(|node| (node.pubkey(), node))
             .collect();
         for node in &nodes {
-            assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
+            assert_eq!(
+                cluster_nodes[node.pubkey()]
+                    .contact_info()
+                    .unwrap()
+                    .pubkey(),
+                node.pubkey()
+            );
         }
         for (pubkey, stake) in &stakes {
             if *stake > 0 {

@@ -633,7 +639,13 @@ mod tests {
             .map(|node| (node.pubkey(), node))
             .collect();
         for node in &nodes {
-            assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
+            assert_eq!(
+                cluster_nodes[node.pubkey()]
+                    .contact_info()
+                    .unwrap()
+                    .pubkey(),
+                node.pubkey()
+            );
         }
         for (pubkey, stake) in &stakes {
             if *stake > 0 {

@@ -179,7 +179,7 @@ impl ClusterSlots {
                 .iter()
                 .map(|peer| {
                     validator_stakes
-                        .get(&peer.id)
+                        .get(peer.pubkey())
                         .map(|node| node.total_stake)
                         .unwrap_or(0)
                         + 1

@@ -193,7 +193,7 @@ impl ClusterSlots {
             let slot_peers = slot_peers.read().unwrap();
             repair_peers
                 .iter()
-                .map(|peer| slot_peers.get(&peer.id).cloned().unwrap_or(0))
+                .map(|peer| slot_peers.get(peer.pubkey()).cloned().unwrap_or(0))
                 .zip(stakes)
                 .map(|(a, b)| (a / 2 + b / 2).max(1u64))
                 .collect()

@@ -210,7 +210,7 @@ impl ClusterSlots {
                 repair_peers
                     .iter()
                     .enumerate()
-                    .filter_map(|(i, ci)| Some((slot_peers.get(&ci.id)? + 1, i)))
+                    .filter_map(|(i, ci)| Some((slot_peers.get(ci.pubkey())? + 1, i)))
                     .collect()
             })
             .unwrap_or_default()

@@ -291,8 +291,8 @@ mod tests {
             .write()
             .unwrap()
             .insert(0, Arc::new(RwLock::new(map)));
-        c1.id = k1;
-        c2.id = k2;
+        c1.set_pubkey(k1);
+        c2.set_pubkey(k2);
         assert_eq!(cs.compute_weights(0, &[c1, c2]), vec![std::u64::MAX / 4, 1]);
     }
 
@@ -320,8 +320,8 @@ mod tests {
             .into_iter()
             .collect();
         *cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes);
-        c1.id = k1;
-        c2.id = k2;
+        c1.set_pubkey(k1);
+        c2.set_pubkey(k2);
         assert_eq!(
             cs.compute_weights(0, &[c1, c2]),
             vec![std::u64::MAX / 4 + 1, 1]

@@ -333,7 +333,7 @@ mod tests {
         let cs = ClusterSlots::default();
         let mut contact_infos = vec![ContactInfo::default(); 2];
         for ci in contact_infos.iter_mut() {
-            ci.id = solana_sdk::pubkey::new_rand();
+            ci.set_pubkey(solana_sdk::pubkey::new_rand());
         }
         let slot = 9;
 
@@ -345,7 +345,7 @@ mod tests {
 
         // Give second validator max stake
         let validator_stakes: HashMap<_, _> = vec![(
-            *Arc::new(contact_infos[1].id),
+            *contact_infos[1].pubkey(),
             NodeVoteAccounts {
                 total_stake: std::u64::MAX / 2,
                 vote_accounts: vec![Pubkey::default()],

@@ -358,7 +358,7 @@ mod tests {
         // Mark the first validator as completed slot 9, should pick that validator,
         // even though it only has default stake, while the other validator has
        // max stake
-        cs.insert_node_id(slot, contact_infos[0].id);
+        cs.insert_node_id(slot, *contact_infos[0].pubkey());
         assert_eq!(
             cs.compute_weights_exclude_nonfrozen(slot, &contact_infos),
             vec![(1, 0)]

@@ -267,13 +267,13 @@ pub(crate) enum RepairResponse {
 impl RepairProtocol {
     fn sender(&self) -> &Pubkey {
         match self {
-            Self::LegacyWindowIndex(ci, _, _) => &ci.id,
-            Self::LegacyHighestWindowIndex(ci, _, _) => &ci.id,
-            Self::LegacyOrphan(ci, _) => &ci.id,
-            Self::LegacyWindowIndexWithNonce(ci, _, _, _) => &ci.id,
-            Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => &ci.id,
-            Self::LegacyOrphanWithNonce(ci, _, _) => &ci.id,
-            Self::LegacyAncestorHashes(ci, _, _) => &ci.id,
+            Self::LegacyWindowIndex(ci, _, _) => ci.pubkey(),
+            Self::LegacyHighestWindowIndex(ci, _, _) => ci.pubkey(),
+            Self::LegacyOrphan(ci, _) => ci.pubkey(),
+            Self::LegacyWindowIndexWithNonce(ci, _, _, _) => ci.pubkey(),
+            Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => ci.pubkey(),
+            Self::LegacyOrphanWithNonce(ci, _, _) => ci.pubkey(),
+            Self::LegacyAncestorHashes(ci, _, _) => ci.pubkey(),
             Self::Pong(pong) => pong.from(),
             Self::WindowIndex { header, .. } => &header.sender,
             Self::HighestWindowIndex { header, .. } => &header.sender,

@@ -346,7 +346,7 @@ impl RepairPeers {
             .zip(weights)
             .filter_map(|(peer, &weight)| {
                 let addr = peer.serve_repair().ok()?;
-                Some(((peer.id, addr), weight))
+                Some(((*peer.pubkey(), addr), weight))
             })
             .unzip();
         if peers.is_empty() {

@@ -1077,7 +1077,7 @@ impl ServeRepair {
             .map(|i| index[i])
             .filter_map(|i| {
                 let addr = repair_peers[i].serve_repair().ok()?;
-                Some((repair_peers[i].id, addr))
+                Some((*repair_peers[i].pubkey(), addr))
             })
             .take(ANCESTOR_HASH_REPAIR_SAMPLE_SIZE)
             .collect();

@@ -1100,7 +1100,7 @@ impl ServeRepair {
             .unzip();
         let k = WeightedIndex::new(weights)?.sample(&mut rand::thread_rng());
         let n = index[k];
-        Ok((repair_peers[n].id, repair_peers[n].serve_repair()?))
+        Ok((*repair_peers[n].pubkey(), repair_peers[n].serve_repair()?))
     }
 
     pub(crate) fn map_repair_request(

@@ -2172,7 +2172,7 @@ mod tests {
         let known_validators = Some(vec![*contact_info2.pubkey()].into_iter().collect());
         let repair_peers = serve_repair.repair_peers(&known_validators, 1);
         assert_eq!(repair_peers.len(), 1);
-        assert_eq!(&repair_peers[0].id, contact_info2.pubkey());
+        assert_eq!(repair_peers[0].pubkey(), contact_info2.pubkey());
         assert!(serve_repair
             .repair_request(
                 &cluster_slots,

@@ -2190,7 +2190,7 @@ mod tests {
         let repair_peers: HashSet<Pubkey> = serve_repair
             .repair_peers(&None, 1)
             .into_iter()
-            .map(|c| c.id)
+            .map(|node| *node.pubkey())
             .collect();
         assert_eq!(repair_peers.len(), 2);
         assert!(repair_peers.contains(contact_info2.pubkey()));

@@ -2104,7 +2104,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
             // Contact infos are refreshed twice during this period.
             age < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
         })
-        .map(|node| (node.id, node))
+        .map(|node| (*node.pubkey(), node))
         .collect();
     let my_shred_version = cluster_info.my_shred_version();
     let my_id = cluster_info.id();

@@ -430,13 +430,13 @@ fn get_target(
             if node.gossip().ok() == Some(entrypoint_addr) {
                 info!("{:?}", node.gossip());
                 target = match mode {
-                    Mode::Gossip => Some((node.id, node.gossip().unwrap())),
-                    Mode::Tvu => Some((node.id, node.tvu().unwrap())),
-                    Mode::TvuForwards => Some((node.id, node.tvu_forwards().unwrap())),
-                    Mode::Tpu => Some((node.id, node.tpu().unwrap())),
-                    Mode::TpuForwards => Some((node.id, node.tpu_forwards().unwrap())),
-                    Mode::Repair => Some((node.id, node.repair().unwrap())),
-                    Mode::ServeRepair => Some((node.id, node.serve_repair().unwrap())),
+                    Mode::Gossip => Some((*node.pubkey(), node.gossip().unwrap())),
+                    Mode::Tvu => Some((*node.pubkey(), node.tvu().unwrap())),
+                    Mode::TvuForwards => Some((*node.pubkey(), node.tvu_forwards().unwrap())),
+                    Mode::Tpu => Some((*node.pubkey(), node.tpu().unwrap())),
+                    Mode::TpuForwards => Some((*node.pubkey(), node.tpu_forwards().unwrap())),
+                    Mode::Repair => Some((*node.pubkey(), node.repair().unwrap())),
+                    Mode::ServeRepair => Some((*node.pubkey(), node.serve_repair().unwrap())),
                     Mode::Rpc => None,
                 };
                 break;

@@ -563,7 +563,7 @@ impl ClusterInfo {
             // there's not much point in saving entrypoint ContactInfo since by
             // definition that information is already available
             let contact_info = v.value.contact_info().unwrap();
-            if contact_info.id != self_pubkey
+            if contact_info.pubkey() != &self_pubkey
                 && contact_info
                     .gossip()
                     .map(|addr| !entrypoint_gossip_addrs.contains(&addr))

@@ -759,7 +759,7 @@ impl ClusterInfo {
                     .rpc()
                     .ok()
                     .filter(|addr| self.socket_addr_space.check(addr))?;
-                let node_version = self.get_node_version(&node.id);
+                let node_version = self.get_node_version(node.pubkey());
                 if my_shred_version != 0
                     && (node.shred_version() != 0 && node.shred_version() != my_shred_version)
                 {

@@ -769,9 +769,13 @@ impl ClusterInfo {
                 Some(format!(
                     "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {}\n",
                     rpc_addr.to_string(),
-                    if node.id == my_pubkey { "me" } else { "" },
+                    if node.pubkey() == &my_pubkey {
+                        "me"
+                    } else {
+                        ""
+                    },
                     now.saturating_sub(last_updated),
-                    node.id,
+                    node.pubkey(),
                     if let Some(node_version) = node_version {
                         node_version.to_string()
                     } else {

@@ -812,7 +816,7 @@ impl ClusterInfo {
                     total_spy_nodes = total_spy_nodes.saturating_add(1);
                 }
 
-                let node_version = self.get_node_version(&node.id);
+                let node_version = self.get_node_version(node.pubkey());
                 if my_shred_version != 0 && (node.shred_version() != 0 && node.shred_version() != my_shred_version) {
                     different_shred_nodes = different_shred_nodes.saturating_add(1);
                     None

@@ -831,9 +835,9 @@ impl ClusterInfo {
                             .as_ref()
                             .map(IpAddr::to_string)
                             .unwrap_or_else(|| String::from("none")),
-                        if node.id == my_pubkey { "me" } else { "" },
+                        if node.pubkey() == &my_pubkey { "me" } else { "" },
                         now.saturating_sub(last_updated),
-                        node.id,
+                        node.pubkey(),
                         if let Some(node_version) = node_version {
                             node_version.to_string()
                         } else {

@@ -1277,7 +1281,9 @@ impl ClusterInfo {
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
             .get_nodes_contact_info()
-            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.rpc()))
+            .filter(|node| {
+                node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.rpc())
+            })
             .cloned()
             .collect()
     }

@@ -1297,7 +1303,7 @@ impl ClusterInfo {
         gossip_crds
             .get_nodes_contact_info()
             // shred_version not considered for gossip peers (ie, spy nodes do not set shred_version)
-            .filter(|node| node.id != me && self.check_socket_addr_space(&node.gossip()))
+            .filter(|node| node.pubkey() != &me && self.check_socket_addr_space(&node.gossip()))
             .cloned()
             .collect()
     }

@@ -1307,7 +1313,9 @@ impl ClusterInfo {
         let self_pubkey = self.id();
         self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
             .get_nodes_contact_info()
-            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tvu()))
+            .filter(|node| {
+                node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.tvu())
+            })
             .cloned()
             .collect()
     }

@@ -1319,7 +1327,7 @@ impl ClusterInfo {
         self.time_gossip_read_lock("tvu_peers", &self.stats.tvu_peers)
             .get_nodes_contact_info()
             .filter(|node| {
-                node.id != self_pubkey
+                node.pubkey() != &self_pubkey
                     && node.shred_version() == self_shred_version
                     && self.check_socket_addr_space(&node.tvu())
             })

@@ -1336,11 +1344,11 @@ impl ClusterInfo {
         gossip_crds
             .get_nodes_contact_info()
             .filter(|node| {
-                node.id != self_pubkey
+                node.pubkey() != &self_pubkey
                     && node.shred_version() == self_shred_version
                     && self.check_socket_addr_space(&node.tvu())
                     && self.check_socket_addr_space(&node.serve_repair())
-                    && match gossip_crds.get::<&LowestSlot>(node.id) {
+                    && match gossip_crds.get::<&LowestSlot>(*node.pubkey()) {
                         None => true, // fallback to legacy behavior
                         Some(lowest_slot) => lowest_slot.lowest <= slot,
                     }

@@ -1364,7 +1372,9 @@ impl ClusterInfo {
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
             .get_nodes_contact_info()
-            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tpu()))
+            .filter(|node| {
+                node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.tpu())
+            })
             .cloned()
             .collect()
     }

@@ -1660,7 +1670,7 @@ impl ClusterInfo {
             return true;
         }
         for entrypoint in entrypoints.iter_mut() {
-            if entrypoint.id == Pubkey::default() {
+            if entrypoint.pubkey() == &Pubkey::default() {
                 // If a pull from the entrypoint was successful it should exist in the CRDS table
                 if let Some(entrypoint_from_gossip) = entrypoint
                     .gossip()

@@ -1681,7 +1691,7 @@ impl ClusterInfo {
                     info!(
                         "Setting shred version to {:?} from entrypoint {:?}",
                         entrypoint.shred_version(),
-                        entrypoint.id
+                        entrypoint.pubkey()
                     );
                     self.my_contact_info
                         .write()

@@ -1692,7 +1702,7 @@ impl ClusterInfo {
         self.my_shred_version() != 0
             && entrypoints
                 .iter()
-                .all(|entrypoint| entrypoint.id != Pubkey::default())
+                .all(|entrypoint| entrypoint.pubkey() != &Pubkey::default())
     }
 
     fn handle_purge(

@@ -1725,7 +1735,8 @@ impl ClusterInfo {
                 .read()
                 .unwrap()
                 .iter()
-                .map(|k| k.id)
+                .map(LegacyContactInfo::pubkey)
+                .copied()
                 .chain(std::iter::once(self.id()))
                 .collect();
             self.stats.trim_crds_table.add_relaxed(1);

@@ -1913,7 +1924,7 @@ impl ClusterInfo {
             .with_min_len(1024)
             .filter(|(_, _, caller)| match caller.contact_info() {
                 None => false,
-                Some(caller) if caller.id == self_pubkey => {
+                Some(caller) if caller.pubkey() == &self_pubkey => {
                     warn!("PullRequest ignored, I'm talking to myself");
                     self.stats.window_request_loopback.add_relaxed(1);
                     false

@@ -3146,7 +3157,7 @@ fn filter_on_shred_version(
             values.retain(|value| match &value.data {
                 // Allow node to update its own contact info in case their
                 // shred-version changes
-                CrdsData::LegacyContactInfo(node) => node.id == *from,
+                CrdsData::LegacyContactInfo(node) => node.pubkey() == from,
                 CrdsData::NodeInstance(_) => true,
                 _ => false,
             })

@@ -348,7 +348,7 @@ pub(crate) fn get_gossip_nodes<R: Rng>(
             if value.local_timestamp < active_cutoff {
                 // In order to mitigate eclipse attack, for staked nodes
                 // continue retrying periodically.
-                let stake = stakes.get(&node.id).copied().unwrap_or_default();
+                let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
                 if stake == 0u64 || !rng.gen_ratio(1, 16) {
                     return None;
                 }

@@ -356,14 +356,14 @@ pub(crate) fn get_gossip_nodes<R: Rng>(
             Some(node)
         })
         .filter(|node| {
-            &node.id != pubkey
+            node.pubkey() != pubkey
                 && verify_shred_version(node.shred_version())
                 && node
                     .gossip()
                     .map(|addr| socket_addr_space.check(&addr))
                     .unwrap_or_default()
                 && match gossip_validators {
-                    Some(nodes) => nodes.contains(&node.id),
+                    Some(nodes) => nodes.contains(node.pubkey()),
                     None => true,
                 }
         })

@@ -381,7 +381,7 @@ pub(crate) fn dedup_gossip_addresses(
         .filter_map(|node| Some((node.gossip().ok()?, node)))
         .into_grouping_map()
         .aggregate(|acc, _node_gossip, node| {
-            let stake = stakes.get(&node.id).copied().unwrap_or_default();
+            let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
             match acc {
                 Some((ref s, _)) if s >= &stake => acc,
                 Some(_) | None => Some((stake, node)),

@@ -410,7 +410,7 @@ pub(crate) fn maybe_ping_gossip_addresses<R: Rng + CryptoRng>(
                 Ok(addr) => addr,
             };
             let (check, ping) = {
-                let node = (node.id, node_gossip);
+                let node = (*node.pubkey(), node_gossip);
                 ping_cache.check(now, node, &mut pingf)
             };
             if let Some(ping) = ping {

@@ -465,7 +465,7 @@ mod test {
         //incorrect dest
         let mut res = crds_gossip.process_prune_msg(
             &id,
-            &ci.id,
+            ci.pubkey(),
             &Pubkey::from(hash(&[1; 32]).to_bytes()),
             &[prune_pubkey],
             now,

@@ -476,7 +476,7 @@ mod test {
         //correct dest
         res = crds_gossip.process_prune_msg(
             &id, // self_pubkey
-            &ci.id, // peer
+            ci.pubkey(), // peer
             &id, // destination
             &[prune_pubkey], // origins
             now,

@@ -488,7 +488,7 @@ mod test {
         let timeout = now + crds_gossip.push.prune_timeout * 2;
         res = crds_gossip.process_prune_msg(
             &id, // self_pubkey
-            &ci.id, // peer
+            ci.pubkey(), // peer
             &id, // destination
             &[prune_pubkey], // origins
             now,

@@ -829,7 +829,7 @@ pub(crate) mod tests {
             ping_cache
                 .lock()
                 .unwrap()
-                .mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+                .mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
             let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
             crds.write()
                 .unwrap()

@@ -893,12 +893,12 @@ pub(crate) mod tests {
         let node = CrdsGossipPull::default();
         crds.insert(entry, now, GossipRoute::LocalMessage).unwrap();
         let old = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(old.id, old.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*old.pubkey(), old.gossip().unwrap(), Instant::now());
         let old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(old));
         crds.insert(old.clone(), now, GossipRoute::LocalMessage)
             .unwrap();
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         crds.insert(new, now, GossipRoute::LocalMessage).unwrap();
         let crds = RwLock::new(crds);

@@ -956,7 +956,7 @@ pub(crate) mod tests {
             .insert(entry, now, GossipRoute::LocalMessage)
             .unwrap();
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         node_crds
             .insert(new, now, GossipRoute::LocalMessage)

@@ -1058,7 +1058,7 @@ pub(crate) mod tests {
             128, // capacity
         );
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();
         let node_crds = RwLock::new(node_crds);

@@ -1118,14 +1118,14 @@ pub(crate) mod tests {
             128, // capacity
         );
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 1);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();
 
         let mut dest_crds = Crds::default();
         let new_id = solana_sdk::pubkey::new_rand();
         let new = ContactInfo::new_localhost(&new_id, 1);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         dest_crds
             .insert(new.clone(), 0, GossipRoute::LocalMessage)

@@ -1134,7 +1134,11 @@ pub(crate) mod tests {
 
         // node contains a key from the dest node, but at an older local timestamp
         let same_key = ContactInfo::new_localhost(&new_id, 0);
-        ping_cache.mock_pong(same_key.id, same_key.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(
+            *same_key.pubkey(),
+            same_key.gossip().unwrap(),
+            Instant::now(),
+        );
         let same_key = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(same_key));
         assert_eq!(same_key.label(), new.label());
         assert!(same_key.wallclock() < new.wallclock());

@@ -272,7 +272,7 @@ impl CrdsGossipPush {
         );
         let nodes = crds_gossip::dedup_gossip_addresses(nodes, stakes)
             .into_values()
-            .map(|(_stake, node)| node.id)
+            .map(|(_stake, node)| *node.pubkey())
             .collect::<Vec<_>>();
         if nodes.is_empty() {
             return;

@@ -336,7 +336,7 @@ mod tests {
         // push a new message
         assert_eq!(
             push.process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0),
-            [ci.id].into_iter().collect()
+            [*ci.pubkey()].into_iter().collect()
         );
 
         // push an old version

@@ -372,7 +372,7 @@ mod tests {
         let crds = RwLock::<Crds>::default();
         let push = CrdsGossipPush::default();
         let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        let origin = ci.id;
+        let origin = *ci.pubkey();
         ci.set_wallclock(0);
         let value_old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ci.clone()));
 
@@ -398,7 +398,7 @@ mod tests {
         let push = CrdsGossipPush::default();
         let mut ping_cache = new_ping_cache();
         let peer = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*peer.pubkey(), peer.gossip().unwrap(), Instant::now());
         let peer = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer));
         assert_eq!(
             crds.insert(peer.clone(), now, GossipRoute::LocalMessage),

@@ -450,7 +450,7 @@ mod tests {
             .map(|wallclock| {
                 let mut peer = ContactInfo::new_rand(&mut rng, /*pubkey=*/ None);
                 peer.set_wallclock(wallclock);
-                ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
+                ping_cache.mock_pong(*peer.pubkey(), peer.gossip().unwrap(), Instant::now());
                 CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer))
             })
             .collect();

@@ -598,7 +598,7 @@ impl CrdsValue {
     }
     pub fn pubkey(&self) -> Pubkey {
         match &self.data {
-            CrdsData::LegacyContactInfo(contact_info) => contact_info.id,
+            CrdsData::LegacyContactInfo(contact_info) => *contact_info.pubkey(),
             CrdsData::Vote(_, vote) => vote.from,
             CrdsData::LowestSlot(_, slots) => slots.from,
             CrdsData::LegacySnapshotHashes(hash) => hash.from,

@@ -719,7 +719,7 @@ mod test {
         let mut rng = rand::thread_rng();
         let v = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(LegacyContactInfo::default()));
         assert_eq!(v.wallclock(), 0);
-        let key = v.contact_info().unwrap().id;
+        let key = *v.contact_info().unwrap().pubkey();
         assert_eq!(v.label(), CrdsValueLabel::LegacyContactInfo(key));
 
         let v = Vote::new(Pubkey::default(), new_test_vote_tx(&mut rng), 0).unwrap();

@@ -254,7 +254,7 @@ fn spy(
         tvu_peers = spy_ref.all_tvu_peers();
 
         let found_node_by_pubkey = if let Some(pubkey) = find_node_by_pubkey {
-            all_peers.iter().any(|x| x.id == pubkey)
+            all_peers.iter().any(|node| node.pubkey() == &pubkey)
         } else {
             false
         };

@@ -20,7 +20,7 @@ use {
     Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, AbiExample, Deserialize, Serialize,
 )]
 pub struct LegacyContactInfo {
-    pub id: Pubkey,
+    id: Pubkey,
     /// gossip address
     gossip: SocketAddr,
     /// address to connect to for replication

@@ -156,6 +156,11 @@ impl LegacyContactInfo {
         }
     }
 
+    #[inline]
+    pub fn pubkey(&self) -> &Pubkey {
+        &self.id
+    }
+
     #[inline]
     pub fn wallclock(&self) -> u64 {
         self.wallclock

@@ -166,6 +171,10 @@ impl LegacyContactInfo {
         self.shred_version
     }
 
+    pub fn set_pubkey(&mut self, pubkey: Pubkey) {
+        self.id = pubkey
+    }
+
     pub fn set_wallclock(&mut self, wallclock: u64) {
         self.wallclock = wallclock;
     }

@@ -197,7 +197,7 @@ fn process_spy_results(
         }
     }
     if let Some(node) = pubkey {
-        if !validators.iter().any(|x| x.id == node) {
+        if !validators.iter().any(|x| x.pubkey() == &node) {
             eprintln!("Error: Could not find node {node:?}");
             exit(1);
         }

@@ -518,7 +518,7 @@ fn network_run_pull(
             let self_info = gossip_crds.get::<&CrdsValue>(&label).unwrap().clone();
             requests
                 .into_iter()
-                .map(move |(peer, filters)| (peer.id, filters, self_info.clone()))
+                .map(move |(peer, filters)| (*peer.pubkey(), filters, self_info.clone()))
         })
         .collect()
     };

@@ -764,7 +764,7 @@ fn test_prune_errors() {
     //incorrect dest
     let mut res = crds_gossip.process_prune_msg(
         &id, // self_pubkey
-        &ci.id, // peer
+        ci.pubkey(), // peer
         &Pubkey::from(hash(&[1; 32]).to_bytes()), // destination
         &[prune_pubkey], // origins
         now,

@@ -775,7 +775,7 @@ fn test_prune_errors() {
     //correct dest
     res = crds_gossip.process_prune_msg(
         &id, // self_pubkey
-        &ci.id, // peer
+        ci.pubkey(), // peer
         &id, // destination
         &[prune_pubkey], // origins
         now,

@@ -787,7 +787,7 @@ fn test_prune_errors() {
     let timeout = now + crds_gossip.push.prune_timeout * 2;
     res = crds_gossip.process_prune_msg(
         &id, // self_pubkey
-        &ci.id, // peer
+        ci.pubkey(), // peer
         &id, // destination
         &[prune_pubkey], // origins
         now,

@@ -214,12 +214,12 @@ fn gossip_rstar() {
             let xv = &listen[0].0;
             xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
         };
-        trace!("rstar leader {}", xd.id);
+        trace!("rstar leader {}", xd.pubkey());
         for n in 0..(num - 1) {
             let y = (n + 1) % listen.len();
             let yv = &listen[y].0;
             yv.insert_legacy_info(xd.clone());
-            trace!("rstar insert {} into {}", xd.id, yv.id());
+            trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
         }
     });
 }

@@ -78,7 +78,7 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
     assert!(cluster_nodes.len() >= nodes);
     let ignore_nodes = Arc::new(ignore_nodes);
     cluster_nodes.par_iter().for_each(|ingress_node| {
-        if ignore_nodes.contains(&ingress_node.id) {
+        if ignore_nodes.contains(ingress_node.pubkey()) {
             return;
         }
         let random_keypair = Keypair::new();

@@ -101,7 +101,7 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
             .retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs)
             .unwrap();
         for validator in &cluster_nodes {
-            if ignore_nodes.contains(&validator.id) {
+            if ignore_nodes.contains(validator.pubkey()) {
                 continue;
             }
             let (rpc, tpu) = get_client_facing_addr(validator);

@@ -242,8 +242,8 @@ pub fn kill_entry_and_spend_and_verify_rest(
 
     for ingress_node in &cluster_nodes {
         client
-            .poll_get_balance_with_commitment(&ingress_node.id, CommitmentConfig::processed())
-            .unwrap_or_else(|err| panic!("Node {} has no balance: {}", ingress_node.id, err));
+            .poll_get_balance_with_commitment(ingress_node.pubkey(), CommitmentConfig::processed())
+            .unwrap_or_else(|err| panic!("Node {} has no balance: {}", ingress_node.pubkey(), err));
     }
 
     info!("sleeping for 2 leader fortnights");

@@ -257,7 +257,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
     ));
     info!("done sleeping for 2 fortnights");
     for ingress_node in &cluster_nodes {
-        if &ingress_node.id == entry_point_info.pubkey() {
+        if ingress_node.pubkey() == entry_point_info.pubkey() {
             info!("ingress_node.id == entry_point_info.id, continuing...");
             continue;
         }

@@ -384,11 +384,11 @@ pub fn check_no_new_roots(
             let client = ThinClient::new(rpc, tpu, connection_cache.clone());
             let initial_root = client
                 .get_slot()
-                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id));
+                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey()));
             roots[i] = initial_root;
             client
                 .get_slot_with_commitment(CommitmentConfig::processed())
-                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id))
+                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey()))
         })
         .max()
         .unwrap();

@@ -403,7 +403,7 @@ pub fn check_no_new_roots(
             let client = ThinClient::new(rpc, tpu, connection_cache.clone());
             current_slot = client
                 .get_slot_with_commitment(CommitmentConfig::processed())
-                .unwrap_or_else(|_| panic!("get_slot for {} failed", contact_infos[0].id));
+                .unwrap_or_else(|_| panic!("get_slot for {} failed", contact_infos[0].pubkey()));
             if current_slot > end_slot {
                 reached_end_slot = true;
                 break;

@@ -411,7 +411,10 @@ pub fn check_no_new_roots(
             if last_print.elapsed().as_secs() > 3 {
                 info!(
                     "{} current slot: {} on validator: {}, waiting for any validator with slot: {}",
-                    test_name, current_slot, contact_info.id, end_slot
+                    test_name,
+                    current_slot,
+                    contact_info.pubkey(),
+                    end_slot
                 );
                 last_print = Instant::now();
             }

@@ -427,7 +430,7 @@ pub fn check_no_new_roots(
         assert_eq!(
             client
                 .get_slot()
-                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id)),
+                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey())),
             roots[i]
         );
     }

@@ -441,7 +444,7 @@ fn poll_all_nodes_for_signature(
     confs: usize,
 ) -> Result<(), TransportError> {
     for validator in cluster_nodes {
-        if &validator.id == entry_point_info.pubkey() {
+        if validator.pubkey() == entry_point_info.pubkey() {
             continue;
         }
         let (rpc, tpu) = get_client_facing_addr(validator);

@@ -352,7 +352,7 @@ fn test_forwarding() {
 
     let validator_info = cluster_nodes
         .iter()
-        .find(|c| c.id != leader_pubkey)
+        .find(|c| c.pubkey() != &leader_pubkey)
        .unwrap();
 
     // Confirm that transactions were forwarded to and processed by the leader.

@@ -1359,7 +1359,7 @@ fn test_snapshots_blockstore_floor() {
     )
     .unwrap();
     let mut known_validators = HashSet::new();
-    known_validators.insert(cluster_nodes[0].id);
+    known_validators.insert(*cluster_nodes[0].pubkey());
     validator_snapshot_test_config
         .validator_config
         .known_validators = Some(known_validators);

@@ -115,7 +115,7 @@ fn test_consistency_halt() {
         setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);
 
     let mut known_validators = HashSet::new();
-    known_validators.insert(cluster_nodes[0].id);
+    known_validators.insert(*cluster_nodes[0].pubkey());
 
     validator_snapshot_test_config
         .validator_config

@@ -33,7 +33,7 @@ impl TpuInfo for ClusterTpuInfo {
             .cluster_info
             .tpu_peers()
             .into_iter()
-            .filter_map(|node| Some((node.id, node.tpu().ok()?)))
+            .filter_map(|node| Some((*node.pubkey(), node.tpu().ok()?)))
             .collect();
     }
 

@@ -3459,14 +3459,14 @@ pub mod rpc_full {
                 .unwrap_or_default()
             {
                 let (version, feature_set) = if let Some(version) =
-                    cluster_info.get_node_version(&contact_info.id)
+                    cluster_info.get_node_version(contact_info.pubkey())
                 {
                     (Some(version.to_string()), Some(version.feature_set))
                 } else {
                     (None, None)
                 };
                 Some(RpcContactInfo {
-                    pubkey: contact_info.id.to_string(),
+                    pubkey: contact_info.pubkey().to_string(),
                     gossip: contact_info.gossip().ok(),
                     tpu: contact_info
                         .tpu()

@@ -220,7 +220,7 @@ fn get_rpc_peers(
 
     if bootstrap_config.only_known_rpc {
         rpc_peers.retain(|rpc_peer| {
-            is_known_validator(&rpc_peer.id, &validator_config.known_validators)
+            is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators)
         });
     }
 
@@ -229,12 +229,14 @@ fn get_rpc_peers(
     // Filter out blacklisted nodes
     let rpc_peers: Vec<_> = rpc_peers
         .into_iter()
-        .filter(|rpc_peer| !blacklisted_rpc_nodes.contains(&rpc_peer.id))
+        .filter(|rpc_peer| !blacklisted_rpc_nodes.contains(rpc_peer.pubkey()))
         .collect();
     let rpc_peers_blacklisted = rpc_peers_total - rpc_peers.len();
     let rpc_known_peers = rpc_peers
         .iter()
-        .filter(|rpc_peer| is_known_validator(&rpc_peer.id, &validator_config.known_validators))
+        .filter(|rpc_peer| {
+            is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators)
+        })
         .count();
 
     info!("Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, {rpc_peers_blacklisted} blacklisted");

@@ -507,7 +509,7 @@ fn get_vetted_rpc_nodes(
 
             info!(
                 "Using RPC service from node {}: {:?}",
-                rpc_contact_info.id,
+                rpc_contact_info.pubkey(),
                 rpc_contact_info.rpc()
             );
 
@@ -535,7 +537,7 @@ fn get_vetted_rpc_nodes(
                     fail_rpc_node(
                         "Failed to ping RPC".to_string(),
                         &validator_config.known_validators,
-                        &rpc_contact_info.id,
+                        rpc_contact_info.pubkey(),
                         &mut newly_blacklisted_rpc_nodes.write().unwrap(),
                     );
                     false

@@ -545,7 +547,7 @@ fn get_vetted_rpc_nodes(
                    fail_rpc_node(
                        format!("Failed to get RPC node version: {err}"),
                        &validator_config.known_validators,
-                        &rpc_contact_info.id,
+                        rpc_contact_info.pubkey(),
                        &mut newly_blacklisted_rpc_nodes.write().unwrap(),
                    );
                    false

@@ -676,7 +678,7 @@ pub fn rpc_bootstrap(
                 fail_rpc_node(
                     err,
                     &validator_config.known_validators,
-                    &rpc_contact_info.id,
+                    rpc_contact_info.pubkey(),
                     &mut blacklisted_rpc_nodes,
                 );
             }

@@ -781,7 +783,7 @@ fn get_rpc_nodes(
     } else {
         let rpc_peers = peer_snapshot_hashes
             .iter()
-            .map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.id)
+            .map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.pubkey())
             .collect::<Vec<_>>();
         let final_snapshot_hash = peer_snapshot_hashes[0].snapshot_hash;
         info!(

@@ -1034,7 +1036,7 @@ fn get_eligible_peer_snapshot_hashes(
     let peer_snapshot_hashes = rpc_peers
         .iter()
         .flat_map(|rpc_peer| {
-            get_snapshot_hashes_for_node(cluster_info, &rpc_peer.id).map(|snapshot_hash| {
+            get_snapshot_hashes_for_node(cluster_info, rpc_peer.pubkey()).map(|snapshot_hash| {
                 PeerSnapshotHash {
                     rpc_contact_info: rpc_peer.clone(),
                     snapshot_hash,

@@ -1262,7 +1264,7 @@ fn download_snapshot(
         && *download_abort_count < maximum_snapshot_download_abort
     {
         if let Some(ref known_validators) = validator_config.known_validators {
-            if known_validators.contains(&rpc_contact_info.id)
+            if known_validators.contains(rpc_contact_info.pubkey())
                 && known_validators.len() == 1
                 && bootstrap_config.only_known_rpc
             {