diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs
index b38a9d2a09..da1e82c2b0 100644
--- a/bench-tps/src/main.rs
+++ b/bench-tps/src/main.rs
@@ -156,7 +156,7 @@ fn create_client(
    info!("Searching for target_node: {:?}", target_node);
    let mut target_client = None;
    for node in nodes {
-        if node.id == target_node {
+        if node.pubkey() == &target_node {
            target_client = Some(get_client(
                &[node],
                &SocketAddrSpace::Unspecified,
diff --git a/core/benches/cluster_nodes.rs b/core/benches/cluster_nodes.rs
index 2201a73f2f..30a1186acc 100644
--- a/core/benches/cluster_nodes.rs
+++ b/core/benches/cluster_nodes.rs
@@ -63,7 +63,7 @@ fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: O
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let bank = Bank::new_for_benches(&genesis_config);
    let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
-    let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
+    let slot_leader = *nodes[1..].choose(&mut rng).unwrap().pubkey();
    let slot = rand::random::<Slot>();
    b.iter(|| {
        get_retransmit_peers_deterministic(
diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs
index abe09756d4..8b94c1f3ed 100644
--- a/core/src/broadcast_stage/broadcast_duplicates_run.rs
+++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -314,10 +314,10 @@ impl BroadcastRun for BroadcastDuplicatesRun {
                    .unwrap()
                    .remove(shred.signature())
                {
-                    if cluster_partition.contains(&node.id) {
+                    if cluster_partition.contains(node.pubkey()) {
                        info!(
                            "skipping node {} for original shred index {}, slot {}",
-                            node.id,
+                            node.pubkey(),
                            shred.index(),
                            shred.slot()
                        );
diff --git a/core/src/cluster_nodes.rs b/core/src/cluster_nodes.rs
index 99cc031dd9..9487cf5c55 100644
--- a/core/src/cluster_nodes.rs
+++ b/core/src/cluster_nodes.rs
@@ -93,7 +93,7 @@ impl Node {
    fn pubkey(&self) -> Pubkey {
        match &self.node {
            NodeId::Pubkey(pubkey) => *pubkey,
-            NodeId::ContactInfo(node) => node.id,
+            NodeId::ContactInfo(node) => *node.pubkey(),
        }
    }
@@ -245,11 +245,11 @@ impl ClusterNodes {
            .inspect(|node| {
                if let Some(node) = node.contact_info() {
                    if let Ok(addr) = node.tvu() {
-                        addrs.entry(addr).or_insert(node.id);
+                        addrs.entry(addr).or_insert(*node.pubkey());
                    }
                    if !drop_redundant_turbine_path {
                        if let Ok(addr) = node.tvu_forwards() {
-                            frwds.entry(addr).or_insert(node.id);
+                            frwds.entry(addr).or_insert(*node.pubkey());
                        }
                    }
                }
@@ -341,7 +341,7 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap) -> Vec 0 {
@@ -633,7 +639,13 @@ mod tests {
            .map(|node| (node.pubkey(), node))
            .collect();
        for node in &nodes {
-            assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
+            assert_eq!(
+                cluster_nodes[node.pubkey()]
+                    .contact_info()
+                    .unwrap()
+                    .pubkey(),
+                node.pubkey()
+            );
        }
        for (pubkey, stake) in &stakes {
            if *stake > 0 {
diff --git a/core/src/cluster_slots.rs b/core/src/cluster_slots.rs
index a5be1b4965..10b0291dbc 100644
--- a/core/src/cluster_slots.rs
+++ b/core/src/cluster_slots.rs
@@ -179,7 +179,7 @@ impl ClusterSlots {
            .iter()
            .map(|peer| {
                validator_stakes
-                    .get(&peer.id)
+                    .get(peer.pubkey())
                    .map(|node| node.total_stake)
                    .unwrap_or(0)
                    + 1
@@ -193,7 +193,7 @@ impl ClusterSlots {
        let slot_peers = slot_peers.read().unwrap();
        repair_peers
            .iter()
-            .map(|peer| slot_peers.get(&peer.id).cloned().unwrap_or(0))
+            .map(|peer| slot_peers.get(peer.pubkey()).cloned().unwrap_or(0))
            .zip(stakes)
            .map(|(a, b)| (a / 2 + b / 2).max(1u64))
            .collect()
@@ -210,7 +210,7 @@ impl ClusterSlots {
        repair_peers
            .iter()
            .enumerate()
-            .filter_map(|(i, ci)| Some((slot_peers.get(&ci.id)? + 1, i)))
+            .filter_map(|(i, ci)| Some((slot_peers.get(ci.pubkey())? + 1, i)))
            .collect()
        })
        .unwrap_or_default()
@@ -291,8 +291,8 @@ mod tests {
            .write()
            .unwrap()
            .insert(0, Arc::new(RwLock::new(map)));
-        c1.id = k1;
-        c2.id = k2;
+        c1.set_pubkey(k1);
+        c2.set_pubkey(k2);
        assert_eq!(cs.compute_weights(0, &[c1, c2]), vec![std::u64::MAX / 4, 1]);
    }
@@ -320,8 +320,8 @@ mod tests {
            .into_iter()
            .collect();
        *cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes);
-        c1.id = k1;
-        c2.id = k2;
+        c1.set_pubkey(k1);
+        c2.set_pubkey(k2);
        assert_eq!(
            cs.compute_weights(0, &[c1, c2]),
            vec![std::u64::MAX / 4 + 1, 1]
@@ -333,7 +333,7 @@ mod tests {
        let cs = ClusterSlots::default();
        let mut contact_infos = vec![ContactInfo::default(); 2];
        for ci in contact_infos.iter_mut() {
-            ci.id = solana_sdk::pubkey::new_rand();
+            ci.set_pubkey(solana_sdk::pubkey::new_rand());
        }
        let slot = 9;
@@ -345,7 +345,7 @@ mod tests {
        // Give second validator max stake
        let validator_stakes: HashMap<_, _> = vec![(
-            *Arc::new(contact_infos[1].id),
+            *contact_infos[1].pubkey(),
            NodeVoteAccounts {
                total_stake: std::u64::MAX / 2,
                vote_accounts: vec![Pubkey::default()],
@@ -358,7 +358,7 @@ mod tests {
        // Mark the first validator as completed slot 9, should pick that validator,
        // even though it only has default stake, while the other validator has
        // max stake
-        cs.insert_node_id(slot, contact_infos[0].id);
+        cs.insert_node_id(slot, *contact_infos[0].pubkey());
        assert_eq!(
            cs.compute_weights_exclude_nonfrozen(slot, &contact_infos),
            vec![(1, 0)]
diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs
index 82100ce3f5..fa7b814a5e 100644
--- a/core/src/serve_repair.rs
+++ b/core/src/serve_repair.rs
@@ -267,13 +267,13 @@ pub(crate) enum RepairResponse {
impl RepairProtocol {
    fn sender(&self) -> &Pubkey {
        match self {
-            Self::LegacyWindowIndex(ci, _, _) => &ci.id,
-            Self::LegacyHighestWindowIndex(ci, _, _) => &ci.id,
-            Self::LegacyOrphan(ci, _) => &ci.id,
-            Self::LegacyWindowIndexWithNonce(ci, _, _, _) => &ci.id,
-            Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => &ci.id,
-            Self::LegacyOrphanWithNonce(ci, _, _) => &ci.id,
-            Self::LegacyAncestorHashes(ci, _, _) => &ci.id,
+            Self::LegacyWindowIndex(ci, _, _) => ci.pubkey(),
+            Self::LegacyHighestWindowIndex(ci, _, _) => ci.pubkey(),
+            Self::LegacyOrphan(ci, _) => ci.pubkey(),
+            Self::LegacyWindowIndexWithNonce(ci, _, _, _) => ci.pubkey(),
+            Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => ci.pubkey(),
+            Self::LegacyOrphanWithNonce(ci, _, _) => ci.pubkey(),
+            Self::LegacyAncestorHashes(ci, _, _) => ci.pubkey(),
            Self::Pong(pong) => pong.from(),
            Self::WindowIndex { header, .. } => &header.sender,
            Self::HighestWindowIndex { header, .. } => &header.sender,
@@ -346,7 +346,7 @@ impl RepairPeers {
            .zip(weights)
            .filter_map(|(peer, &weight)| {
                let addr = peer.serve_repair().ok()?;
-                Some(((peer.id, addr), weight))
+                Some(((*peer.pubkey(), addr), weight))
            })
            .unzip();
        if peers.is_empty() {
@@ -1077,7 +1077,7 @@ impl ServeRepair {
            .map(|i| index[i])
            .filter_map(|i| {
                let addr = repair_peers[i].serve_repair().ok()?;
-                Some((repair_peers[i].id, addr))
+                Some((*repair_peers[i].pubkey(), addr))
            })
            .take(ANCESTOR_HASH_REPAIR_SAMPLE_SIZE)
            .collect();
@@ -1100,7 +1100,7 @@ impl ServeRepair {
            .unzip();
        let k = WeightedIndex::new(weights)?.sample(&mut rand::thread_rng());
        let n = index[k];
-        Ok((repair_peers[n].id, repair_peers[n].serve_repair()?))
+        Ok((*repair_peers[n].pubkey(), repair_peers[n].serve_repair()?))
    }

    pub(crate) fn map_repair_request(
@@ -2172,7 +2172,7 @@ mod tests {
        let known_validators = Some(vec![*contact_info2.pubkey()].into_iter().collect());
        let repair_peers = serve_repair.repair_peers(&known_validators, 1);
        assert_eq!(repair_peers.len(), 1);
-        assert_eq!(&repair_peers[0].id, contact_info2.pubkey());
+        assert_eq!(repair_peers[0].pubkey(), contact_info2.pubkey());
        assert!(serve_repair
            .repair_request(
                &cluster_slots,
@@ -2190,7 +2190,7 @@ mod tests {
        let repair_peers: HashSet<Pubkey> = serve_repair
            .repair_peers(&None, 1)
            .into_iter()
-            .map(|c| c.id)
+            .map(|node| *node.pubkey())
            .collect();
        assert_eq!(repair_peers.len(), 2);
        assert!(repair_peers.contains(contact_info2.pubkey()));
diff --git a/core/src/validator.rs b/core/src/validator.rs
index d821ced35c..f41ff1a554 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -2104,7 +2104,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
                // Contact infos are refreshed twice during this period.
                age < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
            })
-            .map(|node| (node.id, node))
+            .map(|node| (*node.pubkey(), node))
            .collect();
    let my_shred_version = cluster_info.my_shred_version();
    let my_id = cluster_info.id();
diff --git a/dos/src/main.rs b/dos/src/main.rs
index 69d8b2f10f..85c4e3691f 100644
--- a/dos/src/main.rs
+++ b/dos/src/main.rs
@@ -430,13 +430,13 @@ fn get_target(
        if node.gossip().ok() == Some(entrypoint_addr) {
            info!("{:?}", node.gossip());
            target = match mode {
-                Mode::Gossip => Some((node.id, node.gossip().unwrap())),
-                Mode::Tvu => Some((node.id, node.tvu().unwrap())),
-                Mode::TvuForwards => Some((node.id, node.tvu_forwards().unwrap())),
-                Mode::Tpu => Some((node.id, node.tpu().unwrap())),
-                Mode::TpuForwards => Some((node.id, node.tpu_forwards().unwrap())),
-                Mode::Repair => Some((node.id, node.repair().unwrap())),
-                Mode::ServeRepair => Some((node.id, node.serve_repair().unwrap())),
+                Mode::Gossip => Some((*node.pubkey(), node.gossip().unwrap())),
+                Mode::Tvu => Some((*node.pubkey(), node.tvu().unwrap())),
+                Mode::TvuForwards => Some((*node.pubkey(), node.tvu_forwards().unwrap())),
+                Mode::Tpu => Some((*node.pubkey(), node.tpu().unwrap())),
+                Mode::TpuForwards => Some((*node.pubkey(), node.tpu_forwards().unwrap())),
+                Mode::Repair => Some((*node.pubkey(), node.repair().unwrap())),
+                Mode::ServeRepair => Some((*node.pubkey(), node.serve_repair().unwrap())),
                Mode::Rpc => None,
            };
            break;
diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index 664c6b7f2f..8edd98618c 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -563,7 +563,7 @@ impl ClusterInfo {
            // there's not much point in saving entrypoint ContactInfo since by
            // definition that information is already available
            let contact_info = v.value.contact_info().unwrap();
-            if contact_info.id != self_pubkey
+            if contact_info.pubkey() != &self_pubkey
                && contact_info
                    .gossip()
                    .map(|addr| !entrypoint_gossip_addrs.contains(&addr))
@@ -759,7 +759,7 @@ impl ClusterInfo {
                    .rpc()
                    .ok()
                    .filter(|addr| self.socket_addr_space.check(addr))?;
-                let node_version = self.get_node_version(&node.id);
+                let node_version = self.get_node_version(node.pubkey());
                if my_shred_version != 0
                    && (node.shred_version() != 0 && node.shred_version() != my_shred_version)
                {
@@ -769,9 +769,13 @@ impl ClusterInfo {
                Some(format!(
                    "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {}\n",
                    rpc_addr.to_string(),
-                    if node.id == my_pubkey { "me" } else { "" },
+                    if node.pubkey() == &my_pubkey {
+                        "me"
+                    } else {
+                        ""
+                    },
                    now.saturating_sub(last_updated),
-                    node.id,
+                    node.pubkey(),
                    if let Some(node_version) = node_version {
                        node_version.to_string()
                    } else {
@@ -812,7 +816,7 @@ impl ClusterInfo {
                    total_spy_nodes = total_spy_nodes.saturating_add(1);
                }
-                let node_version = self.get_node_version(&node.id);
+                let node_version = self.get_node_version(node.pubkey());
                if my_shred_version != 0 && (node.shred_version() != 0 && node.shred_version() != my_shred_version) {
                    different_shred_nodes = different_shred_nodes.saturating_add(1);
                    None
@@ -831,9 +835,9 @@ impl ClusterInfo {
                        .as_ref()
                        .map(IpAddr::to_string)
                        .unwrap_or_else(|| String::from("none")),
-                    if node.id == my_pubkey { "me" } else { "" },
+                    if node.pubkey() == &my_pubkey { "me" } else { "" },
                    now.saturating_sub(last_updated),
-                    node.id,
+                    node.pubkey(),
                    if let Some(node_version) = node_version {
                        node_version.to_string()
                    } else {
@@ -1277,7 +1281,9 @@ impl ClusterInfo {
        let gossip_crds = self.gossip.crds.read().unwrap();
        gossip_crds
            .get_nodes_contact_info()
-            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.rpc()))
+            .filter(|node| {
+                node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.rpc())
+            })
            .cloned()
            .collect()
    }
@@ -1297,7 +1303,7 @@ impl ClusterInfo {
        gossip_crds
            .get_nodes_contact_info()
            // shred_version not considered for gossip peers (ie, spy nodes do not set shred_version)
-            .filter(|node| node.id != me && self.check_socket_addr_space(&node.gossip()))
+            .filter(|node| node.pubkey() != &me && self.check_socket_addr_space(&node.gossip()))
            .cloned()
            .collect()
    }
@@ -1307,7 +1313,9 @@ impl ClusterInfo {
        let self_pubkey = self.id();
        self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
            .get_nodes_contact_info()
-            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tvu()))
+            .filter(|node| {
+                node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.tvu())
+            })
            .cloned()
            .collect()
    }
@@ -1319,7 +1327,7 @@ impl ClusterInfo {
        self.time_gossip_read_lock("tvu_peers", &self.stats.tvu_peers)
            .get_nodes_contact_info()
            .filter(|node| {
-                node.id != self_pubkey
+                node.pubkey() != &self_pubkey
                    && node.shred_version() == self_shred_version
                    && self.check_socket_addr_space(&node.tvu())
            })
@@ -1336,11 +1344,11 @@ impl ClusterInfo {
        gossip_crds
            .get_nodes_contact_info()
            .filter(|node| {
-                node.id != self_pubkey
+                node.pubkey() != &self_pubkey
                    && node.shred_version() == self_shred_version
                    && self.check_socket_addr_space(&node.tvu())
                    && self.check_socket_addr_space(&node.serve_repair())
-                    && match gossip_crds.get::<&LowestSlot>(node.id) {
+                    && match gossip_crds.get::<&LowestSlot>(*node.pubkey()) {
                        None => true, // fallback to legacy behavior
                        Some(lowest_slot) => lowest_slot.lowest <= slot,
                    }
@@ -1364,7 +1372,9 @@ impl ClusterInfo {
        let gossip_crds = self.gossip.crds.read().unwrap();
        gossip_crds
            .get_nodes_contact_info()
-            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tpu()))
+            .filter(|node| {
+                node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.tpu())
+            })
            .cloned()
            .collect()
    }
@@ -1660,7 +1670,7 @@ impl ClusterInfo {
            return true;
        }
        for entrypoint in entrypoints.iter_mut() {
-            if entrypoint.id == Pubkey::default() {
+            if entrypoint.pubkey() == &Pubkey::default() {
                // If a pull from the entrypoint was successful it should exist in the CRDS table
                if let Some(entrypoint_from_gossip) = entrypoint
                    .gossip()
@@ -1681,7 +1691,7 @@ impl ClusterInfo {
                info!(
                    "Setting shred version to {:?} from entrypoint {:?}",
                    entrypoint.shred_version(),
-                    entrypoint.id
+                    entrypoint.pubkey()
                );
                self.my_contact_info
                    .write()
@@ -1692,7 +1702,7 @@ impl ClusterInfo {
        self.my_shred_version() != 0
            && entrypoints
                .iter()
-                .all(|entrypoint| entrypoint.id != Pubkey::default())
+                .all(|entrypoint| entrypoint.pubkey() != &Pubkey::default())
    }

    fn handle_purge(
@@ -1725,7 +1735,8 @@ impl ClusterInfo {
            .read()
            .unwrap()
            .iter()
-            .map(|k| k.id)
+            .map(LegacyContactInfo::pubkey)
+            .copied()
            .chain(std::iter::once(self.id()))
            .collect();
        self.stats.trim_crds_table.add_relaxed(1);
@@ -1913,7 +1924,7 @@ impl ClusterInfo {
            .with_min_len(1024)
            .filter(|(_, _, caller)| match caller.contact_info() {
                None => false,
-                Some(caller) if caller.id == self_pubkey => {
+                Some(caller) if caller.pubkey() == &self_pubkey => {
                    warn!("PullRequest ignored, I'm talking to myself");
                    self.stats.window_request_loopback.add_relaxed(1);
                    false
@@ -3146,7 +3157,7 @@ fn filter_on_shred_version(
        values.retain(|value| match &value.data {
            // Allow node to update its own contact info in case their
            // shred-version changes
-            CrdsData::LegacyContactInfo(node) => node.id == *from,
+            CrdsData::LegacyContactInfo(node) => node.pubkey() == from,
            CrdsData::NodeInstance(_) => true,
            _ => false,
        })
diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs
index 8cc7fbd088..e064c0a5ca 100644
--- a/gossip/src/crds_gossip.rs
+++ b/gossip/src/crds_gossip.rs
@@ -348,7 +348,7 @@ pub(crate) fn get_gossip_nodes(
            if value.local_timestamp < active_cutoff {
                // In order to mitigate eclipse attack, for staked nodes
                // continue retrying periodically.
-                let stake = stakes.get(&node.id).copied().unwrap_or_default();
+                let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
                if stake == 0u64 || !rng.gen_ratio(1, 16) {
                    return None;
                }
@@ -356,14 +356,14 @@ pub(crate) fn get_gossip_nodes(
            Some(node)
        })
        .filter(|node| {
-            &node.id != pubkey
+            node.pubkey() != pubkey
                && verify_shred_version(node.shred_version())
                && node
                    .gossip()
                    .map(|addr| socket_addr_space.check(&addr))
                    .unwrap_or_default()
                && match gossip_validators {
-                    Some(nodes) => nodes.contains(&node.id),
+                    Some(nodes) => nodes.contains(node.pubkey()),
                    None => true,
                }
        })
@@ -381,7 +381,7 @@ pub(crate) fn dedup_gossip_addresses(
        .filter_map(|node| Some((node.gossip().ok()?, node)))
        .into_grouping_map()
        .aggregate(|acc, _node_gossip, node| {
-            let stake = stakes.get(&node.id).copied().unwrap_or_default();
+            let stake = stakes.get(node.pubkey()).copied().unwrap_or_default();
            match acc {
                Some((ref s, _)) if s >= &stake => acc,
                Some(_) | None => Some((stake, node)),
@@ -410,7 +410,7 @@ pub(crate) fn maybe_ping_gossip_addresses(
            Ok(addr) => addr,
        };
        let (check, ping) = {
-            let node = (node.id, node_gossip);
+            let node = (*node.pubkey(), node_gossip);
            ping_cache.check(now, node, &mut pingf)
        };
        if let Some(ping) = ping {
@@ -465,7 +465,7 @@ mod test {
        //incorrect dest
        let mut res = crds_gossip.process_prune_msg(
            &id,
-            &ci.id,
+            ci.pubkey(),
            &Pubkey::from(hash(&[1; 32]).to_bytes()),
            &[prune_pubkey],
            now,
@@ -476,7 +476,7 @@ mod test {
        //correct dest
        res = crds_gossip.process_prune_msg(
            &id,             // self_pubkey
-            &ci.id,          // peer
+            ci.pubkey(),     // peer
            &id,             // destination
            &[prune_pubkey], // origins
            now,
@@ -488,7 +488,7 @@ mod test {
        let timeout = now + crds_gossip.push.prune_timeout * 2;
        res = crds_gossip.process_prune_msg(
            &id,             // self_pubkey
-            &ci.id,          // peer
+            ci.pubkey(),     // peer
            &id,             // destination
            &[prune_pubkey], // origins
            now,
diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs
index 63ce378792..493b13931c 100644
--- a/gossip/src/crds_gossip_pull.rs
+++ b/gossip/src/crds_gossip_pull.rs
@@ -829,7 +829,7 @@ pub(crate) mod tests {
        ping_cache
            .lock()
            .unwrap()
-            .mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+            .mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
        let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
        crds.write()
            .unwrap()
@@ -893,12 +893,12 @@ pub(crate) mod tests {
        let node = CrdsGossipPull::default();
        crds.insert(entry, now, GossipRoute::LocalMessage).unwrap();
        let old = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(old.id, old.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*old.pubkey(), old.gossip().unwrap(), Instant::now());
        let old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(old));
        crds.insert(old.clone(), now, GossipRoute::LocalMessage)
            .unwrap();
        let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
        let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
        crds.insert(new, now, GossipRoute::LocalMessage).unwrap();
        let crds = RwLock::new(crds);
@@ -956,7 +956,7 @@ pub(crate) mod tests {
            .insert(entry, now, GossipRoute::LocalMessage)
            .unwrap();
        let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
        let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
        node_crds
            .insert(new, now, GossipRoute::LocalMessage)
            .unwrap();
@@ -1058,7 +1058,7 @@ pub(crate) mod tests {
            128, // capacity
        );
        let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
        let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
        node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();
        let node_crds = RwLock::new(node_crds);
@@ -1118,14 +1118,14 @@ pub(crate) mod tests {
            128, // capacity
        );
        let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 1);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
        let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
        node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();

        let mut dest_crds = Crds::default();
        let new_id = solana_sdk::pubkey::new_rand();
        let new = ContactInfo::new_localhost(&new_id, 1);
-        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now());
        let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
        dest_crds
            .insert(new.clone(), 0, GossipRoute::LocalMessage)
            .unwrap();
@@ -1134,7 +1134,11 @@ pub(crate) mod tests {
        // node contains a key from the dest node, but at an older local timestamp
        let same_key = ContactInfo::new_localhost(&new_id, 0);
-        ping_cache.mock_pong(same_key.id, same_key.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(
+            *same_key.pubkey(),
+            same_key.gossip().unwrap(),
+            Instant::now(),
+        );
        let same_key = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(same_key));
        assert_eq!(same_key.label(), new.label());
        assert!(same_key.wallclock() < new.wallclock());
diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs
index 692518c835..ee0b88db83 100644
--- a/gossip/src/crds_gossip_push.rs
+++ b/gossip/src/crds_gossip_push.rs
@@ -272,7 +272,7 @@ impl CrdsGossipPush {
        );
        let nodes = crds_gossip::dedup_gossip_addresses(nodes, stakes)
            .into_values()
-            .map(|(_stake, node)| node.id)
+            .map(|(_stake, node)| *node.pubkey())
            .collect::>();
        if nodes.is_empty() {
            return;
        }
@@ -336,7 +336,7 @@ mod tests {
        // push a new message
        assert_eq!(
            push.process_push_message(&crds, vec![(Pubkey::default(), vec![value])], 0),
-            [ci.id].into_iter().collect()
+            [*ci.pubkey()].into_iter().collect()
        );

        // push an old version
@@ -372,7 +372,7 @@ mod tests {
        let crds = RwLock::<Crds>::default();
        let push = CrdsGossipPush::default();
        let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        let origin = ci.id;
+        let origin = *ci.pubkey();
        ci.set_wallclock(0);
        let value_old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ci.clone()));
@@ -398,7 +398,7 @@ mod tests {
        let push = CrdsGossipPush::default();
        let mut ping_cache = new_ping_cache();
        let peer = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
+        ping_cache.mock_pong(*peer.pubkey(), peer.gossip().unwrap(), Instant::now());
        let peer = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer));
        assert_eq!(
            crds.insert(peer.clone(), now, GossipRoute::LocalMessage),
@@ -450,7 +450,7 @@ mod tests {
            .map(|wallclock| {
                let mut peer = ContactInfo::new_rand(&mut rng, /*pubkey=*/ None);
                peer.set_wallclock(wallclock);
-                ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
+                ping_cache.mock_pong(*peer.pubkey(), peer.gossip().unwrap(), Instant::now());
                CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer))
            })
            .collect();
diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs
index a0302dee79..d907f1cb93 100644
--- a/gossip/src/crds_value.rs
+++ b/gossip/src/crds_value.rs
@@ -598,7 +598,7 @@ impl CrdsValue {
    }
    pub fn pubkey(&self) -> Pubkey {
        match &self.data {
-            CrdsData::LegacyContactInfo(contact_info) => contact_info.id,
+            CrdsData::LegacyContactInfo(contact_info) => *contact_info.pubkey(),
            CrdsData::Vote(_, vote) => vote.from,
            CrdsData::LowestSlot(_, slots) => slots.from,
            CrdsData::LegacySnapshotHashes(hash) => hash.from,
@@ -719,7 +719,7 @@ mod test {
        let mut rng = rand::thread_rng();
        let v = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(LegacyContactInfo::default()));
        assert_eq!(v.wallclock(), 0);
-        let key = v.contact_info().unwrap().id;
+        let key = *v.contact_info().unwrap().pubkey();
        assert_eq!(v.label(), CrdsValueLabel::LegacyContactInfo(key));

        let v = Vote::new(Pubkey::default(), new_test_vote_tx(&mut rng), 0).unwrap();
diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs
index 6845b67cc3..f12c4bc9ea 100644
--- a/gossip/src/gossip_service.rs
+++ b/gossip/src/gossip_service.rs
@@ -254,7 +254,7 @@ fn spy(
        tvu_peers = spy_ref.all_tvu_peers();

        let found_node_by_pubkey = if let Some(pubkey) = find_node_by_pubkey {
-            all_peers.iter().any(|x| x.id == pubkey)
+            all_peers.iter().any(|node| node.pubkey() == &pubkey)
        } else {
            false
        };
diff --git a/gossip/src/legacy_contact_info.rs b/gossip/src/legacy_contact_info.rs
index d390011492..5af12b77d6 100644
--- a/gossip/src/legacy_contact_info.rs
+++ b/gossip/src/legacy_contact_info.rs
@@ -20,7 +20,7 @@ use {
    Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, AbiExample, Deserialize, Serialize,
)]
pub struct LegacyContactInfo {
-    pub id: Pubkey,
+    id: Pubkey,
    /// gossip address
    gossip: SocketAddr,
    /// address to connect to for replication
@@ -156,6 +156,11 @@ impl LegacyContactInfo {
        }
    }

+    #[inline]
+    pub fn pubkey(&self) -> &Pubkey {
+        &self.id
+    }
+
    #[inline]
    pub fn wallclock(&self) -> u64 {
        self.wallclock
@@ -166,6 +171,10 @@ impl LegacyContactInfo {
        self.shred_version
    }

+    pub fn set_pubkey(&mut self, pubkey: Pubkey) {
+        self.id = pubkey
+    }
+
    pub fn set_wallclock(&mut self, wallclock: u64) {
        self.wallclock = wallclock;
    }
diff --git a/gossip/src/main.rs b/gossip/src/main.rs
index 6ce8b724b8..77054f2f3f 100644
--- a/gossip/src/main.rs
+++ b/gossip/src/main.rs
@@ -197,7 +197,7 @@ fn process_spy_results(
        }
    }
    if let Some(node) = pubkey {
-        if !validators.iter().any(|x| x.id == node) {
+        if !validators.iter().any(|x| x.pubkey() == &node) {
            eprintln!("Error: Could not find node {node:?}");
            exit(1);
        }
diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs
index e727f17b03..b7d31e080c 100644
--- a/gossip/tests/crds_gossip.rs
+++ b/gossip/tests/crds_gossip.rs
@@ -518,7 +518,7 @@ fn network_run_pull(
            let self_info = gossip_crds.get::<&CrdsValue>(&label).unwrap().clone();
            requests
                .into_iter()
-                .map(move |(peer, filters)| (peer.id, filters, self_info.clone()))
+                .map(move |(peer, filters)| (*peer.pubkey(), filters, self_info.clone()))
        })
        .collect()
    };
@@ -764,7 +764,7 @@ fn test_prune_errors() {
    //incorrect dest
    let mut res = crds_gossip.process_prune_msg(
        &id,                                      // self_pubkey
-        &ci.id,                                   // peer
+        ci.pubkey(),                              // peer
        &Pubkey::from(hash(&[1; 32]).to_bytes()), // destination
        &[prune_pubkey],                          // origins
        now,
@@ -775,7 +775,7 @@ fn test_prune_errors() {
    //correct dest
    res = crds_gossip.process_prune_msg(
        &id,             // self_pubkey
-        &ci.id,          // peer
+        ci.pubkey(),     // peer
        &id,             // destination
        &[prune_pubkey], // origins
        now,
@@ -787,7 +787,7 @@ fn test_prune_errors() {
    let timeout = now + crds_gossip.push.prune_timeout * 2;
    res = crds_gossip.process_prune_msg(
        &id,             // self_pubkey
-        &ci.id,          // peer
+        ci.pubkey(),     // peer
        &id,             // destination
        &[prune_pubkey], // origins
        now,
diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs
index 7589a2f183..ff6261ac6a 100644
--- a/gossip/tests/gossip.rs
+++ b/gossip/tests/gossip.rs
@@ -214,12 +214,12 @@ fn gossip_rstar() {
        let xv = &listen[0].0;
        xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
    };
-    trace!("rstar leader {}", xd.id);
+    trace!("rstar leader {}", xd.pubkey());
    for n in 0..(num - 1) {
        let y = (n + 1) % listen.len();
        let yv = &listen[y].0;
        yv.insert_legacy_info(xd.clone());
-        trace!("rstar insert {} into {}", xd.id, yv.id());
+        trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
    }
});
}
diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs
index 3d3d950209..8e37cb452d 100644
--- a/local-cluster/src/cluster_tests.rs
+++ b/local-cluster/src/cluster_tests.rs
@@ -78,7 +78,7 @@ pub fn spend_and_verify_all_nodes(
    assert!(cluster_nodes.len() >= nodes);
    let ignore_nodes = Arc::new(ignore_nodes);
    cluster_nodes.par_iter().for_each(|ingress_node| {
-        if ignore_nodes.contains(&ingress_node.id) {
+        if ignore_nodes.contains(ingress_node.pubkey()) {
            return;
        }
        let random_keypair = Keypair::new();
@@ -101,7 +101,7 @@ pub fn spend_and_verify_all_nodes(
        .retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs)
        .unwrap();
    for validator in &cluster_nodes {
-        if ignore_nodes.contains(&validator.id) {
+        if ignore_nodes.contains(validator.pubkey()) {
            continue;
        }
        let (rpc, tpu) = get_client_facing_addr(validator);
@@ -242,8 +242,8 @@ pub fn kill_entry_and_spend_and_verify_rest(

    for ingress_node in &cluster_nodes {
        client
-            .poll_get_balance_with_commitment(&ingress_node.id, CommitmentConfig::processed())
-            .unwrap_or_else(|err| panic!("Node {} has no balance: {}", ingress_node.id, err));
+            .poll_get_balance_with_commitment(ingress_node.pubkey(), CommitmentConfig::processed())
+            .unwrap_or_else(|err| panic!("Node {} has no balance: {}", ingress_node.pubkey(), err));
    }

    info!("sleeping for 2 leader fortnights");
@@ -257,7 +257,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
    ));
    info!("done sleeping for 2 fortnights");
    for ingress_node in &cluster_nodes {
-        if &ingress_node.id == entry_point_info.pubkey() {
+        if ingress_node.pubkey() == entry_point_info.pubkey() {
            info!("ingress_node.id == entry_point_info.id, continuing...");
            continue;
        }
@@ -384,11 +384,11 @@ pub fn check_no_new_roots(
        let client = ThinClient::new(rpc, tpu, connection_cache.clone());
        let initial_root = client
            .get_slot()
-            .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id));
+            .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey()));
        roots[i] = initial_root;
        client
            .get_slot_with_commitment(CommitmentConfig::processed())
-            .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id))
+            .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey()))
    })
    .max()
    .unwrap();
@@ -403,7 +403,7 @@ pub fn check_no_new_roots(
            let client = ThinClient::new(rpc, tpu, connection_cache.clone());
            current_slot = client
                .get_slot_with_commitment(CommitmentConfig::processed())
-                .unwrap_or_else(|_| panic!("get_slot for {} failed", contact_infos[0].id));
+                .unwrap_or_else(|_| panic!("get_slot for {} failed", contact_infos[0].pubkey()));
            if current_slot > end_slot {
                reached_end_slot = true;
                break;
            }
@@ -411,7 +411,10 @@ pub fn check_no_new_roots(
            if last_print.elapsed().as_secs() > 3 {
                info!(
                    "{} current slot: {} on validator: {}, waiting for any validator with slot: {}",
-                    test_name, current_slot, contact_info.id, end_slot
+                    test_name,
+                    current_slot,
+                    contact_info.pubkey(),
+                    end_slot
                );
                last_print = Instant::now();
            }
@@ -427,7 +430,7 @@ pub fn check_no_new_roots(
        assert_eq!(
            client
                .get_slot()
-                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.id)),
+                .unwrap_or_else(|_| panic!("get_slot for {} failed", ingress_node.pubkey())),
            roots[i]
        );
    }
@@ -441,7 +444,7 @@ fn poll_all_nodes_for_signature(
    confs: usize,
) -> Result<(), TransportError> {
    for validator in cluster_nodes {
-        if &validator.id == entry_point_info.pubkey() {
+        if validator.pubkey() == entry_point_info.pubkey() {
            continue;
        }
        let (rpc, tpu) = get_client_facing_addr(validator);
diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs
index b41512f344..d75f2aeb6c 100644
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -352,7 +352,7 @@ fn test_forwarding() {

    let validator_info = cluster_nodes
        .iter()
-        .find(|c| c.id != leader_pubkey)
+        .find(|c| c.pubkey() != &leader_pubkey)
        .unwrap();

    // Confirm that transactions were forwarded to and processed by the leader.
@@ -1359,7 +1359,7 @@ fn test_snapshots_blockstore_floor() {
    )
    .unwrap();
    let mut known_validators = HashSet::new();
-    known_validators.insert(cluster_nodes[0].id);
+    known_validators.insert(*cluster_nodes[0].pubkey());
    validator_snapshot_test_config
        .validator_config
        .known_validators = Some(known_validators);
diff --git a/local-cluster/tests/local_cluster_slow_2.rs b/local-cluster/tests/local_cluster_slow_2.rs
index b7b5d4f612..0db7379b09 100644
--- a/local-cluster/tests/local_cluster_slow_2.rs
+++ b/local-cluster/tests/local_cluster_slow_2.rs
@@ -115,7 +115,7 @@ fn test_consistency_halt() {
        setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);

    let mut known_validators = HashSet::new();
-    known_validators.insert(cluster_nodes[0].id);
+    known_validators.insert(*cluster_nodes[0].pubkey());

    validator_snapshot_test_config
        .validator_config
diff --git a/rpc/src/cluster_tpu_info.rs b/rpc/src/cluster_tpu_info.rs
index ce75dda07c..c23a80d109 100644
--- a/rpc/src/cluster_tpu_info.rs
+++ b/rpc/src/cluster_tpu_info.rs
@@ -33,7 +33,7 @@ impl TpuInfo for ClusterTpuInfo {
            .cluster_info
            .tpu_peers()
            .into_iter()
-            .filter_map(|node| Some((node.id, node.tpu().ok()?)))
+            .filter_map(|node| Some((*node.pubkey(), node.tpu().ok()?)))
            .collect();
    }
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 964223d812..69a746327f 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -3459,14 +3459,14 @@ pub mod rpc_full {
                .unwrap_or_default()
            {
                let (version, feature_set) = if let Some(version) =
-                    cluster_info.get_node_version(&contact_info.id)
+                    cluster_info.get_node_version(contact_info.pubkey())
                {
                    (Some(version.to_string()), Some(version.feature_set))
                } else {
                    (None, None)
                };
                Some(RpcContactInfo {
-                    pubkey: contact_info.id.to_string(),
+                    pubkey: contact_info.pubkey().to_string(),
                    gossip: contact_info.gossip().ok(),
                    tpu: contact_info
                        .tpu()
diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs
index 8eff0c5df0..55d1d59e57 100644
--- a/validator/src/bootstrap.rs
+++ b/validator/src/bootstrap.rs
@@ -220,7 +220,7 @@ fn get_rpc_peers(

    if bootstrap_config.only_known_rpc {
        rpc_peers.retain(|rpc_peer| {
-            is_known_validator(&rpc_peer.id, &validator_config.known_validators)
+            is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators)
        });
    }

@@ -229,12 +229,14 @@ fn get_rpc_peers(
    // Filter out blacklisted nodes
    let rpc_peers: Vec<_> = rpc_peers
        .into_iter()
-        .filter(|rpc_peer| !blacklisted_rpc_nodes.contains(&rpc_peer.id))
+        .filter(|rpc_peer| !blacklisted_rpc_nodes.contains(rpc_peer.pubkey()))
        .collect();
    let rpc_peers_blacklisted = rpc_peers_total - rpc_peers.len();
    let rpc_known_peers = rpc_peers
        .iter()
-        .filter(|rpc_peer| is_known_validator(&rpc_peer.id, &validator_config.known_validators))
+        .filter(|rpc_peer| {
+            is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators)
+        })
        .count();
    info!("Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, {rpc_peers_blacklisted} blacklisted");
@@ -507,7 +509,7 @@ fn get_vetted_rpc_nodes(
                info!(
                    "Using RPC service from node {}: {:?}",
-                    rpc_contact_info.id,
+                    rpc_contact_info.pubkey(),
                    rpc_contact_info.rpc()
                );
@@ -535,7 +537,7 @@ fn get_vetted_rpc_nodes(
                    fail_rpc_node(
                        "Failed to ping RPC".to_string(),
                        &validator_config.known_validators,
-                        &rpc_contact_info.id,
+                        rpc_contact_info.pubkey(),
                        &mut newly_blacklisted_rpc_nodes.write().unwrap(),
                    );
                    false
@@ -545,7 +547,7 @@ fn get_vetted_rpc_nodes(
                fail_rpc_node(
                    format!("Failed to get RPC node version: {err}"),
                    &validator_config.known_validators,
-                    &rpc_contact_info.id,
+                    rpc_contact_info.pubkey(),
                    &mut newly_blacklisted_rpc_nodes.write().unwrap(),
                );
                false
@@ -676,7 +678,7 @@ pub fn rpc_bootstrap(
                fail_rpc_node(
                    err,
                    &validator_config.known_validators,
-                    &rpc_contact_info.id,
+                    rpc_contact_info.pubkey(),
                    &mut blacklisted_rpc_nodes,
                );
            }
@@ -781,7 +783,7 @@ fn get_rpc_nodes(
    } else {
        let rpc_peers = peer_snapshot_hashes
            .iter()
-            .map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.id)
+            .map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.pubkey())
            .collect::>();
        let final_snapshot_hash = peer_snapshot_hashes[0].snapshot_hash;
        info!(
@@ -1034,7 +1036,7 @@ fn get_eligible_peer_snapshot_hashes(
    let peer_snapshot_hashes = rpc_peers
        .iter()
        .flat_map(|rpc_peer| {
-            get_snapshot_hashes_for_node(cluster_info, &rpc_peer.id).map(|snapshot_hash| {
+            get_snapshot_hashes_for_node(cluster_info, rpc_peer.pubkey()).map(|snapshot_hash| {
                PeerSnapshotHash {
                    rpc_contact_info: rpc_peer.clone(),
                    snapshot_hash,
                }
            })
@@ -1262,7 +1264,7 @@ fn download_snapshot(
        && *download_abort_count < maximum_snapshot_download_abort
    {
        if let Some(ref known_validators) = validator_config.known_validators {
-            if known_validators.contains(&rpc_contact_info.id)
+            if known_validators.contains(rpc_contact_info.pubkey())
                && known_validators.len() == 1
                && bootstrap_config.only_known_rpc
            {
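Taken together, these hunks make the `id` field of `LegacyContactInfo` private and route every read through the new borrowing `pubkey()` getter, with `set_pubkey()` covering the handful of (mostly test-only) writes. As an orientation aid for reviewers, below is a minimal sketch of the caller-side migration; the `Pubkey` and `ContactInfo` types in the sketch are simplified stand-ins invented for illustration, not the real solana-sdk/gossip types, and only the accessor shape mirrors the diff.

use std::collections::HashSet;

// Hypothetical stand-in types, modelling only the accessors introduced by this diff.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Default)]
struct Pubkey([u8; 32]);

#[derive(Default)]
struct ContactInfo {
    id: Pubkey, // private, as in the patched LegacyContactInfo
}

impl ContactInfo {
    #[inline]
    fn pubkey(&self) -> &Pubkey {
        &self.id
    }

    fn set_pubkey(&mut self, pubkey: Pubkey) {
        self.id = pubkey
    }
}

fn main() {
    let mut node = ContactInfo::default();
    let me = Pubkey([1; 32]);

    // Old: node.id = me;
    node.set_pubkey(me);

    // Old: node.id == me -- new code compares against the borrowed key.
    assert!(node.pubkey() == &me);

    // Old: set.insert(node.id) -- new code dereferences for an owned copy,
    // and passes the borrow directly where an &Pubkey is expected.
    let mut known: HashSet<Pubkey> = HashSet::new();
    known.insert(*node.pubkey());
    assert!(known.contains(node.pubkey()));
}

Returning `&Pubkey` avoids copying the 32-byte key at every call site and lines up with borrowed-key APIs such as `HashSet::contains` and `HashMap::get`, which is why the converted call sites in the diff either compare against a reference or explicitly dereference with `*node.pubkey()` when an owned key is needed.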