embeds the new gossip ContactInfo in ClusterInfo (#30022)

Working towards replacing the legacy gossip contact-info with the new
one, this commit updates the respective field in gossip ClusterInfo to the new ContactInfo type.
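A minimal sketch of the call-site pattern this diff converges on, assuming the solana-gossip API exactly as the hunks below show it (ContactInfo::new taking pubkey, wallclock and shred_version, fallible set_* setters, and Result-returning getters); this is an illustration, not code from the commit:

use {
    solana_gossip::contact_info::ContactInfo,
    solana_sdk::{pubkey::Pubkey, timing::timestamp},
    std::net::Ipv4Addr,
};

fn main() {
    // Old: ContactInfo was a plain struct with public SocketAddr fields
    // (info.gossip, info.rpc, ..) plus wallclock and shred_version.
    // New: an opaque struct built from pubkey + wallclock + shred_version.
    let mut info = ContactInfo::new(
        Pubkey::new_unique(),
        timestamp(), // wallclock
        0u16,        // shred_version
    );
    // Setters validate the address and return Result, so a bad socket
    // address is an error instead of a silently stored field.
    info.set_gossip((Ipv4Addr::LOCALHOST, 8001)).unwrap();
    // Getters return Result as well; call sites change from node.info.rpc
    // to node.info.rpc().unwrap(), or propagate the error.
    println!("gossip: {:?}, rpc: {:?}", info.gossip(), info.rpc());
}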
behzad nouri 2023-02-10 20:07:45 +00:00 committed by GitHub
parent 157a9d725f
commit ded457cd73
39 changed files with 648 additions and 524 deletions


@@ -740,7 +740,7 @@ pub mod test {
         let num_instructions = 2;
         let mut start = Measure::start("total accounts run");
         run_accounts_bench(
-            cluster.entry_point_info.rpc,
+            cluster.entry_point_info.rpc().unwrap(),
             faucet_addr,
             &[&cluster.funding_keypair],
             iterations,


@@ -80,8 +80,8 @@ fn test_bench_tps_local_cluster(config: Config) {
     cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000);
     let client = Arc::new(ThinClient::new(
-        cluster.entry_point_info.rpc,
-        cluster.entry_point_info.tpu,
+        cluster.entry_point_info.rpc().unwrap(),
+        cluster.entry_point_info.tpu().unwrap(),
         cluster.connection_cache.clone(),
     ));


@@ -12,7 +12,7 @@ use {
     },
     solana_gossip::{
         cluster_info::{ClusterInfo, Node},
-        legacy_contact_info::LegacyContactInfo as ContactInfo,
+        contact_info::ContactInfo,
     },
     solana_ledger::{
         genesis_utils::{create_genesis_config, GenesisConfigInfo},


@@ -10,7 +10,7 @@ use {
     solana_entry::entry::Entry,
     solana_gossip::{
         cluster_info::{ClusterInfo, Node},
-        legacy_contact_info::LegacyContactInfo as ContactInfo,
+        contact_info::ContactInfo,
     },
     solana_ledger::{
         genesis_utils::{create_genesis_config, GenesisConfigInfo},
@@ -29,7 +29,7 @@ use {
     solana_streamer::socket::SocketAddrSpace,
     std::{
         iter::repeat_with,
-        net::UdpSocket,
+        net::{Ipv4Addr, UdpSocket},
         sync::{
             atomic::{AtomicUsize, Ordering},
             Arc, RwLock,
@@ -60,10 +60,12 @@ fn bench_retransmitter(bencher: &mut Bencher) {
             let id = Pubkey::new_unique();
             let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
             let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
-            contact_info.tvu = socket.local_addr().unwrap();
-            contact_info.tvu.set_ip("127.0.0.1".parse().unwrap());
-            contact_info.tvu_forwards = contact_info.tvu;
-            info!("local: {:?}", contact_info.tvu);
+            let port = socket.local_addr().unwrap().port();
+            contact_info.set_tvu((Ipv4Addr::LOCALHOST, port)).unwrap();
+            contact_info
+                .set_tvu_forwards(contact_info.tvu().unwrap())
+                .unwrap();
+            info!("local: {:?}", contact_info.tvu().unwrap());
             cluster_info.insert_info(contact_info);
             socket.set_nonblocking(true).unwrap();
             socket


@@ -464,10 +464,7 @@ mod tests {
     use {
         super::*,
         rand::seq::SliceRandom,
-        solana_gossip::{
-            cluster_info::make_accounts_hashes_message,
-            legacy_contact_info::LegacyContactInfo as ContactInfo,
-        },
+        solana_gossip::{cluster_info::make_accounts_hashes_message, contact_info::ContactInfo},
         solana_sdk::{
             hash::hash,
             signature::{Keypair, Signer},


@@ -772,7 +772,7 @@ mod test {
         },
         solana_gossip::{
             cluster_info::{ClusterInfo, Node},
-            legacy_contact_info::LegacyContactInfo as ContactInfo,
+            contact_info::ContactInfo,
         },
         solana_ledger::{blockstore::make_many_slot_entries, get_tmp_ledger_path, shred::Nonce},
         solana_runtime::{accounts_background_service::AbsRequestSender, bank_forks::BankForks},
@@ -1151,13 +1151,13 @@ mod test {
     ) {
         let request_bytes = requester_serve_repair.ancestor_repair_request_bytes(
             &requester_cluster_info.keypair(),
-            &responder_info.id,
+            responder_info.pubkey(),
             dead_slot,
             nonce,
         );
         if let Ok(request_bytes) = request_bytes {
-            let _ =
-                ancestor_hashes_request_socket.send_to(&request_bytes, responder_info.serve_repair);
+            let socket = responder_info.serve_repair().unwrap();
+            let _ = ancestor_hashes_request_socket.send_to(&request_bytes, socket);
         }
     }
@@ -1225,7 +1225,7 @@ mod test {
         let packet = &mut response_packet[0];
         packet
             .meta_mut()
-            .set_socket_addr(&responder_info.serve_repair);
+            .set_socket_addr(&responder_info.serve_repair().unwrap());
         let decision = AncestorHashesService::verify_and_process_ancestor_response(
             packet,
             &ancestor_hashes_request_statuses,
@@ -1239,7 +1239,7 @@ mod test {
         assert_eq!(decision, None);
         // Add the responder to the eligible list for requests
-        let responder_id = responder_info.id;
+        let responder_id = *responder_info.pubkey();
         cluster_slots.insert_node_id(dead_slot, responder_id);
         requester_cluster_info.insert_info(responder_info.clone());
         // Now the request should actually be made
@@ -1265,7 +1265,7 @@ mod test {
         let packet = &mut response_packet[0];
         packet
             .meta_mut()
-            .set_socket_addr(&responder_info.serve_repair);
+            .set_socket_addr(&responder_info.serve_repair().unwrap());
         let decision = AncestorHashesService::verify_and_process_ancestor_response(
             packet,
             &ancestor_hashes_request_statuses,
@@ -1588,7 +1588,7 @@ mod test {
         let (dumped_slots_sender, _dumped_slots_receiver) = unbounded();
         // Add the responder to the eligible list for requests
-        let responder_id = responder_info.id;
+        let responder_id = *responder_info.pubkey();
         cluster_slots.insert_node_id(dead_slot, responder_id);
         requester_cluster_info.insert_info(responder_info.clone());
@@ -1608,7 +1608,7 @@ mod test {
         let packet = &mut response_packet[0];
         packet
             .meta_mut()
-            .set_socket_addr(&responder_info.serve_repair);
+            .set_socket_addr(&responder_info.serve_repair().unwrap());
         let decision = AncestorHashesService::verify_and_process_ancestor_response(
             packet,
             &ancestor_hashes_request_statuses,
@@ -1670,7 +1670,7 @@ mod test {
         let packet = &mut response_packet[0];
         packet
             .meta_mut()
-            .set_socket_addr(&responder_info.serve_repair);
+            .set_socket_addr(&responder_info.serve_repair().unwrap());
         let decision = AncestorHashesService::verify_and_process_ancestor_response(
             packet,
             &ancestor_hashes_request_statuses,


@@ -159,7 +159,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
 mod tests {
     use {
         super::*,
-        solana_gossip::legacy_contact_info::LegacyContactInfo as ContactInfo,
+        solana_gossip::contact_info::ContactInfo,
         solana_sdk::signature::Signer,
         solana_streamer::socket::SocketAddrSpace,
         std::net::{IpAddr, Ipv4Addr, SocketAddr},


@@ -9,7 +9,7 @@ use {
         crds::GossipRoute,
         crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
         crds_value::{CrdsData, CrdsValue},
-        legacy_contact_info::LegacyContactInfo as ContactInfo,
+        legacy_contact_info::{LegacyContactInfo as ContactInfo, LegacyContactInfo},
         weighted_shuffle::WeightedShuffle,
     },
     solana_ledger::shred::ShredId,
@@ -316,7 +316,9 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<N
     // The local node itself.
     std::iter::once({
         let stake = stakes.get(&self_pubkey).copied().unwrap_or_default();
-        let node = NodeId::from(cluster_info.my_contact_info());
+        let node = LegacyContactInfo::try_from(&cluster_info.my_contact_info())
+            .map(NodeId::from)
+            .expect("Operator must spin up node with valid contact-info");
         Node { node, stake }
     })
     // All known tvu-peers from gossip.
@@ -458,13 +460,17 @@ pub fn make_test_cluster<R: Rng>(
     HashMap<Pubkey, u64>, // stakes
     ClusterInfo,
 ) {
+    use solana_gossip::contact_info::ContactInfo;
     let (unstaked_numerator, unstaked_denominator) = unstaked_ratio.unwrap_or((1, 7));
-    let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
+    let mut nodes: Vec<_> = repeat_with(|| {
+        let pubkey = solana_sdk::pubkey::new_rand();
+        ContactInfo::new_localhost(&pubkey, /*wallclock:*/ timestamp())
+    })
     .take(num_nodes)
     .collect();
     nodes.shuffle(rng);
     let keypair = Arc::new(Keypair::new());
-    nodes[0].id = keypair.pubkey();
+    nodes[0].set_pubkey(keypair.pubkey());
     let this_node = nodes[0].clone();
     let mut stakes: HashMap<Pubkey, u64> = nodes
         .iter()
@@ -472,13 +478,18 @@ pub fn make_test_cluster<R: Rng>(
             if rng.gen_ratio(unstaked_numerator, unstaked_denominator) {
                 None // No stake for some of the nodes.
             } else {
-                Some((node.id, rng.gen_range(0, 20)))
+                Some((*node.pubkey(), rng.gen_range(0, 20)))
             }
         })
         .collect();
     // Add some staked nodes with no contact-info.
     stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
     let cluster_info = ClusterInfo::new(this_node, keypair, SocketAddrSpace::Unspecified);
+    let nodes: Vec<_> = nodes
+        .iter()
+        .map(LegacyContactInfo::try_from)
+        .collect::<Result<_, _>>()
+        .unwrap();
     {
         let now = timestamp();
         let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();


@@ -803,9 +803,7 @@ pub(crate) fn post_shred_deferment_timestamp() -> u64 {
 mod test {
     use {
         super::*,
-        solana_gossip::{
-            cluster_info::Node, legacy_contact_info::LegacyContactInfo as ContactInfo,
-        },
+        solana_gossip::{cluster_info::Node, contact_info::ContactInfo},
         solana_ledger::{
             blockstore::{
                 make_chaining_slot_entries, make_many_slot_entries, make_slot_entries, Blockstore,
@@ -1277,7 +1275,7 @@ mod test {
         // a valid target for repair
         let dead_slot = 9;
         let cluster_slots = ClusterSlots::default();
-        cluster_slots.insert_node_id(dead_slot, valid_repair_peer.id);
+        cluster_slots.insert_node_id(dead_slot, *valid_repair_peer.pubkey());
         cluster_info.insert_info(valid_repair_peer);
         // Not enough time has passed, should not update the


@@ -1332,7 +1332,7 @@ mod tests {
     use {
         super::*,
         crate::{repair_response, result::Error},
-        solana_gossip::{socketaddr, socketaddr_any},
+        solana_gossip::{contact_info::ContactInfo, socketaddr, socketaddr_any},
         solana_ledger::{
             blockstore::make_many_slot_entries,
             blockstore_processor::fill_blockstore_slot_with_ticks,
@@ -1810,21 +1810,21 @@ mod tests {
         assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers)));
         let serve_repair_addr = socketaddr!(Ipv4Addr::LOCALHOST, 1243);
-        let nxt = ContactInfo {
-            id: solana_sdk::pubkey::new_rand(),
-            gossip: socketaddr!(Ipv4Addr::LOCALHOST, 1234),
-            tvu: socketaddr!(Ipv4Addr::LOCALHOST, 1235),
-            tvu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1236),
-            repair: socketaddr!(Ipv4Addr::LOCALHOST, 1237),
-            tpu: socketaddr!(Ipv4Addr::LOCALHOST, 1238),
-            tpu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1239),
-            tpu_vote: socketaddr!(Ipv4Addr::LOCALHOST, 1240),
-            rpc: socketaddr!(Ipv4Addr::LOCALHOST, 1241),
-            rpc_pubsub: socketaddr!(Ipv4Addr::LOCALHOST, 1242),
-            serve_repair: serve_repair_addr,
-            wallclock: 0,
-            shred_version: 0,
-        };
+        let mut nxt = ContactInfo::new(
+            solana_sdk::pubkey::new_rand(),
+            timestamp(), // wallclock
+            0u16,        // shred_version
+        );
+        nxt.set_gossip((Ipv4Addr::LOCALHOST, 1234)).unwrap();
+        nxt.set_tvu((Ipv4Addr::LOCALHOST, 1235)).unwrap();
+        nxt.set_tvu_forwards((Ipv4Addr::LOCALHOST, 1236)).unwrap();
+        nxt.set_repair((Ipv4Addr::LOCALHOST, 1237)).unwrap();
+        nxt.set_tpu((Ipv4Addr::LOCALHOST, 1238)).unwrap();
+        nxt.set_tpu_forwards((Ipv4Addr::LOCALHOST, 1239)).unwrap();
+        nxt.set_tpu_vote((Ipv4Addr::LOCALHOST, 1240)).unwrap();
+        nxt.set_rpc((Ipv4Addr::LOCALHOST, 1241)).unwrap();
+        nxt.set_rpc_pubsub((Ipv4Addr::LOCALHOST, 1242)).unwrap();
+        nxt.set_serve_repair(serve_repair_addr).unwrap();
         cluster_info.insert_info(nxt.clone());
         let rv = serve_repair
             .repair_request(
@@ -1837,25 +1837,25 @@ mod tests {
                 &identity_keypair,
             )
             .unwrap();
-        assert_eq!(nxt.serve_repair, serve_repair_addr);
-        assert_eq!(rv.0, nxt.serve_repair);
+        assert_eq!(nxt.serve_repair().unwrap(), serve_repair_addr);
+        assert_eq!(rv.0, nxt.serve_repair().unwrap());
         let serve_repair_addr2 = socketaddr!([127, 0, 0, 2], 1243);
-        let nxt = ContactInfo {
-            id: solana_sdk::pubkey::new_rand(),
-            gossip: socketaddr!(Ipv4Addr::LOCALHOST, 1234),
-            tvu: socketaddr!(Ipv4Addr::LOCALHOST, 1235),
-            tvu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1236),
-            repair: socketaddr!(Ipv4Addr::LOCALHOST, 1237),
-            tpu: socketaddr!(Ipv4Addr::LOCALHOST, 1238),
-            tpu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1239),
-            tpu_vote: socketaddr!(Ipv4Addr::LOCALHOST, 1240),
-            rpc: socketaddr!(Ipv4Addr::LOCALHOST, 1241),
-            rpc_pubsub: socketaddr!(Ipv4Addr::LOCALHOST, 1242),
-            serve_repair: serve_repair_addr2,
-            wallclock: 0,
-            shred_version: 0,
-        };
+        let mut nxt = ContactInfo::new(
+            solana_sdk::pubkey::new_rand(),
+            timestamp(), // wallclock
+            0u16,        // shred_version
+        );
+        nxt.set_gossip((Ipv4Addr::LOCALHOST, 1234)).unwrap();
+        nxt.set_tvu((Ipv4Addr::LOCALHOST, 1235)).unwrap();
+        nxt.set_tvu_forwards((Ipv4Addr::LOCALHOST, 1236)).unwrap();
+        nxt.set_repair((Ipv4Addr::LOCALHOST, 1237)).unwrap();
+        nxt.set_tpu((Ipv4Addr::LOCALHOST, 1238)).unwrap();
+        nxt.set_tpu_forwards((Ipv4Addr::LOCALHOST, 1239)).unwrap();
+        nxt.set_tpu_vote((Ipv4Addr::LOCALHOST, 1240)).unwrap();
+        nxt.set_rpc((Ipv4Addr::LOCALHOST, 1241)).unwrap();
+        nxt.set_rpc_pubsub((Ipv4Addr::LOCALHOST, 1242)).unwrap();
+        nxt.set_serve_repair(serve_repair_addr2).unwrap();
         cluster_info.insert_info(nxt);
         let mut one = false;
         let mut two = false;
@@ -2136,7 +2136,7 @@ mod tests {
         // 1) repair validator set doesn't exist in gossip
        // 2) repair validator set only includes our own id
        // then no repairs should be generated
-        for pubkey in &[solana_sdk::pubkey::new_rand(), me.id] {
+        for pubkey in &[solana_sdk::pubkey::new_rand(), *me.pubkey()] {
            let known_validators = Some(vec![*pubkey].into_iter().collect());
            assert!(serve_repair.repair_peers(&known_validators, 1).is_empty());
            assert!(serve_repair
@@ -2153,10 +2153,10 @@ mod tests {
         }
         // If known validator exists in gossip, should return repair successfully
-        let known_validators = Some(vec![contact_info2.id].into_iter().collect());
+        let known_validators = Some(vec![*contact_info2.pubkey()].into_iter().collect());
         let repair_peers = serve_repair.repair_peers(&known_validators, 1);
         assert_eq!(repair_peers.len(), 1);
-        assert_eq!(repair_peers[0].id, contact_info2.id);
+        assert_eq!(&repair_peers[0].id, contact_info2.pubkey());
         assert!(serve_repair
             .repair_request(
                 &cluster_slots,
@@ -2177,8 +2177,8 @@ mod tests {
             .map(|c| c.id)
             .collect();
         assert_eq!(repair_peers.len(), 2);
-        assert!(repair_peers.contains(&contact_info2.id));
-        assert!(repair_peers.contains(&contact_info3.id));
+        assert!(repair_peers.contains(contact_info2.pubkey()));
+        assert!(repair_peers.contains(contact_info3.pubkey()));
         assert!(serve_repair
             .repair_request(
                 &cluster_slots,


@@ -100,10 +100,12 @@ impl StakedNodesUpdaterService {
                 Some((node.tvu.ip(), *stake))
             })
             .collect();
-        let my_pubkey = cluster_info.my_contact_info().id;
+        let my_pubkey = *cluster_info.my_contact_info().pubkey();
         if let Some(stake) = staked_nodes.get(&my_pubkey) {
             id_to_stake.insert(my_pubkey, *stake);
-            ip_to_stake.insert(cluster_info.my_contact_info().tvu.ip(), *stake);
+            if let Ok(tvu) = cluster_info.my_contact_info().tvu() {
+                ip_to_stake.insert(tvu.ip(), *stake);
+            }
         }
         Self::override_stake(
             cluster_info,


@@ -164,7 +164,11 @@ impl Tpu {
         let (_, tpu_quic_t) = spawn_server(
             transactions_quic_sockets,
             keypair,
-            cluster_info.my_contact_info().tpu.ip(),
+            cluster_info
+                .my_contact_info()
+                .tpu_quic()
+                .expect("Operator must spin up node with valid (QUIC) TPU address")
+                .ip(),
             packet_sender,
             exit.clone(),
             MAX_QUIC_CONNECTIONS_PER_PEER,
@@ -179,7 +183,11 @@ impl Tpu {
         let (_, tpu_forwards_quic_t) = spawn_server(
             transactions_forwards_quic_sockets,
             keypair,
-            cluster_info.my_contact_info().tpu_forwards.ip(),
+            cluster_info
+                .my_contact_info()
+                .tpu_forwards_quic()
+                .expect("Operator must spin up node with valid (QUIC) TPU-forwards address")
+                .ip(),
             forwarded_packet_sender,
             exit.clone(),
             MAX_QUIC_CONNECTIONS_PER_PEER,


@@ -398,7 +398,7 @@ impl Validator {
         tpu_enable_udp: bool,
     ) -> Result<Self, String> {
         let id = identity_keypair.pubkey();
-        assert_eq!(id, node.info.id);
+        assert_eq!(&id, node.info.pubkey());
         warn!("identity: {}", id);
         warn!("vote account: {}", vote_account);
@@ -555,8 +555,8 @@ impl Validator {
             Some(poh_timing_point_sender.clone()),
         )?;
-        node.info.wallclock = timestamp();
-        node.info.shred_version = compute_shred_version(
+        node.info.set_wallclock(timestamp());
+        node.info.set_shred_version(compute_shred_version(
             &genesis_config.hash(),
             Some(
                 &bank_forks
@@ -567,15 +567,16 @@ impl Validator {
                     .read()
                     .unwrap(),
             ),
-        );
+        ));
         Self::print_node_info(&node);
         if let Some(expected_shred_version) = config.expected_shred_version {
-            if expected_shred_version != node.info.shred_version {
+            if expected_shred_version != node.info.shred_version() {
                 return Err(format!(
                     "shred version mismatch: expected {} found: {}",
-                    expected_shred_version, node.info.shred_version,
+                    expected_shred_version,
+                    node.info.shred_version(),
                 ));
             }
         }
@@ -762,7 +763,13 @@ impl Validator {
             let connection_cache = ConnectionCache::new_with_client_options(
                 tpu_connection_pool_size,
                 None,
-                Some((&identity_keypair, node.info.tpu.ip())),
+                Some((
+                    &identity_keypair,
+                    node.info
+                        .tpu()
+                        .expect("Operator must spin up node with valid TPU address")
+                        .ip(),
+                )),
                 Some((&staked_nodes, &identity_keypair.pubkey())),
             );
             Arc::new(connection_cache)
@@ -781,18 +788,16 @@ impl Validator {
             optimistically_confirmed_bank_tracker,
             bank_notification_sender,
         ) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs {
-            if ContactInfo::is_valid_address(&node.info.rpc, &socket_addr_space) {
-                assert!(ContactInfo::is_valid_address(
-                    &node.info.rpc_pubsub,
-                    &socket_addr_space
-                ));
-            } else {
-                assert!(!ContactInfo::is_valid_address(
-                    &node.info.rpc_pubsub,
-                    &socket_addr_space
-                ));
-            }
+            assert_eq!(
+                node.info
+                    .rpc()
+                    .map(|addr| socket_addr_space.check(&addr))
+                    .ok(),
+                node.info
+                    .rpc_pubsub()
+                    .map(|addr| socket_addr_space.check(&addr))
+                    .ok()
+            );
             let (bank_notification_sender, bank_notification_receiver) = unbounded();
             let confirmed_bank_subscribers = if !bank_notification_senders.is_empty() {
                 Some(Arc::new(RwLock::new(bank_notification_senders)))
@@ -873,7 +878,7 @@ impl Validator {
             None => None,
             Some(tcp_listener) => Some(solana_net_utils::ip_echo_server(
                 tcp_listener,
-                Some(node.info.shred_version),
+                Some(node.info.shred_version()),
             )),
         };
@@ -1002,7 +1007,7 @@ impl Validator {
             cluster_confirmed_slot_receiver,
             TvuConfig {
                 max_ledger_shreds: config.max_ledger_shreds,
-                shred_version: node.info.shred_version,
+                shred_version: node.info.shred_version(),
                 repair_validators: config.repair_validators.clone(),
                 repair_whitelist: config.repair_whitelist.clone(),
                 wait_for_vote_to_start_leader,
@@ -1036,7 +1041,7 @@ impl Validator {
             &blockstore,
             &config.broadcast_stage_type,
             &exit,
-            node.info.shred_version,
+            node.info.shred_version(),
             vote_tracker,
             bank_forks.clone(),
             verified_vote_sender,
@@ -2088,6 +2093,7 @@ mod tests {
     use {
         super::*,
         crossbeam_channel::{bounded, RecvTimeoutError},
+        solana_gossip::contact_info::{ContactInfo, LegacyContactInfo},
         solana_ledger::{create_new_tmp_ledger, genesis_utils::create_genesis_config_with_leader},
         solana_sdk::{genesis_config::create_genesis_config, poh_config::PohConfig},
         solana_tpu_client::tpu_client::{
@@ -2111,7 +2117,10 @@ mod tests {
         let voting_keypair = Arc::new(Keypair::new());
         let config = ValidatorConfig {
-            rpc_addrs: Some((validator_node.info.rpc, validator_node.info.rpc_pubsub)),
+            rpc_addrs: Some((
+                validator_node.info.rpc().unwrap(),
+                validator_node.info.rpc_pubsub().unwrap(),
+            )),
             ..ValidatorConfig::default_for_test()
         };
         let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default()));
@@ -2121,7 +2130,7 @@ mod tests {
             &validator_ledger_path,
             &voting_keypair.pubkey(),
             Arc::new(RwLock::new(vec![voting_keypair.clone()])),
-            vec![leader_node.info],
+            vec![LegacyContactInfo::try_from(&leader_node.info).unwrap()],
             &config,
             true, // should_check_duplicate_instance
             start_progress.clone(),
@@ -2204,7 +2213,10 @@ mod tests {
         ledger_paths.push(validator_ledger_path.clone());
         let vote_account_keypair = Keypair::new();
         let config = ValidatorConfig {
-            rpc_addrs: Some((validator_node.info.rpc, validator_node.info.rpc_pubsub)),
+            rpc_addrs: Some((
+                validator_node.info.rpc().unwrap(),
+                validator_node.info.rpc_pubsub().unwrap(),
+            )),
             ..ValidatorConfig::default_for_test()
         };
         Validator::new(
@@ -2213,7 +2225,7 @@ mod tests {
             &validator_ledger_path,
             &vote_account_keypair.pubkey(),
             Arc::new(RwLock::new(vec![Arc::new(vote_account_keypair)])),
-            vec![leader_node.info.clone()],
+            vec![LegacyContactInfo::try_from(&leader_node.info).unwrap()],
             &config,
             true, // should_check_duplicate_instance
             Arc::new(RwLock::new(ValidatorStartProgress::default())),


@@ -480,7 +480,7 @@ mod test {
     use {
         super::*,
         solana_entry::entry::{create_ticks, Entry},
-        solana_gossip::legacy_contact_info::LegacyContactInfo as ContactInfo,
+        solana_gossip::contact_info::ContactInfo,
         solana_ledger::{
             blockstore::{make_many_slot_entries, Blockstore},
             get_tmp_ledger_path,


@@ -6,9 +6,7 @@ use {
         accounts_hash_verifier::AccountsHashVerifier,
         snapshot_packager_service::SnapshotPackagerService,
     },
-    solana_gossip::{
-        cluster_info::ClusterInfo, legacy_contact_info::LegacyContactInfo as ContactInfo,
-    },
+    solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
     solana_runtime::{
         accounts_background_service::{
             AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver,


@@ -10,9 +10,7 @@ use {
         accounts_hash_verifier::AccountsHashVerifier,
         snapshot_packager_service::SnapshotPackagerService,
     },
-    solana_gossip::{
-        cluster_info::ClusterInfo, legacy_contact_info::LegacyContactInfo as ContactInfo,
-    },
+    solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
     solana_runtime::{
         accounts_background_service::{
             AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService,
@@ -516,10 +514,11 @@ fn test_concurrent_snapshot_packaging(
     let cluster_info = Arc::new({
         let keypair = Arc::new(Keypair::new());
-        let contact_info = ContactInfo {
-            id: keypair.pubkey(),
-            ..ContactInfo::default()
-        };
+        let contact_info = ContactInfo::new(
+            keypair.pubkey(),
+            timestamp(), // wallclock
+            0u16,        // shred_version
+        );
         ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified)
     });


@@ -789,6 +789,7 @@ pub mod test {
         solana_client::thin_client::ThinClient,
         solana_core::validator::ValidatorConfig,
         solana_faucet::faucet::run_local_faucet,
+        solana_gossip::contact_info::LegacyContactInfo,
         solana_local_cluster::{
             cluster::Cluster,
             local_cluster::{ClusterConfig, LocalCluster},
@@ -896,7 +897,11 @@ pub mod test {
         assert_eq!(cluster.validators.len(), num_nodes);
         let nodes = cluster.get_node_pubkeys();
-        let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
+        let node = cluster
+            .get_contact_info(&nodes[0])
+            .map(LegacyContactInfo::try_from)
+            .unwrap()
+            .unwrap();
         let nodes_slice = [node];
         // send random transactions to TPU
@@ -905,7 +910,7 @@ pub mod test {
             &nodes_slice,
             10,
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 1024,
                 data_type: DataType::Random,
@@ -929,12 +934,16 @@ pub mod test {
         assert_eq!(cluster.validators.len(), num_nodes);
         let nodes = cluster.get_node_pubkeys();
-        let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
+        let node = cluster
+            .get_contact_info(&nodes[0])
+            .map(LegacyContactInfo::try_from)
+            .unwrap()
+            .unwrap();
         let nodes_slice = [node];
         let client = Arc::new(ThinClient::new(
-            cluster.entry_point_info.rpc,
-            cluster.entry_point_info.tpu,
+            cluster.entry_point_info.rpc().unwrap(),
+            cluster.entry_point_info.tpu().unwrap(),
             cluster.connection_cache.clone(),
         ));
@@ -944,7 +953,7 @@ pub mod test {
             10,
             Some(client.clone()),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant
                 data_type: DataType::Transaction,
@@ -971,7 +980,7 @@ pub mod test {
             10,
             Some(client.clone()),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant
                 data_type: DataType::Transaction,
@@ -998,7 +1007,7 @@ pub mod test {
             10,
             Some(client),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant
                 data_type: DataType::Transaction,
@@ -1061,12 +1070,16 @@ pub mod test {
         cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000);
         let nodes = cluster.get_node_pubkeys();
-        let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
+        let node = cluster
+            .get_contact_info(&nodes[0])
+            .map(LegacyContactInfo::try_from)
+            .unwrap()
+            .unwrap();
         let nodes_slice = [node];
         let client = Arc::new(ThinClient::new(
-            cluster.entry_point_info.rpc,
-            cluster.entry_point_info.tpu,
+            cluster.entry_point_info.rpc().unwrap(),
+            cluster.entry_point_info.tpu().unwrap(),
             cluster.connection_cache.clone(),
         ));
@@ -1077,7 +1090,7 @@ pub mod test {
             10,
             Some(client.clone()),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant if not random
                 data_type: DataType::Transaction,
@@ -1106,7 +1119,7 @@ pub mod test {
             10,
             Some(client.clone()),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant if not random
                 data_type: DataType::Transaction,
@@ -1134,7 +1147,7 @@ pub mod test {
             10,
             Some(client.clone()),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant if not random
                 data_type: DataType::Transaction,
@@ -1162,7 +1175,7 @@ pub mod test {
             10,
             Some(client),
             DosClientParameters {
-                entrypoint_addr: cluster.entry_point_info.gossip,
+                entrypoint_addr: cluster.entry_point_info.gossip().unwrap(),
                 mode: Mode::Tpu,
                 data_size: 0, // irrelevant if not random
                 data_type: DataType::Transaction,


@@ -24,6 +24,7 @@ use {
         cluster_info_metrics::{
             submit_gossip_stats, Counter, GossipStats, ScopedTimer, TimedGuard,
         },
+        contact_info::{ContactInfo, LegacyContactInfo},
         crds::{Crds, Cursor, GossipRoute},
         crds_gossip::CrdsGossip,
         crds_gossip_error::CrdsGossipError,
@@ -35,7 +36,6 @@ use {
         duplicate_shred::DuplicateShred,
         epoch_slots::EpochSlots,
         gossip_error::GossipError,
-        legacy_contact_info::LegacyContactInfo as ContactInfo,
         ping_pong::{self, PingCache, Pong},
         socketaddr, socketaddr_any,
         weighted_shuffle::WeightedShuffle,
@@ -161,7 +161,7 @@ pub struct ClusterInfo {
     /// set the keypair that will be used to sign crds values generated. It is unset only in tests.
     keypair: RwLock<Arc<Keypair>>,
     /// Network entrypoints
-    entrypoints: RwLock<Vec<ContactInfo>>,
+    entrypoints: RwLock<Vec<LegacyContactInfo>>,
     outbound_budget: DataBudget,
     my_contact_info: RwLock<ContactInfo>,
     ping_cache: Mutex<PingCache>,
@@ -407,8 +407,8 @@ impl ClusterInfo {
         keypair: Arc<Keypair>,
         socket_addr_space: SocketAddrSpace,
     ) -> Self {
-        assert_eq!(contact_info.id, keypair.pubkey());
-        let id = contact_info.id;
+        assert_eq!(contact_info.pubkey(), &keypair.pubkey());
+        let id = *contact_info.pubkey();
         let me = Self {
             gossip: CrdsGossip::default(),
             keypair: RwLock::new(keypair),
@@ -444,9 +444,16 @@ impl ClusterInfo {
     fn push_self(&self) {
         let now = timestamp();
-        self.my_contact_info.write().unwrap().wallclock = now;
-        let entries: Vec<_> = vec![
-            CrdsData::LegacyContactInfo(self.my_contact_info()),
+        let node = {
+            let mut node = self.my_contact_info.write().unwrap();
+            node.set_wallclock(now);
+            node.clone()
+        };
+        let entries: Vec<_> = [
+            LegacyContactInfo::try_from(&node)
+                .map(CrdsData::LegacyContactInfo)
+                .expect("Operator must spin up node with valid contact-info"),
+            CrdsData::ContactInfo(node),
             CrdsData::NodeInstance(self.instance.read().unwrap().with_wallclock(now)),
         ]
         .into_iter()
@@ -465,7 +472,7 @@ impl ClusterInfo {
         gossip_validators: Option<&HashSet<Pubkey>>,
         sender: &PacketBatchSender,
     ) {
-        let ContactInfo { shred_version, .. } = *self.my_contact_info.read().unwrap();
+        let shred_version = self.my_contact_info.read().unwrap().shred_version();
         let self_keypair: Arc<Keypair> = self.keypair().clone();
         let mut pings = Vec::new();
         self.gossip.refresh_push_active_set(
@@ -498,18 +505,34 @@ impl ClusterInfo {
     }

     // TODO kill insert_info, only used by tests
-    pub fn insert_info(&self, contact_info: ContactInfo) {
+    pub fn insert_legacy_info(&self, contact_info: LegacyContactInfo) {
         let value =
             CrdsValue::new_signed(CrdsData::LegacyContactInfo(contact_info), &self.keypair());
         let mut gossip_crds = self.gossip.crds.write().unwrap();
         let _ = gossip_crds.insert(value, timestamp(), GossipRoute::LocalMessage);
     }

+    pub fn insert_info(&self, node: ContactInfo) {
+        let entries: Vec<_> = [
+            LegacyContactInfo::try_from(&node)
+                .map(CrdsData::LegacyContactInfo)
+                .expect("Operator must spin up node with valid contact-info"),
+            CrdsData::ContactInfo(node),
+        ]
+        .into_iter()
+        .map(|entry| CrdsValue::new_signed(entry, &self.keypair()))
+        .collect();
+        let mut gossip_crds = self.gossip.crds.write().unwrap();
+        for entry in entries {
+            let _ = gossip_crds.insert(entry, timestamp(), GossipRoute::LocalMessage);
+        }
+    }
+
-    pub fn set_entrypoint(&self, entrypoint: ContactInfo) {
+    pub fn set_entrypoint(&self, entrypoint: LegacyContactInfo) {
         self.set_entrypoints(vec![entrypoint]);
     }

-    pub fn set_entrypoints(&self, entrypoints: Vec<ContactInfo>) {
+    pub fn set_entrypoints(&self, entrypoints: Vec<LegacyContactInfo>) {
         *self.entrypoints.write().unwrap() = entrypoints;
     }
@@ -638,7 +661,7 @@ impl ClusterInfo {
             *instance = NodeInstance::new(&mut thread_rng(), id, timestamp());
         }
         *self.keypair.write().unwrap() = new_keypair;
-        self.my_contact_info.write().unwrap().id = id;
+        self.my_contact_info.write().unwrap().set_pubkey(id);
         self.insert_self();
         self.push_message(CrdsValue::new_signed(
@@ -650,7 +673,7 @@ impl ClusterInfo {
     pub fn lookup_contact_info<F, Y>(&self, id: &Pubkey, map: F) -> Option<Y>
     where
-        F: FnOnce(&ContactInfo) -> Y,
+        F: FnOnce(&LegacyContactInfo) -> Y,
     {
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds.get(*id).map(map)
@@ -659,7 +682,7 @@ impl ClusterInfo {
     pub fn lookup_contact_info_by_gossip_addr(
         &self,
         gossip_addr: &SocketAddr,
-    ) -> Option<ContactInfo> {
+    ) -> Option<LegacyContactInfo> {
         let gossip_crds = self.gossip.crds.read().unwrap();
         let mut nodes = gossip_crds.get_nodes_contact_info();
         nodes.find(|node| node.gossip == *gossip_addr).cloned()
@@ -670,7 +693,7 @@ impl ClusterInfo {
     }

     pub fn my_shred_version(&self) -> u16 {
-        self.my_contact_info.read().unwrap().shred_version
+        self.my_contact_info.read().unwrap().shred_version()
     }

     fn lookup_epoch_slots(&self, ix: EpochSlotsIndex) -> EpochSlots {
@@ -1095,7 +1118,9 @@ impl ClusterInfo {
         transaction: &Transaction,
         tpu: Option<SocketAddr>,
     ) -> Result<(), GossipError> {
-        let tpu = tpu.unwrap_or_else(|| self.my_contact_info().tpu);
+        let tpu = tpu
+            .map(Ok)
+            .unwrap_or_else(|| self.my_contact_info().tpu())?;
         let buf = serialize(transaction)?;
         self.socket.send_to(&buf, tpu)?;
         Ok(())
@@ -1224,7 +1249,7 @@ impl ClusterInfo {
     }

     /// all validators that have a valid rpc port regardless of `shred_version`.
-    pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
+    pub fn all_rpc_peers(&self) -> Vec<LegacyContactInfo> {
         let self_pubkey = self.id();
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
@@ -1238,7 +1263,7 @@ impl ClusterInfo {
     }

     // All nodes in gossip (including spy nodes) and the last time we heard about them
-    pub fn all_peers(&self) -> Vec<(ContactInfo, u64)> {
+    pub fn all_peers(&self) -> Vec<(LegacyContactInfo, u64)> {
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
             .get_nodes()
@@ -1246,7 +1271,7 @@ impl ClusterInfo {
             .collect()
     }

-    pub fn gossip_peers(&self) -> Vec<ContactInfo> {
+    pub fn gossip_peers(&self) -> Vec<LegacyContactInfo> {
         let me = self.id();
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
@@ -1260,7 +1285,7 @@ impl ClusterInfo {
     }

     /// all validators that have a valid tvu port regardless of `shred_version`.
-    pub fn all_tvu_peers(&self) -> Vec<ContactInfo> {
+    pub fn all_tvu_peers(&self) -> Vec<LegacyContactInfo> {
         let self_pubkey = self.id();
         self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
             .get_nodes_contact_info()
@@ -1273,7 +1298,7 @@ impl ClusterInfo {
     }

     /// all validators that have a valid tvu port and are on the same `shred_version`.
-    pub fn tvu_peers(&self) -> Vec<ContactInfo> {
+    pub fn tvu_peers(&self) -> Vec<LegacyContactInfo> {
         let self_pubkey = self.id();
         let self_shred_version = self.my_shred_version();
         self.time_gossip_read_lock("tvu_peers", &self.stats.tvu_peers)
@@ -1288,7 +1313,7 @@ impl ClusterInfo {
     }

     /// all tvu peers with valid gossip addrs that likely have the slot being requested
-    pub fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
+    pub fn repair_peers(&self, slot: Slot) -> Vec<LegacyContactInfo> {
         let _st = ScopedTimer::from(&self.stats.repair_peers);
         let self_pubkey = self.id();
         let self_shred_version = self.my_shred_version();
@@ -1309,14 +1334,14 @@ impl ClusterInfo {
             .collect()
     }

-    fn is_spy_node(contact_info: &ContactInfo, socket_addr_space: &SocketAddrSpace) -> bool {
+    fn is_spy_node(contact_info: &LegacyContactInfo, socket_addr_space: &SocketAddrSpace) -> bool {
         !ContactInfo::is_valid_address(&contact_info.tpu, socket_addr_space)
             || !ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
             || !ContactInfo::is_valid_address(&contact_info.tvu, socket_addr_space)
     }

     /// compute broadcast table
-    pub fn tpu_peers(&self) -> Vec<ContactInfo> {
+    pub fn tpu_peers(&self) -> Vec<LegacyContactInfo> {
         let self_pubkey = self.id();
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
@@ -1330,19 +1355,29 @@ impl ClusterInfo {
     }

     fn insert_self(&self) {
-        let value = CrdsValue::new_signed(
-            CrdsData::LegacyContactInfo(self.my_contact_info()),
-            &self.keypair(),
-        );
+        let node = self.my_contact_info();
+        let entries: Vec<_> = [
+            LegacyContactInfo::try_from(&node)
+                .map(CrdsData::LegacyContactInfo)
+                .expect("Operator must spin up node with valid contact-info"),
+            CrdsData::ContactInfo(node),
+        ]
+        .into_iter()
+        .map(|entry| CrdsValue::new_signed(entry, &self.keypair()))
+        .collect();
         let mut gossip_crds = self.gossip.crds.write().unwrap();
-        let _ = gossip_crds.insert(value, timestamp(), GossipRoute::LocalMessage);
+        for entry in entries {
+            if let Err(err) = gossip_crds.insert(entry, timestamp(), GossipRoute::LocalMessage) {
+                error!("Insert self failed: {err:?}");
+            }
+        }
     }

     // If the network entrypoint hasn't been discovered yet, add it to the crds table
     fn append_entrypoint_to_pulls(
         &self,
         thread_pool: &ThreadPool,
-        pulls: &mut HashMap<ContactInfo, Vec<CrdsFilter>>,
+        pulls: &mut HashMap<LegacyContactInfo, Vec<CrdsFilter>>,
     ) {
         const THROTTLE_DELAY: u64 = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2;
         let entrypoint = {
@@ -1461,7 +1496,9 @@ impl ClusterInfo {
         self.append_entrypoint_to_pulls(thread_pool, &mut pulls);
         let num_requests = pulls.values().map(Vec::len).sum::<usize>() as u64;
         self.stats.new_pull_requests_count.add_relaxed(num_requests);
-        let self_info = CrdsData::LegacyContactInfo(self.my_contact_info());
+        let self_info = LegacyContactInfo::try_from(&self.my_contact_info())
+            .map(CrdsData::LegacyContactInfo)
+            .expect("Operator must spin up node with valid contact-info");
         let self_info = CrdsValue::new_signed(self_info, &self.keypair());
         let pulls = pulls
             .into_iter()
@@ -1514,7 +1551,7 @@ impl ClusterInfo {
         push_messages
             .into_iter()
             .filter_map(|(pubkey, messages)| {
-                let peer: &ContactInfo = gossip_crds.get(pubkey)?;
+                let peer: &LegacyContactInfo = gossip_crds.get(pubkey)?;
                 Some((peer.gossip, messages))
             })
             .collect()
@@ -1625,7 +1662,10 @@ impl ClusterInfo {
                     "Setting shred version to {:?} from entrypoint {:?}",
                     entrypoint.shred_version, entrypoint.id
                 );
-                self.my_contact_info.write().unwrap().shred_version = entrypoint.shred_version;
+                self.my_contact_info
+                    .write()
+                    .unwrap()
+                    .set_shred_version(entrypoint.shred_version);
             }
         }
         self.my_shred_version() != 0
@@ -2276,7 +2316,7 @@ impl ClusterInfo {
             .into_par_iter()
             .with_min_len(256)
             .filter_map(|(from, prunes)| {
-                let peer: &ContactInfo = gossip_crds.get(from)?;
+                let peer: &LegacyContactInfo = gossip_crds.get(from)?;
                 let mut prune_data = PruneData {
                     pubkey: self_pubkey,
                     prunes,
@@ -2665,13 +2705,9 @@ impl ClusterInfo {
     }

 pub fn gossip_contact_info(id: Pubkey, gossip: SocketAddr, shred_version: u16) -> ContactInfo {
-    ContactInfo {
-        id,
-        gossip,
-        wallclock: timestamp(),
-        shred_version,
-        ..ContactInfo::default()
-    }
+    let mut node = ContactInfo::new(id, /*wallclock:*/ timestamp(), shred_version);
+    let _ = node.set_gossip(gossip);
+    node
 }

 /// An alternative to Spy Node that has a valid gossip address and fully participate in Gossip.
@@ -2806,21 +2842,41 @@ impl Node {
         let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
         let ancestor_hashes_requests = UdpSocket::bind("0.0.0.0:0").unwrap();

-        let info = ContactInfo {
-            id: *pubkey,
-            gossip: gossip_addr,
-            tvu: tvu.local_addr().unwrap(),
-            tvu_forwards: tvu_forwards.local_addr().unwrap(),
-            repair: repair.local_addr().unwrap(),
-            tpu: tpu.local_addr().unwrap(),
-            tpu_forwards: tpu_forwards.local_addr().unwrap(),
-            tpu_vote: tpu_vote.local_addr().unwrap(),
-            rpc: rpc_addr,
-            rpc_pubsub: rpc_pubsub_addr,
-            serve_repair: serve_repair.local_addr().unwrap(),
-            wallclock: timestamp(),
-            shred_version: 0,
-        };
+        let mut info = ContactInfo::new(
+            *pubkey,
+            timestamp(), // wallclock
+            0u16,        // shred_version
+        );
+        macro_rules! set_socket {
+            ($method:ident, $addr:expr, $name:literal) => {
+                info.$method($addr).expect(&format!(
+                    "Operator must spin up node with valid {} address",
+                    $name
+                ))
+            };
+        }
+        set_socket!(set_gossip, gossip_addr, "gossip");
+        set_socket!(set_tvu, tvu.local_addr().unwrap(), "TVU");
+        set_socket!(
+            set_tvu_forwards,
+            tvu_forwards.local_addr().unwrap(),
+            "TVU-forwards"
+        );
+        set_socket!(set_repair, repair.local_addr().unwrap(), "repair");
+        set_socket!(set_tpu, tpu.local_addr().unwrap(), "TPU");
+        set_socket!(
+            set_tpu_forwards,
+            tpu_forwards.local_addr().unwrap(),
+            "TPU-forwards"
+        );
+        set_socket!(set_tpu_vote, tpu_vote.local_addr().unwrap(), "TPU-vote");
+        set_socket!(set_rpc, rpc_addr, "RPC");
+        set_socket!(set_rpc_pubsub, rpc_pubsub_addr, "RPC-pubsub");
+        set_socket!(
+            set_serve_repair,
+            serve_repair.local_addr().unwrap(),
+            "serve-repair"
+        );
         Node {
             info,
             sockets: Sockets {
@@ -2886,21 +2942,30 @@ impl Node {
         let rpc_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap();
         let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap();

-        let info = ContactInfo {
-            id: *pubkey,
-            gossip: SocketAddr::new(gossip_addr.ip(), gossip_port),
-            tvu: SocketAddr::new(gossip_addr.ip(), tvu_port),
-            tvu_forwards: SocketAddr::new(gossip_addr.ip(), tvu_forwards_port),
-            repair: SocketAddr::new(gossip_addr.ip(), repair_port),
-            tpu: SocketAddr::new(gossip_addr.ip(), tpu_port),
-            tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
-            tpu_vote: SocketAddr::new(gossip_addr.ip(), tpu_vote_port),
-            rpc: SocketAddr::new(gossip_addr.ip(), rpc_port),
-            rpc_pubsub: SocketAddr::new(gossip_addr.ip(), rpc_pubsub_port),
-            serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port),
-            wallclock: timestamp(),
-            shred_version: 0,
-        };
+        let addr = gossip_addr.ip();
+        let mut info = ContactInfo::new(
+            *pubkey,
+            timestamp(), // wallclock
+            0u16,        // shred_version
+        );
+        macro_rules! set_socket {
+            ($method:ident, $port:ident, $name:literal) => {
+                info.$method((addr, $port)).expect(&format!(
+                    "Operator must spin up node with valid {} address",
+                    $name
+                ))
+            };
+        }
+        set_socket!(set_gossip, gossip_port, "gossip");
+        set_socket!(set_tvu, tvu_port, "TVU");
+        set_socket!(set_tvu_forwards, tvu_forwards_port, "TVU-forwards");
+        set_socket!(set_repair, repair_port, "repair");
+        set_socket!(set_tpu, tpu_port, "TPU");
+        set_socket!(set_tpu_forwards, tpu_forwards_port, "TPU-forwards");
+        set_socket!(set_tpu_vote, tpu_vote_port, "TPU-vote");
+        set_socket!(set_rpc, rpc_port, "RPC");
+        set_socket!(set_rpc_pubsub, rpc_pubsub_port, "RPC-pubsub");
+        set_socket!(set_serve_repair, serve_repair_port, "serve-repair");

         trace!("new ContactInfo: {:?}", info);
         Node {
@@ -2973,21 +3038,20 @@ impl Node {
         let (_, ancestor_hashes_requests) = Self::bind(bind_ip_addr, port_range);

-        let info = ContactInfo {
-            id: *pubkey,
-            gossip: SocketAddr::new(gossip_addr.ip(), gossip_port),
-            tvu: SocketAddr::new(gossip_addr.ip(), tvu_port),
-            tvu_forwards: SocketAddr::new(gossip_addr.ip(), tvu_forwards_port),
-            repair: SocketAddr::new(gossip_addr.ip(), repair_port),
-            tpu: overwrite_tpu_addr.unwrap_or_else(|| SocketAddr::new(gossip_addr.ip(), tpu_port)),
-            tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
-            tpu_vote: SocketAddr::new(gossip_addr.ip(), tpu_vote_port),
-            rpc: socketaddr_any!(),
-            rpc_pubsub: socketaddr_any!(),
-            serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port),
-            wallclock: 0,
-            shred_version: 0,
-        };
+        let mut info = ContactInfo::new(
+            *pubkey,
+            timestamp(), // wallclock
+            0u16,        // shred_version
+        );
+        let addr = gossip_addr.ip();
+        let _ = info.set_gossip((addr, gossip_port));
+        let _ = info.set_tvu((addr, tvu_port));
+        let _ = info.set_tvu_forwards((addr, tvu_forwards_port));
+        let _ = info.set_repair((addr, repair_port));
+        let _ = info.set_tpu(overwrite_tpu_addr.unwrap_or_else(|| SocketAddr::new(addr, tpu_port)));
+        let _ = info.set_tpu_forwards((addr, tpu_forwards_port));
+        let _ = info.set_tpu_vote((addr, tpu_vote_port));
+        let _ = info.set_serve_repair((addr, serve_repair_port));

         trace!("new ContactInfo: {:?}", info);
         Node {
@@ -3130,7 +3194,7 @@ mod tests {
         //check that a gossip nodes always show up as spies
         let (node, _, _) = ClusterInfo::spy_node(solana_sdk::pubkey::new_rand(), 0);
         assert!(ClusterInfo::is_spy_node(
-            &node,
+            &LegacyContactInfo::try_from(&node).unwrap(),
             &SocketAddrSpace::Unspecified
         ));
         let (node, _, _) = ClusterInfo::gossip_node(
@@ -3139,7 +3203,7 @@ mod tests {
             0,
         );
         assert!(ClusterInfo::is_spy_node(
-            &node,
+            &LegacyContactInfo::try_from(&node).unwrap(),
             &SocketAddrSpace::Unspecified
         ));
     }
@ -3165,21 +3229,21 @@ mod tests {
let serve_repair = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8910); let serve_repair = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8910);
// before:
let info = ContactInfo {
    id: keypair.pubkey(),
    gossip,
    tvu,
    tvu_forwards,
    repair,
    tpu,
    tpu_forwards,
    tpu_vote,
    rpc,
    rpc_pubsub,
    serve_repair,
    wallclock: timestamp(),
    shred_version: 0,
};
// after:
let mut info = ContactInfo::new(
    keypair.pubkey(),
    timestamp(), // wallclock
    0u16,        // shred_version
);
info.set_gossip(gossip).unwrap();
info.set_tvu(tvu).unwrap();
info.set_tvu_forwards(tvu_forwards).unwrap();
info.set_repair(repair).unwrap();
info.set_tpu(tpu).unwrap();
info.set_tpu_forwards(tpu_forwards).unwrap();
info.set_tpu_vote(tpu_vote).unwrap();
info.set_rpc(rpc).unwrap();
info.set_rpc_pubsub(rpc_pubsub).unwrap();
info.set_serve_repair(serve_repair).unwrap();
Node { Node {
info, info,
sockets: Sockets { sockets: Sockets {
@ -3387,7 +3451,7 @@ RPC Enabled Nodes: 1"#;
} }
fn test_crds_values(pubkey: Pubkey) -> Vec<CrdsValue> { fn test_crds_values(pubkey: Pubkey) -> Vec<CrdsValue> {
let entrypoint = ContactInfo::new_localhost(&pubkey, timestamp()); let entrypoint = LegacyContactInfo::new_localhost(&pubkey, timestamp());
let entrypoint_crdsvalue = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(entrypoint)); let entrypoint_crdsvalue = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(entrypoint));
vec![entrypoint_crdsvalue] vec![entrypoint_crdsvalue]
} }
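Note the asymmetry this rename makes explicit: CRDS values on the wire still carry the legacy struct, even though ClusterInfo now stores the new type internally. A sketch of building such a value, using module paths as they appear in this diff:

use {
    solana_gossip::{
        crds_value::{CrdsData, CrdsValue},
        legacy_contact_info::LegacyContactInfo,
    },
    solana_sdk::{pubkey::Pubkey, timing::timestamp},
};

fn legacy_crds_value(pubkey: &Pubkey) -> CrdsValue {
    // Wire format is unchanged: gossip still serializes LegacyContactInfo.
    let node = LegacyContactInfo::new_localhost(pubkey, timestamp());
    CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(node))
}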
@ -3577,7 +3641,7 @@ RPC Enabled Nodes: 1"#;
let keypair = Arc::new(Keypair::new()); let keypair = Arc::new(Keypair::new());
let d = ContactInfo::new_localhost(&keypair.pubkey(), timestamp()); let d = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
let cluster_info = ClusterInfo::new(d.clone(), keypair, SocketAddrSpace::Unspecified); let cluster_info = ClusterInfo::new(d.clone(), keypair, SocketAddrSpace::Unspecified);
assert_eq!(d.id, cluster_info.id()); assert_eq!(d.pubkey(), &cluster_info.id());
} }
#[test] #[test]
@ -3586,7 +3650,7 @@ RPC Enabled Nodes: 1"#;
let d = ContactInfo::new_localhost(&keypair.pubkey(), timestamp()); let d = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
let cluster_info = ClusterInfo::new(d, keypair, SocketAddrSpace::Unspecified); let cluster_info = ClusterInfo::new(d, keypair, SocketAddrSpace::Unspecified);
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let label = CrdsValueLabel::LegacyContactInfo(d.id); let label = CrdsValueLabel::LegacyContactInfo(*d.pubkey());
cluster_info.insert_info(d); cluster_info.insert_info(d);
let gossip_crds = cluster_info.gossip.crds.read().unwrap(); let gossip_crds = cluster_info.gossip.crds.read().unwrap();
assert!(gossip_crds.get::<&CrdsValue>(&label).is_some()); assert!(gossip_crds.get::<&CrdsValue>(&label).is_some());
@ -3622,7 +3686,7 @@ RPC Enabled Nodes: 1"#;
#[test] #[test]
fn new_with_external_ip_test_random() { fn new_with_external_ip_test_random() {
let ip = Ipv4Addr::UNSPECIFIED; let ip = Ipv4Addr::LOCALHOST;
let node = Node::new_with_external_ip( let node = Node::new_with_external_ip(
&solana_sdk::pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
&socketaddr!(ip, 0), &socketaddr!(ip, 0),
@ -3643,11 +3707,11 @@ RPC Enabled Nodes: 1"#;
VALIDATOR_PORT_RANGE.1 + (2 * MINIMUM_VALIDATOR_PORT_RANGE_WIDTH), VALIDATOR_PORT_RANGE.1 + (2 * MINIMUM_VALIDATOR_PORT_RANGE_WIDTH),
); );
let ip = IpAddr::V4(Ipv4Addr::UNSPECIFIED); let ip = IpAddr::V4(Ipv4Addr::LOCALHOST);
let port = bind_in_range(ip, port_range).expect("Failed to bind").0; let port = bind_in_range(ip, port_range).expect("Failed to bind").0;
let node = Node::new_with_external_ip( let node = Node::new_with_external_ip(
&solana_sdk::pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
&socketaddr!(Ipv4Addr::UNSPECIFIED, port), &socketaddr!(Ipv4Addr::LOCALHOST, port),
port_range, port_range,
ip, ip,
None, None,
@ -3674,11 +3738,11 @@ RPC Enabled Nodes: 1"#;
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
); );
let stakes = HashMap::<Pubkey, u64>::default(); let stakes = HashMap::<Pubkey, u64>::default();
// before:
cluster_info
    .ping_cache
    .lock()
    .unwrap()
    .mock_pong(peer.id, peer.gossip, Instant::now());
// after:
cluster_info.ping_cache.lock().unwrap().mock_pong(
    *peer.pubkey(),
    peer.gossip().unwrap(),
    Instant::now(),
);
cluster_info.insert_info(peer); cluster_info.insert_info(peer);
cluster_info.gossip.refresh_push_active_set( cluster_info.gossip.refresh_push_active_set(
&cluster_info.keypair(), &cluster_info.keypair(),
@ -3950,7 +4014,7 @@ RPC Enabled Nodes: 1"#;
// Test with different shred versions. // Test with different shred versions.
let mut rng = rand::thread_rng(); let mut rng = rand::thread_rng();
let node_pubkey = Pubkey::new_unique(); let node_pubkey = Pubkey::new_unique();
let mut node = ContactInfo::new_rand(&mut rng, Some(node_pubkey)); let mut node = LegacyContactInfo::new_rand(&mut rng, Some(node_pubkey));
node.shred_version = 42; node.shred_version = 42;
let epoch_slots = EpochSlots::new_rand(&mut rng, Some(node_pubkey)); let epoch_slots = EpochSlots::new_rand(&mut rng, Some(node_pubkey));
let entries = vec![ let entries = vec![
@ -3973,7 +4037,7 @@ RPC Enabled Nodes: 1"#;
// Match shred versions. // Match shred versions.
{ {
let mut node = cluster_info.my_contact_info.write().unwrap(); let mut node = cluster_info.my_contact_info.write().unwrap();
node.shred_version = 42; node.set_shred_version(42);
} }
cluster_info.push_self(); cluster_info.push_self();
cluster_info.flush_push_queue(); cluster_info.flush_push_queue();
@ -3994,7 +4058,7 @@ RPC Enabled Nodes: 1"#;
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
); );
let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); let entrypoint = LegacyContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
cluster_info.set_entrypoint(entrypoint.clone()); cluster_info.set_entrypoint(entrypoint.clone());
let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new()); let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new());
assert!(pings.is_empty()); assert!(pings.is_empty());
@ -4032,7 +4096,8 @@ RPC Enabled Nodes: 1"#;
#[test] #[test]
fn test_split_messages_small() { fn test_split_messages_small() {
// before:
let value = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ContactInfo::default()));
// after:
let value =
    CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(LegacyContactInfo::default()));
test_split_messages(value); test_split_messages(value);
} }
@ -4134,7 +4199,8 @@ RPC Enabled Nodes: 1"#;
} }
fn check_pull_request_size(filter: CrdsFilter) { fn check_pull_request_size(filter: CrdsFilter) {
// before:
let value = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ContactInfo::default()));
// after:
let value =
    CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(LegacyContactInfo::default()));
let protocol = Protocol::PullRequest(filter, value); let protocol = Protocol::PullRequest(filter, value);
assert!(serialized_size(&protocol).unwrap() <= PACKET_DATA_SIZE as u64); assert!(serialized_size(&protocol).unwrap() <= PACKET_DATA_SIZE as u64);
} }
@ -4158,21 +4224,21 @@ RPC Enabled Nodes: 1"#;
stakes.insert(id2, 10); stakes.insert(id2, 10);
// duplicate // duplicate
contact_info.wallclock = timestamp() + 1; contact_info.set_wallclock(timestamp() + 1);
cluster_info.insert_info(contact_info); cluster_info.insert_info(contact_info);
// no tvu // no tvu
let id3 = Pubkey::from([3u8; 32]); let id3 = Pubkey::from([3u8; 32]);
let mut contact_info = ContactInfo::new_localhost(&id3, timestamp()); let mut contact_info = ContactInfo::new_localhost(&id3, timestamp());
contact_info.tvu = "0.0.0.0:0".parse().unwrap(); contact_info.remove_tvu();
cluster_info.insert_info(contact_info); cluster_info.insert_info(contact_info);
stakes.insert(id3, 10); stakes.insert(id3, 10);
// normal but with different shred version // normal but with different shred version
let id4 = Pubkey::from([4u8; 32]); let id4 = Pubkey::from([4u8; 32]);
let mut contact_info = ContactInfo::new_localhost(&id4, timestamp()); let mut contact_info = ContactInfo::new_localhost(&id4, timestamp());
contact_info.shred_version = 1; contact_info.set_shred_version(1);
assert_ne!(contact_info.shred_version, d.shred_version); assert_ne!(contact_info.shred_version(), d.shred_version());
cluster_info.insert_info(contact_info); cluster_info.insert_info(contact_info);
stakes.insert(id4, 10); stakes.insert(id4, 10);
} }
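The same test migrates from poking sentinel values into public fields to explicit mutators. A short sketch of the three patterns, under the same assumptions as the earlier sketch:

let mut contact_info = ContactInfo::new_localhost(&Pubkey::new_unique(), timestamp());
contact_info.set_wallclock(timestamp() + 1); // was: contact_info.wallclock = timestamp() + 1;
contact_info.set_shred_version(1);           // was: contact_info.shred_version = 1;
contact_info.remove_tvu();                   // was: contact_info.tvu = "0.0.0.0:0".parse().unwrap();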
@ -4188,17 +4254,19 @@ RPC Enabled Nodes: 1"#;
); );
let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let mut entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); let mut entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
// before:
entrypoint.gossip = socketaddr!("127.0.0.2:1234");
cluster_info.set_entrypoint(entrypoint.clone());
// after:
entrypoint
    .set_gossip(socketaddr!("127.0.0.2:1234"))
    .unwrap();
cluster_info.set_entrypoint(LegacyContactInfo::try_from(&entrypoint).unwrap());
let mut stakes = HashMap::new(); let mut stakes = HashMap::new();
let other_node_pubkey = solana_sdk::pubkey::new_rand(); let other_node_pubkey = solana_sdk::pubkey::new_rand();
let other_node = ContactInfo::new_localhost(&other_node_pubkey, timestamp()); let other_node = ContactInfo::new_localhost(&other_node_pubkey, timestamp());
assert_ne!(other_node.gossip, entrypoint.gossip); assert_ne!(other_node.gossip().unwrap(), entrypoint.gossip().unwrap());
cluster_info.ping_cache.lock().unwrap().mock_pong( cluster_info.ping_cache.lock().unwrap().mock_pong(
other_node.id, *other_node.pubkey(),
other_node.gossip, other_node.gossip().unwrap(),
Instant::now(), Instant::now(),
); );
cluster_info.insert_info(other_node.clone()); cluster_info.insert_info(other_node.clone());
@ -4209,7 +4277,9 @@ RPC Enabled Nodes: 1"#;
let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes); let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes);
assert!(pings.is_empty()); assert!(pings.is_empty());
assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS);
// before:
assert!(pulls.into_iter().all(|(addr, _)| addr == other_node.gossip));
// after:
assert!(pulls
    .into_iter()
    .all(|(addr, _)| addr == other_node.gossip().unwrap()));
// Pull request 2: pretend it's been a while since we've pulled from `entrypoint`. There should // Pull request 2: pretend it's been a while since we've pulled from `entrypoint`. There should
// now be two pull requests // now be two pull requests
@ -4221,7 +4291,7 @@ RPC Enabled Nodes: 1"#;
assert_eq!( assert_eq!(
pulls pulls
.iter() .iter()
.filter(|(addr, _)| *addr == node.gossip) .filter(|(addr, _)| *addr == node.gossip().unwrap())
.count(), .count(),
MIN_NUM_BLOOM_FILTERS MIN_NUM_BLOOM_FILTERS
); );
@ -4231,7 +4301,9 @@ RPC Enabled Nodes: 1"#;
let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes); let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes);
assert!(pings.is_empty()); assert!(pings.is_empty());
assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS);
// before:
assert!(pulls.into_iter().all(|(addr, _)| addr == other_node.gossip));
// after:
assert!(pulls
    .into_iter()
    .all(|(addr, _)| addr == other_node.gossip().unwrap()));
} }
#[test] #[test]
@ -4298,7 +4370,7 @@ RPC Enabled Nodes: 1"#;
.expect("unable to serialize default filter") as usize; .expect("unable to serialize default filter") as usize;
let protocol = Protocol::PullRequest( let protocol = Protocol::PullRequest(
CrdsFilter::default(), CrdsFilter::default(),
CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(ContactInfo::default())), CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(LegacyContactInfo::default())),
); );
let protocol_size = let protocol_size =
serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize; serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize;
@ -4370,12 +4442,12 @@ RPC Enabled Nodes: 1"#;
// Simulating starting up with two entrypoints, no known id, only a gossip // Simulating starting up with two entrypoints, no known id, only a gossip
// address // address
let entrypoint1_gossip_addr = socketaddr!("127.0.0.2:1234"); let entrypoint1_gossip_addr = socketaddr!("127.0.0.2:1234");
let mut entrypoint1 = ContactInfo::new_localhost(&Pubkey::default(), timestamp()); let mut entrypoint1 = LegacyContactInfo::new_localhost(&Pubkey::default(), timestamp());
entrypoint1.gossip = entrypoint1_gossip_addr; entrypoint1.gossip = entrypoint1_gossip_addr;
assert_eq!(entrypoint1.shred_version, 0); assert_eq!(entrypoint1.shred_version, 0);
let entrypoint2_gossip_addr = socketaddr!("127.0.0.2:5678"); let entrypoint2_gossip_addr = socketaddr!("127.0.0.2:5678");
let mut entrypoint2 = ContactInfo::new_localhost(&Pubkey::default(), timestamp()); let mut entrypoint2 = LegacyContactInfo::new_localhost(&Pubkey::default(), timestamp());
entrypoint2.gossip = entrypoint2_gossip_addr; entrypoint2.gossip = entrypoint2_gossip_addr;
assert_eq!(entrypoint2.shred_version, 0); assert_eq!(entrypoint2.shred_version, 0);
cluster_info.set_entrypoints(vec![entrypoint1, entrypoint2]); cluster_info.set_entrypoints(vec![entrypoint1, entrypoint2]);
@ -4384,9 +4456,13 @@ RPC Enabled Nodes: 1"#;
// 0 // 0
let mut gossiped_entrypoint1_info = let mut gossiped_entrypoint1_info =
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
// before:
gossiped_entrypoint1_info.gossip = entrypoint1_gossip_addr;
gossiped_entrypoint1_info.shred_version = 0;
cluster_info.insert_info(gossiped_entrypoint1_info.clone());
// after:
gossiped_entrypoint1_info
    .set_gossip(entrypoint1_gossip_addr)
    .unwrap();
gossiped_entrypoint1_info.set_shred_version(0);
cluster_info.insert_info(gossiped_entrypoint1_info.clone());
let gossiped_entrypoint1_info =
    LegacyContactInfo::try_from(&gossiped_entrypoint1_info).unwrap();
assert!(!cluster_info assert!(!cluster_info
.entrypoints .entrypoints
.read() .read()
@ -4411,9 +4487,13 @@ RPC Enabled Nodes: 1"#;
// !0 // !0
let mut gossiped_entrypoint2_info = let mut gossiped_entrypoint2_info =
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
// before:
gossiped_entrypoint2_info.gossip = entrypoint2_gossip_addr;
gossiped_entrypoint2_info.shred_version = 1;
cluster_info.insert_info(gossiped_entrypoint2_info.clone());
// after:
gossiped_entrypoint2_info
    .set_gossip(entrypoint2_gossip_addr)
    .unwrap();
gossiped_entrypoint2_info.set_shred_version(1);
cluster_info.insert_info(gossiped_entrypoint2_info.clone());
let gossiped_entrypoint2_info =
    LegacyContactInfo::try_from(&gossiped_entrypoint2_info).unwrap();
assert!(!cluster_info assert!(!cluster_info
.entrypoints .entrypoints
.read() .read()
@ -4443,7 +4523,7 @@ RPC Enabled Nodes: 1"#;
{ {
let mut contact_info = let mut contact_info =
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()); ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp());
contact_info.shred_version = 2; contact_info.set_shred_version(2);
contact_info contact_info
}, },
node_keypair, node_keypair,
@ -4454,7 +4534,7 @@ RPC Enabled Nodes: 1"#;
// Simulating starting up with default entrypoint, no known id, only a gossip // Simulating starting up with default entrypoint, no known id, only a gossip
// address // address
let entrypoint_gossip_addr = socketaddr!("127.0.0.2:1234"); let entrypoint_gossip_addr = socketaddr!("127.0.0.2:1234");
let mut entrypoint = ContactInfo::new_localhost(&Pubkey::default(), timestamp()); let mut entrypoint = LegacyContactInfo::new_localhost(&Pubkey::default(), timestamp());
entrypoint.gossip = entrypoint_gossip_addr; entrypoint.gossip = entrypoint_gossip_addr;
assert_eq!(entrypoint.shred_version, 0); assert_eq!(entrypoint.shred_version, 0);
cluster_info.set_entrypoint(entrypoint); cluster_info.set_entrypoint(entrypoint);
@ -4462,8 +4542,10 @@ RPC Enabled Nodes: 1"#;
// Simulate getting entrypoint ContactInfo from gossip // Simulate getting entrypoint ContactInfo from gossip
let mut gossiped_entrypoint_info = let mut gossiped_entrypoint_info =
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
// before:
gossiped_entrypoint_info.gossip = entrypoint_gossip_addr;
gossiped_entrypoint_info.shred_version = 1;
// after:
gossiped_entrypoint_info
    .set_gossip(entrypoint_gossip_addr)
    .unwrap();
gossiped_entrypoint_info.set_shred_version(1);
cluster_info.insert_info(gossiped_entrypoint_info.clone()); cluster_info.insert_info(gossiped_entrypoint_info.clone());
// Adopt the entrypoint's gossiped contact info and verify // Adopt the entrypoint's gossiped contact info and verify
@ -4471,7 +4553,7 @@ RPC Enabled Nodes: 1"#;
assert_eq!(cluster_info.entrypoints.read().unwrap().len(), 1); assert_eq!(cluster_info.entrypoints.read().unwrap().len(), 1);
assert_eq!( assert_eq!(
cluster_info.entrypoints.read().unwrap()[0], cluster_info.entrypoints.read().unwrap()[0],
gossiped_entrypoint_info LegacyContactInfo::try_from(&gossiped_entrypoint_info).unwrap(),
); );
assert!(entrypoints_processed); assert!(entrypoints_processed);
assert_eq!(cluster_info.my_shred_version(), 2); // <--- No change to shred version assert_eq!(cluster_info.my_shred_version(), 2); // <--- No change to shred version
@ -4625,7 +4707,7 @@ RPC Enabled Nodes: 1"#;
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
)); ));
let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); let entrypoint = LegacyContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
cluster_info.set_entrypoint(entrypoint); cluster_info.set_entrypoint(entrypoint);
let mut rng = rand::thread_rng(); let mut rng = rand::thread_rng();
@ -4636,7 +4718,7 @@ RPC Enabled Nodes: 1"#;
let data: Vec<_> = repeat_with(|| { let data: Vec<_> = repeat_with(|| {
let keypair = Keypair::new(); let keypair = Keypair::new();
peers.push(keypair.pubkey()); peers.push(keypair.pubkey());
let mut rand_ci = ContactInfo::new_rand(&mut rng, Some(keypair.pubkey())); let mut rand_ci = LegacyContactInfo::new_rand(&mut rng, Some(keypair.pubkey()));
rand_ci.shred_version = shred_version; rand_ci.shred_version = shred_version;
rand_ci.wallclock = timestamp(); rand_ci.wallclock = timestamp();
CrdsValue::new_signed(CrdsData::LegacyContactInfo(rand_ci), &keypair) CrdsValue::new_signed(CrdsData::LegacyContactInfo(rand_ci), &keypair)

View File

@ -1,5 +1,5 @@
use { use {
crate::duplicate_shred, crate::{contact_info, duplicate_shred},
crossbeam_channel::{RecvTimeoutError, SendError}, crossbeam_channel::{RecvTimeoutError, SendError},
std::io, std::io,
thiserror::Error, thiserror::Error,
@ -12,6 +12,8 @@ pub enum GossipError {
#[error(transparent)] #[error(transparent)]
DuplicateShredError(#[from] duplicate_shred::Error), DuplicateShredError(#[from] duplicate_shred::Error),
#[error(transparent)] #[error(transparent)]
InvalidContactInfo(#[from] contact_info::Error),
#[error(transparent)]
Io(#[from] io::Error), Io(#[from] io::Error),
#[error(transparent)] #[error(transparent)]
RecvTimeoutError(#[from] RecvTimeoutError), RecvTimeoutError(#[from] RecvTimeoutError),
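With the #[from] conversion above in place, code returning GossipError can propagate failures from the fallible ContactInfo setters with `?`. A hypothetical call site for illustration (the function name is an assumption; the module paths match this diff's imports):

use {
    solana_gossip::{contact_info::ContactInfo, gossip_error::GossipError},
    std::net::SocketAddr,
};

fn update_gossip_addr(info: &mut ContactInfo, addr: SocketAddr) -> Result<(), GossipError> {
    info.set_gossip(addr)?; // contact_info::Error -> GossipError via #[from]
    Ok(())
}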

View File

@ -331,7 +331,10 @@ pub fn make_gossip_node(
mod tests { mod tests {
use { use {
super::*, super::*,
// before:
crate::cluster_info::{ClusterInfo, Node},
// after:
crate::{
    cluster_info::{ClusterInfo, Node},
    contact_info::ContactInfo,
},
std::sync::{atomic::AtomicBool, Arc}, std::sync::{atomic::AtomicBool, Arc},
}; };
@ -422,7 +425,7 @@ mod tests {
None, None,
TIMEOUT, TIMEOUT,
None, None,
Some(&peer0_info.gossip), Some(&peer0_info.gossip().unwrap()),
); );
assert!(met_criteria); assert!(met_criteria);

View File

@ -2,7 +2,6 @@ use {
crate::crds_value::MAX_WALLCLOCK, crate::crds_value::MAX_WALLCLOCK,
solana_sdk::{ solana_sdk::{
pubkey::Pubkey, pubkey::Pubkey,
rpc_port,
sanitize::{Sanitize, SanitizeError}, sanitize::{Sanitize, SanitizeError},
timing::timestamp, timing::timestamp,
}, },
@ -117,41 +116,6 @@ impl LegacyContactInfo {
node node
} }
// Used in tests
pub fn new_with_socketaddr(pubkey: &Pubkey, bind_addr: &SocketAddr) -> Self {
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut nxt_addr = *addr;
nxt_addr.set_port(addr.port() + nxt);
nxt_addr
}
let tpu = *bind_addr;
let gossip = next_port(bind_addr, 1);
let tvu = next_port(bind_addr, 2);
let tpu_forwards = next_port(bind_addr, 3);
let tvu_forwards = next_port(bind_addr, 4);
let repair = next_port(bind_addr, 5);
let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
let serve_repair = next_port(bind_addr, 6);
let tpu_vote = next_port(bind_addr, 7);
Self {
id: *pubkey,
gossip,
tvu,
tvu_forwards,
repair,
tpu,
tpu_forwards,
tpu_vote,
rpc,
rpc_pubsub,
serve_repair,
wallclock: timestamp(),
shred_version: 0,
}
}
// Construct a LegacyContactInfo that's only usable for gossip // Construct a LegacyContactInfo that's only usable for gossip
pub fn new_gossip_entry_point(gossip_addr: &SocketAddr) -> Self { pub fn new_gossip_entry_point(gossip_addr: &SocketAddr) -> Self {
Self { Self {
@ -196,10 +160,7 @@ impl LegacyContactInfo {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
// before:
use {
    super::*,
    solana_sdk::signature::{Keypair, Signer},
};
// after:
use super::*;
#[test] #[test]
fn test_is_valid_address() { fn test_is_valid_address() {
@ -252,45 +213,6 @@ mod tests {
assert!(ci.tpu_vote.ip().is_unspecified()); assert!(ci.tpu_vote.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified()); assert!(ci.serve_repair.ip().is_unspecified());
} }
#[test]
fn test_socketaddr() {
let addr = socketaddr!(Ipv4Addr::LOCALHOST, 10);
let ci = LegacyContactInfo::new_with_socketaddr(&Keypair::new().pubkey(), &addr);
assert_eq!(ci.tpu, addr);
assert_eq!(ci.tpu_vote.port(), 17);
assert_eq!(ci.gossip.port(), 11);
assert_eq!(ci.tvu.port(), 12);
assert_eq!(ci.tpu_forwards.port(), 13);
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
assert_eq!(ci.serve_repair.port(), 16);
}
#[test]
fn replayed_data_new_with_socketaddr_with_pubkey() {
let keypair = Keypair::new();
let d1 = LegacyContactInfo::new_with_socketaddr(
&keypair.pubkey(),
&socketaddr!(Ipv4Addr::LOCALHOST, 1234),
);
assert_eq!(d1.id, keypair.pubkey());
assert_eq!(d1.gossip, socketaddr!(Ipv4Addr::LOCALHOST, 1235));
assert_eq!(d1.tvu, socketaddr!(Ipv4Addr::LOCALHOST, 1236));
assert_eq!(d1.tpu_forwards, socketaddr!(Ipv4Addr::LOCALHOST, 1237));
assert_eq!(d1.tpu, socketaddr!(Ipv4Addr::LOCALHOST, 1234));
assert_eq!(
d1.rpc,
socketaddr!(Ipv4Addr::LOCALHOST, rpc_port::DEFAULT_RPC_PORT)
);
assert_eq!(
d1.rpc_pubsub,
socketaddr!(Ipv4Addr::LOCALHOST, rpc_port::DEFAULT_RPC_PUBSUB_PORT)
);
assert_eq!(d1.tvu_forwards, socketaddr!(Ipv4Addr::LOCALHOST, 1238));
assert_eq!(d1.repair, socketaddr!(Ipv4Addr::LOCALHOST, 1239));
assert_eq!(d1.serve_repair, socketaddr!(Ipv4Addr::LOCALHOST, 1240));
assert_eq!(d1.tpu_vote, socketaddr!(Ipv4Addr::LOCALHOST, 1241));
}
#[test] #[test]
fn test_valid_client_facing() { fn test_valid_client_facing() {

View File

@ -163,7 +163,7 @@ fn gossip_ring() {
let yv = &listen[y].0; let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap(); let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.wallclock = timestamp(); d.wallclock = timestamp();
listen[x].0.insert_info(d); listen[x].0.insert_legacy_info(d);
} }
}); });
} }
@ -181,7 +181,7 @@ fn gossip_ring_large() {
let yv = &listen[y].0; let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap(); let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.wallclock = timestamp(); d.wallclock = timestamp();
listen[x].0.insert_info(d); listen[x].0.insert_legacy_info(d);
} }
}); });
} }
@ -198,7 +198,7 @@ fn gossip_star() {
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap(); let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.wallclock = timestamp(); yd.wallclock = timestamp();
let xv = &listen[x].0; let xv = &listen[x].0;
xv.insert_info(yd); xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id()); trace!("star leader {}", &xv.id());
} }
}); });
@ -218,7 +218,7 @@ fn gossip_rstar() {
for n in 0..(num - 1) { for n in 0..(num - 1) {
let y = (n + 1) % listen.len(); let y = (n + 1) % listen.len();
let yv = &listen[y].0; let yv = &listen[y].0;
yv.insert_info(xd.clone()); yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.id, yv.id()); trace!("rstar insert {} into {}", xd.id, yv.id());
} }
}); });

View File

@ -1,7 +1,7 @@
use { use {
solana_client::thin_client::ThinClient, solana_client::thin_client::ThinClient,
solana_core::validator::{Validator, ValidatorConfig}, solana_core::validator::{Validator, ValidatorConfig},
solana_gossip::{cluster_info::Node, legacy_contact_info::LegacyContactInfo as ContactInfo}, solana_gossip::{cluster_info::Node, contact_info::ContactInfo},
solana_sdk::{pubkey::Pubkey, signature::Keypair}, solana_sdk::{pubkey::Pubkey, signature::Keypair},
solana_streamer::socket::SocketAddrSpace, solana_streamer::socket::SocketAddrSpace,
std::{path::PathBuf, sync::Arc}, std::{path::PathBuf, sync::Arc},

View File

@ -11,11 +11,11 @@ use {
solana_entry::entry::{Entry, EntrySlice}, solana_entry::entry::{Entry, EntrySlice},
solana_gossip::{ solana_gossip::{
cluster_info::{self, ClusterInfo}, cluster_info::{self, ClusterInfo},
contact_info::{ContactInfo, LegacyContactInfo},
crds::Cursor, crds::Cursor,
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel}, crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel},
gossip_error::GossipError, gossip_error::GossipError,
gossip_service::{self, discover_cluster, GossipService}, gossip_service::{self, discover_cluster, GossipService},
legacy_contact_info::LegacyContactInfo as ContactInfo,
}, },
solana_ledger::blockstore::Blockstore, solana_ledger::blockstore::Blockstore,
solana_runtime::vote_transaction::VoteTransaction, solana_runtime::vote_transaction::VoteTransaction,
@ -37,6 +37,7 @@ use {
solana_streamer::socket::SocketAddrSpace, solana_streamer::socket::SocketAddrSpace,
solana_vote_program::vote_transaction, solana_vote_program::vote_transaction,
std::{ std::{
borrow::Borrow,
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener},
path::Path, path::Path,
@ -49,8 +50,10 @@ use {
}, },
}; };
// before:
pub fn get_client_facing_addr(contact_info: &ContactInfo) -> (SocketAddr, SocketAddr) {
    let (rpc, mut tpu) = contact_info.client_facing_addr();
// after:
pub fn get_client_facing_addr<T: Borrow<LegacyContactInfo>>(
    contact_info: T,
) -> (SocketAddr, SocketAddr) {
    let (rpc, mut tpu) = contact_info.borrow().client_facing_addr();
// QUIC certificate authentication requires the IP Address to match. ContactInfo might have // QUIC certificate authentication requires the IP Address to match. ContactInfo might have
// 0.0.0.0 as the IP instead of 127.0.0.1. // 0.0.0.0 as the IP instead of 127.0.0.1.
tpu.set_ip(IpAddr::V4(Ipv4Addr::LOCALHOST)); tpu.set_ip(IpAddr::V4(Ipv4Addr::LOCALHOST));
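The Borrow bound is what lets callers feed this helper either a reference or an owned value, e.g. straight out of Result::map. A sketch inside this module; the `info` binding stands in for any new-style ContactInfo:

let legacy = LegacyContactInfo::try_from(&info).unwrap();
let _ = get_client_facing_addr(&legacy); // borrowed: T = &LegacyContactInfo
let _ = get_client_facing_addr(legacy);  // owned: T = LegacyContactInfo, as in `.map(get_client_facing_addr)` below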
@ -66,8 +69,12 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
socket_addr_space: SocketAddrSpace, socket_addr_space: SocketAddrSpace,
connection_cache: &Arc<ConnectionCache>, connection_cache: &Arc<ConnectionCache>,
) { ) {
// before:
let cluster_nodes =
    discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
// after:
let cluster_nodes = discover_cluster(
    &entry_point_info.gossip().unwrap(),
    nodes,
    socket_addr_space,
)
.unwrap();
assert!(cluster_nodes.len() >= nodes); assert!(cluster_nodes.len() >= nodes);
let ignore_nodes = Arc::new(ignore_nodes); let ignore_nodes = Arc::new(ignore_nodes);
cluster_nodes.par_iter().for_each(|ingress_node| { cluster_nodes.par_iter().for_each(|ingress_node| {
@ -109,7 +116,9 @@ pub fn verify_balances<S: ::std::hash::BuildHasher>(
node: &ContactInfo, node: &ContactInfo,
connection_cache: Arc<ConnectionCache>, connection_cache: Arc<ConnectionCache>,
) { ) {
// before:
let (rpc, tpu) = get_client_facing_addr(node);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(node)
    .map(get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, connection_cache); let client = ThinClient::new(rpc, tpu, connection_cache);
for (pk, b) in expected_balances { for (pk, b) in expected_balances {
let bal = client let bal = client
@ -120,7 +129,7 @@ pub fn verify_balances<S: ::std::hash::BuildHasher>(
} }
pub fn send_many_transactions( pub fn send_many_transactions(
node: &ContactInfo, node: &LegacyContactInfo,
funding_keypair: &Keypair, funding_keypair: &Keypair,
connection_cache: &Arc<ConnectionCache>, connection_cache: &Arc<ConnectionCache>,
max_tokens_per_transfer: u64, max_tokens_per_transfer: u64,
@ -216,10 +225,16 @@ pub fn kill_entry_and_spend_and_verify_rest(
socket_addr_space: SocketAddrSpace, socket_addr_space: SocketAddrSpace,
) { ) {
info!("kill_entry_and_spend_and_verify_rest..."); info!("kill_entry_and_spend_and_verify_rest...");
// before:
let cluster_nodes =
    discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
// after:
let cluster_nodes = discover_cluster(
    &entry_point_info.gossip().unwrap(),
    nodes,
    socket_addr_space,
)
.unwrap();
assert!(cluster_nodes.len() >= nodes); assert!(cluster_nodes.len() >= nodes);
// before:
let (rpc, tpu) = get_client_facing_addr(entry_point_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(entry_point_info)
    .map(get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let client = ThinClient::new(rpc, tpu, connection_cache.clone());
// sleep long enough to make sure we are in epoch 3 // sleep long enough to make sure we are in epoch 3
@ -234,7 +249,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
info!("sleeping for 2 leader fortnights"); info!("sleeping for 2 leader fortnights");
sleep(Duration::from_millis(slot_millis * first_two_epoch_slots)); sleep(Duration::from_millis(slot_millis * first_two_epoch_slots));
info!("done sleeping for first 2 warmup epochs"); info!("done sleeping for first 2 warmup epochs");
info!("killing entry point: {}", entry_point_info.id); info!("killing entry point: {}", entry_point_info.pubkey());
entry_point_validator_exit.write().unwrap().exit(); entry_point_validator_exit.write().unwrap().exit();
info!("sleeping for some time"); info!("sleeping for some time");
sleep(Duration::from_millis( sleep(Duration::from_millis(
@ -242,7 +257,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
)); ));
info!("done sleeping for 2 fortnights"); info!("done sleeping for 2 fortnights");
for ingress_node in &cluster_nodes { for ingress_node in &cluster_nodes {
if ingress_node.id == entry_point_info.id { if &ingress_node.id == entry_point_info.pubkey() {
info!("ingress_node.id == entry_point_info.id, continuing..."); info!("ingress_node.id == entry_point_info.id, continuing...");
continue; continue;
} }
@ -330,13 +345,15 @@ pub fn check_for_new_roots(
assert!(loop_start.elapsed() < loop_timeout); assert!(loop_start.elapsed() < loop_timeout);
for (i, ingress_node) in contact_infos.iter().enumerate() { for (i, ingress_node) in contact_infos.iter().enumerate() {
// before:
let (rpc, tpu) = get_client_facing_addr(ingress_node);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(ingress_node)
    .map(get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let client = ThinClient::new(rpc, tpu, connection_cache.clone());
let root_slot = client let root_slot = client
.get_slot_with_commitment(CommitmentConfig::finalized()) .get_slot_with_commitment(CommitmentConfig::finalized())
.unwrap_or(0); .unwrap_or(0);
roots[i].insert(root_slot); roots[i].insert(root_slot);
num_roots_map.insert(ingress_node.id, roots[i].len()); num_roots_map.insert(*ingress_node.pubkey(), roots[i].len());
let num_roots = roots.iter().map(|r| r.len()).min().unwrap(); let num_roots = roots.iter().map(|r| r.len()).min().unwrap();
done = num_roots >= num_new_roots; done = num_roots >= num_new_roots;
if done || last_print.elapsed().as_secs() > 3 { if done || last_print.elapsed().as_secs() > 3 {
@ -353,7 +370,7 @@ pub fn check_for_new_roots(
pub fn check_no_new_roots( pub fn check_no_new_roots(
num_slots_to_wait: usize, num_slots_to_wait: usize,
contact_infos: &[ContactInfo], contact_infos: &[LegacyContactInfo],
connection_cache: &Arc<ConnectionCache>, connection_cache: &Arc<ConnectionCache>,
test_name: &str, test_name: &str,
) { ) {
@ -418,13 +435,13 @@ pub fn check_no_new_roots(
fn poll_all_nodes_for_signature( fn poll_all_nodes_for_signature(
entry_point_info: &ContactInfo, entry_point_info: &ContactInfo,
cluster_nodes: &[ContactInfo], cluster_nodes: &[LegacyContactInfo],
connection_cache: &Arc<ConnectionCache>, connection_cache: &Arc<ConnectionCache>,
sig: &Signature, sig: &Signature,
confs: usize, confs: usize,
) -> Result<(), TransportError> { ) -> Result<(), TransportError> {
for validator in cluster_nodes { for validator in cluster_nodes {
if validator.id == entry_point_info.id { if &validator.id == entry_point_info.pubkey() {
continue; continue;
} }
let (rpc, tpu) = get_client_facing_addr(validator); let (rpc, tpu) = get_client_facing_addr(validator);

View File

@ -12,8 +12,9 @@ use {
validator::{Validator, ValidatorConfig, ValidatorStartProgress}, validator::{Validator, ValidatorConfig, ValidatorStartProgress},
}, },
solana_gossip::{ solana_gossip::{
// before:
cluster_info::Node, gossip_service::discover_cluster,
legacy_contact_info::LegacyContactInfo as ContactInfo,
// after:
cluster_info::Node,
contact_info::{ContactInfo, LegacyContactInfo},
gossip_service::discover_cluster,
}, },
solana_ledger::create_new_tmp_ledger, solana_ledger::create_new_tmp_ledger,
solana_runtime::{ solana_runtime::{
@ -265,7 +266,10 @@ impl LocalCluster {
let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let leader_contact_info = leader_node.info.clone(); let leader_contact_info = leader_node.info.clone();
let mut leader_config = safe_clone_config(&config.validator_configs[0]); let mut leader_config = safe_clone_config(&config.validator_configs[0]);
// before:
leader_config.rpc_addrs = Some((leader_node.info.rpc, leader_node.info.rpc_pubsub));
// after:
leader_config.rpc_addrs = Some((
    leader_node.info.rpc().unwrap(),
    leader_node.info.rpc_pubsub().unwrap(),
));
Self::sync_ledger_path_across_nested_config_fields(&mut leader_config, &leader_ledger_path); Self::sync_ledger_path_across_nested_config_fields(&mut leader_config, &leader_ledger_path);
let leader_keypair = Arc::new(leader_keypair.insecure_clone()); let leader_keypair = Arc::new(leader_keypair.insecure_clone());
let leader_vote_keypair = Arc::new(leader_vote_keypair.insecure_clone()); let leader_vote_keypair = Arc::new(leader_vote_keypair.insecure_clone());
@ -349,14 +353,14 @@ impl LocalCluster {
}); });
discover_cluster( discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
config.node_stakes.len() + config.num_listeners as usize, config.node_stakes.len() + config.num_listeners as usize,
socket_addr_space, socket_addr_space,
) )
.unwrap(); .unwrap();
discover_cluster( discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
config.node_stakes.len(), config.node_stakes.len(),
socket_addr_space, socket_addr_space,
) )
@ -429,7 +433,9 @@ impl LocalCluster {
mut voting_keypair: Option<Arc<Keypair>>, mut voting_keypair: Option<Arc<Keypair>>,
socket_addr_space: SocketAddrSpace, socket_addr_space: SocketAddrSpace,
) -> Pubkey { ) -> Pubkey {
// before:
let (rpc, tpu) = cluster_tests::get_client_facing_addr(&self.entry_point_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(&self.entry_point_info)
    .map(cluster_tests::get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, self.connection_cache.clone()); let client = ThinClient::new(rpc, tpu, self.connection_cache.clone());
// Must have enough tokens to fund vote account and set delegate // Must have enough tokens to fund vote account and set delegate
@ -467,7 +473,10 @@ impl LocalCluster {
} }
let mut config = safe_clone_config(validator_config); let mut config = safe_clone_config(validator_config);
// before:
config.rpc_addrs = Some((validator_node.info.rpc, validator_node.info.rpc_pubsub));
// after:
config.rpc_addrs = Some((
    validator_node.info.rpc().unwrap(),
    validator_node.info.rpc_pubsub().unwrap(),
));
Self::sync_ledger_path_across_nested_config_fields(&mut config, &ledger_path); Self::sync_ledger_path_across_nested_config_fields(&mut config, &ledger_path);
let voting_keypair = voting_keypair.unwrap(); let voting_keypair = voting_keypair.unwrap();
let validator_server = Validator::new( let validator_server = Validator::new(
@ -476,7 +485,7 @@ impl LocalCluster {
&ledger_path, &ledger_path,
&voting_keypair.pubkey(), &voting_keypair.pubkey(),
Arc::new(RwLock::new(vec![voting_keypair.clone()])), Arc::new(RwLock::new(vec![voting_keypair.clone()])),
vec![self.entry_point_info.clone()], vec![LegacyContactInfo::try_from(&self.entry_point_info).unwrap()],
&config, &config,
true, // should_check_duplicate_instance true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())), Arc::new(RwLock::new(ValidatorStartProgress::default())),
@ -517,7 +526,9 @@ impl LocalCluster {
} }
pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 { pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 {
// before:
let (rpc, tpu) = cluster_tests::get_client_facing_addr(&self.entry_point_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(&self.entry_point_info)
    .map(cluster_tests::get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, self.connection_cache.clone()); let client = ThinClient::new(rpc, tpu, self.connection_cache.clone());
Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports) Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports)
} }
@ -536,7 +547,7 @@ impl LocalCluster {
assert!(!alive_node_contact_infos.is_empty()); assert!(!alive_node_contact_infos.is_empty());
info!("{} discovering nodes", test_name); info!("{} discovering nodes", test_name);
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip, &alive_node_contact_infos[0].gossip().unwrap(),
alive_node_contact_infos.len(), alive_node_contact_infos.len(),
socket_addr_space, socket_addr_space,
) )
@ -561,12 +572,12 @@ impl LocalCluster {
let alive_node_contact_infos: Vec<_> = self let alive_node_contact_infos: Vec<_> = self
.validators .validators
.values() .values()
.map(|v| v.info.contact_info.clone()) .map(|node| &node.info.contact_info)
.collect(); .collect();
assert!(!alive_node_contact_infos.is_empty()); assert!(!alive_node_contact_infos.is_empty());
info!("{} discovering nodes", test_name); info!("{} discovering nodes", test_name);
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip, &alive_node_contact_infos[0].gossip().unwrap(),
alive_node_contact_infos.len(), alive_node_contact_infos.len(),
socket_addr_space, socket_addr_space,
) )
@ -575,7 +586,11 @@ impl LocalCluster {
info!("{} making sure no new roots on any nodes", test_name); info!("{} making sure no new roots on any nodes", test_name);
cluster_tests::check_no_new_roots( cluster_tests::check_no_new_roots(
num_slots_to_wait, num_slots_to_wait,
// before:
&alive_node_contact_infos,
// after:
&alive_node_contact_infos
    .into_iter()
    .map(LegacyContactInfo::try_from)
    .collect::<std::result::Result<Vec<_>, _>>()
    .unwrap(),
&self.connection_cache, &self.connection_cache,
test_name, test_name,
); );
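The collect-into-Result idiom above converts every node and fails fast on the first one that cannot be down-converted. A minimal standalone sketch, assuming a slice of new-style ContactInfo values named `contact_infos`:

let legacy_nodes: Vec<LegacyContactInfo> = contact_infos
    .iter()
    .map(LegacyContactInfo::try_from)
    .collect::<Result<Vec<_>, _>>()
    .unwrap(); // panics if any node's info cannot be represented as legacy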
@ -762,7 +777,9 @@ impl Cluster for LocalCluster {
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> { fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> {
self.validators.get(pubkey).map(|f| { self.validators.get(pubkey).map(|f| {
// before:
let (rpc, tpu) = cluster_tests::get_client_facing_addr(&f.info.contact_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(&f.info.contact_info)
    .map(cluster_tests::get_client_facing_addr)
    .unwrap();
ThinClient::new(rpc, tpu, self.connection_cache.clone()) ThinClient::new(rpc, tpu, self.connection_cache.clone())
}) })
} }
@ -785,10 +802,11 @@ impl Cluster for LocalCluster {
// Update the stored ContactInfo for this node // Update the stored ContactInfo for this node
let node = Node::new_localhost_with_pubkey(pubkey); let node = Node::new_localhost_with_pubkey(pubkey);
cluster_validator_info.info.contact_info = node.info.clone(); cluster_validator_info.info.contact_info = node.info.clone();
// before:
cluster_validator_info.config.rpc_addrs = Some((node.info.rpc, node.info.rpc_pubsub));
// after:
cluster_validator_info.config.rpc_addrs =
    Some((node.info.rpc().unwrap(), node.info.rpc_pubsub().unwrap()));
let entry_point_info = { let entry_point_info = {
if *pubkey == self.entry_point_info.id { if pubkey == self.entry_point_info.pubkey() {
self.entry_point_info = node.info.clone(); self.entry_point_info = node.info.clone();
None None
} else { } else {
@ -836,7 +854,9 @@ impl Cluster for LocalCluster {
&validator_info.voting_keypair.pubkey(), &validator_info.voting_keypair.pubkey(),
Arc::new(RwLock::new(vec![validator_info.voting_keypair.clone()])), Arc::new(RwLock::new(vec![validator_info.voting_keypair.clone()])),
entry_point_info entry_point_info
// before:
.map(|entry_point_info| vec![entry_point_info])
// after:
.map(|entry_point_info| {
    vec![LegacyContactInfo::try_from(&entry_point_info).unwrap()]
})
.unwrap_or_default(), .unwrap_or_default(),
&safe_clone_config(&cluster_validator_info.config), &safe_clone_config(&cluster_validator_info.config),
true, // should_check_duplicate_instance true, // should_check_duplicate_instance

View File

@ -73,7 +73,7 @@ impl LocalCluster {
) -> NextSnapshotResult { ) -> NextSnapshotResult {
// Get slot after which this was generated // Get slot after which this was generated
let client = self let client = self
.get_validator_client(&self.entry_point_info.id) .get_validator_client(self.entry_point_info.pubkey())
.unwrap(); .unwrap();
let last_slot = client let last_slot = client
.get_slot_with_commitment(CommitmentConfig::processed()) .get_slot_with_commitment(CommitmentConfig::processed())

View File

@ -343,7 +343,7 @@ pub fn run_cluster_partition<C>(
); );
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
num_nodes, num_nodes,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
) )

View File

@ -16,7 +16,7 @@ use {
validator::ValidatorConfig, validator::ValidatorConfig,
}, },
solana_download_utils::download_snapshot_archive, solana_download_utils::download_snapshot_archive,
solana_gossip::gossip_service::discover_cluster, solana_gossip::{contact_info::LegacyContactInfo, gossip_service::discover_cluster},
solana_ledger::{ solana_ledger::{
ancestor_iterator::AncestorIterator, bank_forks_utils, blockstore::Blockstore, ancestor_iterator::AncestorIterator, bank_forks_utils, blockstore::Blockstore,
blockstore_processor::ProcessOptions, blockstore_processor::ProcessOptions,
@ -195,11 +195,13 @@ fn test_local_cluster_signature_subscribe() {
// Get non leader // Get non leader
let non_bootstrap_id = nodes let non_bootstrap_id = nodes
.into_iter() .into_iter()
.find(|id| *id != cluster.entry_point_info.id) .find(|id| id != cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
let non_bootstrap_info = cluster.get_contact_info(&non_bootstrap_id).unwrap(); let non_bootstrap_info = cluster.get_contact_info(&non_bootstrap_id).unwrap();
// before:
let (rpc, tpu) = cluster_tests::get_client_facing_addr(non_bootstrap_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(non_bootstrap_info)
    .map(cluster_tests::get_client_facing_addr)
    .unwrap();
let tx_client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone()); let tx_client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone());
let (blockhash, _) = tx_client let (blockhash, _) = tx_client
@ -214,7 +216,10 @@ fn test_local_cluster_signature_subscribe() {
); );
let (mut sig_subscribe_client, receiver) = PubsubClient::signature_subscribe( let (mut sig_subscribe_client, receiver) = PubsubClient::signature_subscribe(
&format!("ws://{}", &non_bootstrap_info.rpc_pubsub.to_string()), &format!(
"ws://{}",
&non_bootstrap_info.rpc_pubsub().unwrap().to_string()
),
&transaction.signatures[0], &transaction.signatures[0],
Some(RpcSignatureSubscribeConfig { Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::processed()), commitment: Some(CommitmentConfig::processed()),
@ -312,7 +317,7 @@ fn test_two_unbalanced_stakes() {
num_slots_per_epoch, num_slots_per_epoch,
); );
cluster.close_preserve_ledgers(); cluster.close_preserve_ledgers();
let leader_pubkey = cluster.entry_point_info.id; let leader_pubkey = *cluster.entry_point_info.pubkey();
let leader_ledger = cluster.validators[&leader_pubkey].info.ledger_path.clone(); let leader_ledger = cluster.validators[&leader_pubkey].info.ledger_path.clone();
cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize); cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize);
} }
@ -335,14 +340,14 @@ fn test_forwarding() {
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
2, 2,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
) )
.unwrap(); .unwrap();
assert!(cluster_nodes.len() >= 2); assert!(cluster_nodes.len() >= 2);
let leader_pubkey = cluster.entry_point_info.id; let leader_pubkey = *cluster.entry_point_info.pubkey();
let validator_info = cluster_nodes let validator_info = cluster_nodes
.iter() .iter()
@ -394,7 +399,7 @@ fn test_restart_node() {
slots_per_epoch, slots_per_epoch,
); );
cluster_tests::send_many_transactions( cluster_tests::send_many_transactions(
&cluster.entry_point_info, &LegacyContactInfo::try_from(&cluster.entry_point_info).unwrap(),
&cluster.funding_keypair, &cluster.funding_keypair,
&cluster.connection_cache, &cluster.connection_cache,
10, 10,
@ -419,14 +424,16 @@ fn test_mainnet_beta_cluster_type() {
}; };
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
1, 1,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
) )
.unwrap(); .unwrap();
assert_eq!(cluster_nodes.len(), 1); assert_eq!(cluster_nodes.len(), 1);
// before:
let (rpc, tpu) = cluster_tests::get_client_facing_addr(&cluster.entry_point_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(&cluster.entry_point_info)
    .map(cluster_tests::get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone()); let client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone());
// Programs that are available at epoch 0 // Programs that are available at epoch 0
@ -506,7 +513,7 @@ fn test_snapshot_download() {
// Download the snapshot, then boot a validator from it. // Download the snapshot, then boot a validator from it.
download_snapshot_archive( download_snapshot_archive(
&cluster.entry_point_info.rpc, &cluster.entry_point_info.rpc().unwrap(),
&validator_snapshot_test_config &validator_snapshot_test_config
.validator_config .validator_config
.snapshot_config .snapshot_config
@ -637,7 +644,7 @@ fn test_incremental_snapshot_download() {
// Download the snapshots, then boot a validator from them. // Download the snapshots, then boot a validator from them.
download_snapshot_archive( download_snapshot_archive(
&cluster.entry_point_info.rpc, &cluster.entry_point_info.rpc().unwrap(),
&validator_snapshot_test_config &validator_snapshot_test_config
.validator_config .validator_config
.snapshot_config .snapshot_config
@ -665,7 +672,7 @@ fn test_incremental_snapshot_download() {
.unwrap(); .unwrap();
download_snapshot_archive( download_snapshot_archive(
&cluster.entry_point_info.rpc, &cluster.entry_point_info.rpc().unwrap(),
&validator_snapshot_test_config &validator_snapshot_test_config
.validator_config .validator_config
.snapshot_config .snapshot_config
@ -813,7 +820,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st
// Download the snapshots, then boot a validator from them. // Download the snapshots, then boot a validator from them.
info!("Downloading full snapshot to validator..."); info!("Downloading full snapshot to validator...");
download_snapshot_archive( download_snapshot_archive(
&cluster.entry_point_info.rpc, &cluster.entry_point_info.rpc().unwrap(),
validator_snapshot_test_config validator_snapshot_test_config
.full_snapshot_archives_dir .full_snapshot_archives_dir
.path(), .path(),
@ -847,7 +854,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st
info!("Downloading incremental snapshot to validator..."); info!("Downloading incremental snapshot to validator...");
download_snapshot_archive( download_snapshot_archive(
&cluster.entry_point_info.rpc, &cluster.entry_point_info.rpc().unwrap(),
validator_snapshot_test_config validator_snapshot_test_config
.full_snapshot_archives_dir .full_snapshot_archives_dir
.path(), .path(),
@ -1244,7 +1251,7 @@ fn test_snapshot_restart_tower() {
let all_pubkeys = cluster.get_node_pubkeys(); let all_pubkeys = cluster.get_node_pubkeys();
let validator_id = all_pubkeys let validator_id = all_pubkeys
.into_iter() .into_iter()
.find(|x| *x != cluster.entry_point_info.id) .find(|x| x != cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
let validator_info = cluster.exit_node(&validator_id); let validator_info = cluster.exit_node(&validator_id);
@ -1344,7 +1351,7 @@ fn test_snapshots_blockstore_floor() {
// Start up a new node from a snapshot // Start up a new node from a snapshot
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
1, 1,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
) )
@ -1365,7 +1372,7 @@ fn test_snapshots_blockstore_floor() {
let all_pubkeys = cluster.get_node_pubkeys(); let all_pubkeys = cluster.get_node_pubkeys();
let validator_id = all_pubkeys let validator_id = all_pubkeys
.into_iter() .into_iter()
.find(|x| *x != cluster.entry_point_info.id) .find(|x| x != cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
let validator_client = cluster.get_validator_client(&validator_id).unwrap(); let validator_client = cluster.get_validator_client(&validator_id).unwrap();
let mut current_slot = 0; let mut current_slot = 0;
@ -1430,7 +1437,7 @@ fn test_snapshots_restart_validity() {
// forwarded to and processed. // forwarded to and processed.
trace!("Sending transactions"); trace!("Sending transactions");
let new_balances = cluster_tests::send_many_transactions( let new_balances = cluster_tests::send_many_transactions(
&cluster.entry_point_info, &LegacyContactInfo::try_from(&cluster.entry_point_info).unwrap(),
&cluster.funding_keypair, &cluster.funding_keypair,
&cluster.connection_cache, &cluster.connection_cache,
10, 10,
@ -1525,7 +1532,7 @@ fn test_wait_for_max_stake() {
..ClusterConfig::default() ..ClusterConfig::default()
}; };
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let client = RpcClient::new_socket(cluster.entry_point_info.rpc); let client = RpcClient::new_socket(cluster.entry_point_info.rpc().unwrap());
assert!(client assert!(client
.wait_for_max_stake(CommitmentConfig::default(), 33.0f32) .wait_for_max_stake(CommitmentConfig::default(), 33.0f32)
@ -1550,7 +1557,7 @@ fn test_no_voting() {
}; };
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let client = cluster let client = cluster
.get_validator_client(&cluster.entry_point_info.id) .get_validator_client(cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
loop { loop {
let last_slot = client let last_slot = client
@ -1563,7 +1570,7 @@ fn test_no_voting() {
} }
cluster.close_preserve_ledgers(); cluster.close_preserve_ledgers();
let leader_pubkey = cluster.entry_point_info.id; let leader_pubkey = *cluster.entry_point_info.pubkey();
let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone(); let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone();
let ledger = Blockstore::open(&ledger_path).unwrap(); let ledger = Blockstore::open(&ledger_path).unwrap();
for i in 0..2 * VOTE_THRESHOLD_DEPTH { for i in 0..2 * VOTE_THRESHOLD_DEPTH {
@ -2481,11 +2488,11 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
let on_partition_start = |cluster: &mut LocalCluster, _: &mut ()| { let on_partition_start = |cluster: &mut LocalCluster, _: &mut ()| {
let update_client = cluster let update_client = cluster
.get_validator_client(&cluster.entry_point_info.id) .get_validator_client(cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
update_client_sender.send(update_client).unwrap(); update_client_sender.send(update_client).unwrap();
let scan_client = cluster let scan_client = cluster
.get_validator_client(&cluster.entry_point_info.id) .get_validator_client(cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
scan_client_sender.send(scan_client).unwrap(); scan_client_sender.send(scan_client).unwrap();
}; };
@ -2542,7 +2549,10 @@ fn test_rpc_block_subscribe() {
}; };
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let (mut block_subscribe_client, receiver) = PubsubClient::block_subscribe( let (mut block_subscribe_client, receiver) = PubsubClient::block_subscribe(
&format!("ws://{}", &cluster.entry_point_info.rpc_pubsub.to_string()), &format!(
"ws://{}",
&cluster.entry_point_info.rpc_pubsub().unwrap().to_string()
),
RpcBlockSubscribeFilter::All, RpcBlockSubscribeFilter::All,
Some(RpcBlockSubscribeConfig { Some(RpcBlockSubscribeConfig {
commitment: Some(CommitmentConfig::confirmed()), commitment: Some(CommitmentConfig::confirmed()),
@ -2628,13 +2638,15 @@ fn test_oc_bad_signatures() {
); );
// 3) Start up a spy to listen for and push votes to leader TPU // 3) Start up a spy to listen for and push votes to leader TPU
// before:
let (rpc, tpu) = cluster_tests::get_client_facing_addr(&cluster.entry_point_info);
// after:
let (rpc, tpu) = LegacyContactInfo::try_from(&cluster.entry_point_info)
    .map(cluster_tests::get_client_facing_addr)
    .unwrap();
let client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone()); let client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone());
let cluster_funding_keypair = cluster.funding_keypair.insecure_clone(); let cluster_funding_keypair = cluster.funding_keypair.insecure_clone();
let voter_thread_sleep_ms: usize = 100; let voter_thread_sleep_ms: usize = 100;
let num_votes_simulated = Arc::new(AtomicUsize::new(0)); let num_votes_simulated = Arc::new(AtomicUsize::new(0));
let gossip_voter = cluster_tests::start_gossip_voter( let gossip_voter = cluster_tests::start_gossip_voter(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
&node_keypair, &node_keypair,
|(_label, leader_vote_tx)| { |(_label, leader_vote_tx)| {
let vote = vote_parser::parse_vote_transaction(&leader_vote_tx) let vote = vote_parser::parse_vote_transaction(&leader_vote_tx)
@ -2686,7 +2698,10 @@ fn test_oc_bad_signatures() {
); );
let (mut block_subscribe_client, receiver) = PubsubClient::block_subscribe( let (mut block_subscribe_client, receiver) = PubsubClient::block_subscribe(
&format!("ws://{}", &cluster.entry_point_info.rpc_pubsub.to_string()), &format!(
"ws://{}",
&cluster.entry_point_info.rpc_pubsub().unwrap().to_string()
),
RpcBlockSubscribeFilter::All, RpcBlockSubscribeFilter::All,
Some(RpcBlockSubscribeConfig { Some(RpcBlockSubscribeConfig {
commitment: Some(CommitmentConfig::confirmed()), commitment: Some(CommitmentConfig::confirmed()),
@ -2982,10 +2997,10 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
let all_pubkeys = cluster.get_node_pubkeys(); let all_pubkeys = cluster.get_node_pubkeys();
let other_validator_id = all_pubkeys let other_validator_id = all_pubkeys
.into_iter() .into_iter()
.find(|x| *x != cluster.entry_point_info.id) .find(|x| x != cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
let client = cluster let client = cluster
.get_validator_client(&cluster.entry_point_info.id) .get_validator_client(cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
update_client_sender.send(client).unwrap(); update_client_sender.send(client).unwrap();
let scan_client = cluster.get_validator_client(&other_validator_id).unwrap(); let scan_client = cluster.get_validator_client(&other_validator_id).unwrap();
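The recurring change in these tests: the legacy contact-info exposed public fields (entry_point_info.id, .gossip, .rpc), while the new ContactInfo goes through pubkey() and Result-returning socket getters, with LegacyContactInfo::try_from bridging call sites that still want the old struct. A minimal sketch of the accessor shift, using a hypothetical stand-in type rather than the real solana_gossip one:

use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};

// Hypothetical stand-in mirroring the getters used above; the real type
// lives in solana_gossip::contact_info.
struct ContactInfo {
    pubkey: [u8; 32],
    gossip: Option<SocketAddr>,
}

impl ContactInfo {
    fn pubkey(&self) -> &[u8; 32] {
        &self.pubkey
    }
    // Socket getters are fallible: the new format stores sockets sparsely,
    // so any address may be absent.
    fn gossip(&self) -> Result<SocketAddr, &'static str> {
        self.gossip.ok_or("gossip socket not set")
    }
}

fn main() {
    let node = ContactInfo {
        pubkey: [0u8; 32],
        gossip: Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 8000))),
    };
    // Old: `&node.id` and `node.gossip` (direct field access).
    // New: borrow the key, unwrap (or propagate) the fallible getter.
    let _id = *node.pubkey();
    let _gossip = node.gossip().unwrap();
}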

View File

@ -448,14 +448,14 @@ fn test_duplicate_shreds_broadcast_leader() {
let our_info = cluster.exit_node(&our_id); let our_info = cluster.exit_node(&our_id);
let node_keypair = our_info.info.keypair; let node_keypair = our_info.info.keypair;
let vote_keypair = our_info.info.voting_keypair; let vote_keypair = our_info.info.voting_keypair;
let bad_leader_id = cluster.entry_point_info.id; let bad_leader_id = *cluster.entry_point_info.pubkey();
let bad_leader_ledger_path = cluster.validators[&bad_leader_id].info.ledger_path.clone(); let bad_leader_ledger_path = cluster.validators[&bad_leader_id].info.ledger_path.clone();
info!("our node id: {}", node_keypair.pubkey()); info!("our node id: {}", node_keypair.pubkey());
// 3) Start up a gossip instance to listen for and push votes // 3) Start up a gossip instance to listen for and push votes
let voter_thread_sleep_ms = 100; let voter_thread_sleep_ms = 100;
let gossip_voter = cluster_tests::start_gossip_voter( let gossip_voter = cluster_tests::start_gossip_voter(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
&node_keypair, &node_keypair,
move |(label, leader_vote_tx)| { move |(label, leader_vote_tx)| {
// Filter out votes not from the bad leader // Filter out votes not from the bad leader
@ -721,7 +721,8 @@ fn test_switch_threshold_uses_gossip_votes() {
cluster cluster
.get_contact_info(&context.heaviest_validator_key) .get_contact_info(&context.heaviest_validator_key)
.unwrap() .unwrap()
.gossip, .gossip()
.unwrap(),
&SocketAddrSpace::Unspecified, &SocketAddrSpace::Unspecified,
) )
.unwrap(); .unwrap();
@ -798,7 +799,7 @@ fn test_listener_startup() {
}; };
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
4, 4,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
) )

View File

@ -90,7 +90,7 @@ fn test_consistency_halt() {
sleep(Duration::from_millis(5000)); sleep(Duration::from_millis(5000));
let cluster_nodes = discover_cluster( let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
1, 1,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
) )
@ -123,7 +123,7 @@ fn test_consistency_halt() {
let num_nodes = 2; let num_nodes = 2;
assert_eq!( assert_eq!(
discover_cluster( discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
num_nodes, num_nodes,
SocketAddrSpace::Unspecified SocketAddrSpace::Unspecified
) )
@ -136,7 +136,7 @@ fn test_consistency_halt() {
let mut encountered_error = false; let mut encountered_error = false;
loop { loop {
let discover = discover_cluster( let discover = discover_cluster(
&cluster.entry_point_info.gossip, &cluster.entry_point_info.gossip().unwrap(),
2, 2,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
); );
@ -154,7 +154,7 @@ fn test_consistency_halt() {
} }
} }
let client = cluster let client = cluster
.get_validator_client(&cluster.entry_point_info.id) .get_validator_client(cluster.entry_point_info.pubkey())
.unwrap(); .unwrap();
if let Ok(slot) = client.get_slot() { if let Ok(slot) = client.get_slot() {
if slot > 210 { if slot > 210 {
@ -187,7 +187,7 @@ fn test_leader_failure_4() {
&local.entry_point_info, &local.entry_point_info,
&local &local
.validators .validators
.get(&local.entry_point_info.id) .get(local.entry_point_info.pubkey())
.unwrap() .unwrap()
.config .config
.validator_exit, .validator_exit,

View File

@ -59,7 +59,7 @@ impl TpuInfo for ClusterTpuInfo {
mod test { mod test {
use { use {
super::*, super::*,
solana_gossip::legacy_contact_info::LegacyContactInfo as ContactInfo, solana_gossip::contact_info::ContactInfo,
solana_ledger::{ solana_ledger::{
blockstore::Blockstore, get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, blockstore::Blockstore, get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache,
}, },

View File

@ -16,9 +16,7 @@ use {
solana_client::connection_cache::ConnectionCache, solana_client::connection_cache::ConnectionCache,
solana_entry::entry::Entry, solana_entry::entry::Entry,
solana_faucet::faucet::request_airdrop_transaction, solana_faucet::faucet::request_airdrop_transaction,
solana_gossip::{ solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
cluster_info::ClusterInfo, legacy_contact_info::LegacyContactInfo as ContactInfo,
},
solana_ledger::{ solana_ledger::{
blockstore::{Blockstore, SignatureInfosForAddress}, blockstore::{Blockstore, SignatureInfosForAddress},
blockstore_db::BlockstoreError, blockstore_db::BlockstoreError,
@ -348,13 +346,13 @@ impl JsonRpcRequestProcessor {
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
let cluster_info = Arc::new({ let cluster_info = Arc::new({
let keypair = Arc::new(Keypair::new()); let keypair = Arc::new(Keypair::new());
let contact_info = ContactInfo { let contact_info = ContactInfo::new_localhost(
id: keypair.pubkey(), &keypair.pubkey(),
..ContactInfo::default() solana_sdk::timing::timestamp(), // wallclock
}; );
ClusterInfo::new(contact_info, keypair, socket_addr_space) ClusterInfo::new(contact_info, keypair, socket_addr_space)
}); });
let tpu_address = cluster_info.my_contact_info().tpu; let tpu_address = cluster_info.my_contact_info().tpu().unwrap();
let (sender, receiver) = unbounded(); let (sender, receiver) = unbounded();
SendTransactionService::new::<NullTpuInfo>( SendTransactionService::new::<NullTpuInfo>(
tpu_address, tpu_address,
@ -3444,11 +3442,7 @@ pub mod rpc_full {
let cluster_info = &meta.cluster_info; let cluster_info = &meta.cluster_info;
let socket_addr_space = cluster_info.socket_addr_space(); let socket_addr_space = cluster_info.socket_addr_space();
let valid_address_or_none = |addr: &SocketAddr| -> Option<SocketAddr> { let valid_address_or_none = |addr: &SocketAddr| -> Option<SocketAddr> {
if ContactInfo::is_valid_address(addr, socket_addr_space) { ContactInfo::is_valid_address(addr, socket_addr_space).then_some(*addr)
Some(*addr)
} else {
None
}
}; };
let my_shred_version = cluster_info.my_shred_version(); let my_shred_version = cluster_info.my_shred_version();
Ok(cluster_info Ok(cluster_info
@ -4686,10 +4680,10 @@ pub mod tests {
pub(crate) fn new_test_cluster_info() -> ClusterInfo { pub(crate) fn new_test_cluster_info() -> ClusterInfo {
let keypair = Arc::new(Keypair::new()); let keypair = Arc::new(Keypair::new());
let contact_info = ContactInfo { let contact_info = ContactInfo::new_localhost(
id: keypair.pubkey(), &keypair.pubkey(),
..ContactInfo::default() solana_sdk::timing::timestamp(), // wallclock
}; );
ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified) ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified)
} }
@ -5123,6 +5117,15 @@ pub mod tests {
let request = create_test_request("getClusterNodes", None); let request = create_test_request("getClusterNodes", None);
let result: Value = parse_success_result(rpc.handle_request_sync(request)); let result: Value = parse_success_result(rpc.handle_request_sync(request));
let expected = json!([{ let expected = json!([{
"pubkey": rpc.identity.to_string(),
"gossip": "127.0.0.1:8000",
"shredVersion": 0u16,
"tpu": "127.0.0.1:8003",
"rpc": format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PORT),
"pubsub": format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT),
"version": null,
"featureSet": null,
}, {
"pubkey": rpc.leader_pubkey().to_string(), "pubkey": rpc.leader_pubkey().to_string(),
"gossip": "127.0.0.1:1235", "gossip": "127.0.0.1:1235",
"shredVersion": 0u16, "shredVersion": 0u16,
@ -6390,7 +6393,7 @@ pub mod tests {
); );
ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified) ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified)
}); });
let tpu_address = cluster_info.my_contact_info().tpu; let tpu_address = cluster_info.my_contact_info().tpu().unwrap();
let (meta, receiver) = JsonRpcRequestProcessor::new( let (meta, receiver) = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(), JsonRpcConfig::default(),
None, None,
@ -6657,7 +6660,7 @@ pub mod tests {
))); )));
let cluster_info = Arc::new(new_test_cluster_info()); let cluster_info = Arc::new(new_test_cluster_info());
let tpu_address = cluster_info.my_contact_info().tpu; let tpu_address = cluster_info.my_contact_info().tpu().unwrap();
let (request_processor, receiver) = JsonRpcRequestProcessor::new( let (request_processor, receiver) = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(), JsonRpcConfig::default(),
None, None,
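new_test_cluster_info and its sibling above can no longer use struct-update syntax (ContactInfo { id, ..default() }) because the new type keeps its fields private; construction goes through constructors such as new_localhost, which also populates sockets on 127.0.0.1. That is presumably why the getClusterNodes test now expects its own node in the output as well. A rough mock of the constructor shape, with hypothetical fields:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Hypothetical mock: fields are private, so callers must go through a
// constructor instead of struct-update syntax.
pub struct ContactInfo {
    pubkey: String,
    wallclock: u64,
    rpc: Option<SocketAddr>,
}

impl ContactInfo {
    pub fn new_localhost(pubkey: &str, wallclock: u64) -> Self {
        Self {
            pubkey: pubkey.to_string(),
            wallclock,
            // Unlike a default-constructed legacy struct (all 0.0.0.0:0),
            // new_localhost yields usable localhost sockets.
            rpc: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8899)),
        }
    }
    pub fn rpc(&self) -> Result<SocketAddr, &'static str> {
        self.rpc.ok_or("rpc socket not set")
    }
    pub fn wallclock(&self) -> u64 {
        self.wallclock
    }
    pub fn pubkey(&self) -> &str {
        &self.pubkey
    }
}

fn main() {
    let info = ContactInfo::new_localhost("test-pubkey", 0);
    assert!(info.rpc().is_ok());
    assert_eq!(info.wallclock(), 0);
    assert_eq!(info.pubkey(), "test-pubkey");
}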

View File

@ -377,7 +377,10 @@ impl JsonRpcService {
LARGEST_ACCOUNTS_CACHE_DURATION, LARGEST_ACCOUNTS_CACHE_DURATION,
))); )));
let tpu_address = cluster_info.my_contact_info().tpu; let tpu_address = cluster_info
.my_contact_info()
.tpu()
.map_err(|err| format!("{err}"))?;
// sadly, some parts of our current rpc implementation block the jsonrpc's // sadly, some parts of our current rpc implementation block the jsonrpc's
// _socket-listening_ event loop for too long, due to (blocking) long IO or intensive CPU, // _socket-listening_ event loop for too long, due to (blocking) long IO or intensive CPU,
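Note the error handling in this hunk: unlike the test paths, which unwrap(), the service constructor stringifies the getter error and propagates it with ?, so a node without a TPU address fails startup with a message instead of a panic. The shape, sketched with a stand-in getter:

use std::net::SocketAddr;

// Stand-in for the fallible tpu() getter used above.
fn tpu(addr: Option<SocketAddr>) -> Result<SocketAddr, String> {
    addr.ok_or_else(|| "no tpu socket".to_string())
}

fn start_service(addr: Option<SocketAddr>) -> Result<(), String> {
    // Stringify the getter error and bail out instead of panicking.
    let tpu_address = tpu(addr).map_err(|err| format!("{err}"))?;
    println!("binding send-transaction-service to {tpu_address}");
    Ok(())
}

fn main() {
    assert!(start_service(None).is_err());
    assert!(start_service(Some("127.0.0.1:1027".parse().unwrap())).is_ok());
}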

View File

@ -16,6 +16,7 @@ impl SocketAddrSpace {
} }
/// Returns true if the IP address is valid. /// Returns true if the IP address is valid.
#[must_use]
pub fn check(&self, addr: &SocketAddr) -> bool { pub fn check(&self, addr: &SocketAddr) -> bool {
if self == &SocketAddrSpace::Unspecified { if self == &SocketAddrSpace::Unspecified {
return true; return true;
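With socket addresses now flowing through Result and Option combinators, a silently dropped boolean from check would be easy to miss; #[must_use] makes the compiler warn at such call sites. A compilable miniature of the same function (the real implementation also rejects private/internal ranges):

use std::net::SocketAddr;

#[derive(PartialEq)]
enum SocketAddrSpace {
    Unspecified,
    Global,
}

impl SocketAddrSpace {
    /// Returns true if the IP address is valid.
    #[must_use] // the compiler now warns if a caller ignores the result
    fn check(&self, addr: &SocketAddr) -> bool {
        if self == &SocketAddrSpace::Unspecified {
            return true;
        }
        // Sketch-only filter; stands in for the real validity rules.
        !addr.ip().is_unspecified()
    }
}

fn main() {
    let addr: SocketAddr = "0.0.0.0:0".parse().unwrap();
    assert!(SocketAddrSpace::Unspecified.check(&addr));
    assert!(!SocketAddrSpace::Global.check(&addr));
}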

View File

@ -750,15 +750,16 @@ impl TestValidator {
config.node_config.bind_ip_addr, config.node_config.bind_ip_addr,
); );
if let Some((rpc, rpc_pubsub)) = config.rpc_ports { if let Some((rpc, rpc_pubsub)) = config.rpc_ports {
node.info.rpc = SocketAddr::new(node.info.gossip.ip(), rpc); let addr = node.info.gossip().unwrap().ip();
node.info.rpc_pubsub = SocketAddr::new(node.info.gossip.ip(), rpc_pubsub); node.info.set_rpc((addr, rpc)).unwrap();
node.info.set_rpc_pubsub((addr, rpc_pubsub)).unwrap();
} }
let vote_account_address = validator_vote_account.pubkey(); let vote_account_address = validator_vote_account.pubkey();
let rpc_url = format!("http://{}", node.info.rpc); let rpc_url = format!("http://{}", node.info.rpc().unwrap());
let rpc_pubsub_url = format!("ws://{}/", node.info.rpc_pubsub); let rpc_pubsub_url = format!("ws://{}/", node.info.rpc_pubsub().unwrap());
let tpu = node.info.tpu; let tpu = node.info.tpu().unwrap();
let gossip = node.info.gossip; let gossip = node.info.gossip().unwrap();
{ {
let mut authorized_voter_keypairs = config.authorized_voter_keypairs.write().unwrap(); let mut authorized_voter_keypairs = config.authorized_voter_keypairs.write().unwrap();
@ -793,10 +794,13 @@ impl TestValidator {
let mut validator_config = ValidatorConfig { let mut validator_config = ValidatorConfig {
geyser_plugin_config_files: config.geyser_plugin_config_files.clone(), geyser_plugin_config_files: config.geyser_plugin_config_files.clone(),
rpc_addrs: Some(( rpc_addrs: Some((
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), node.info.rpc.port()),
SocketAddr::new( SocketAddr::new(
IpAddr::V4(Ipv4Addr::UNSPECIFIED), IpAddr::V4(Ipv4Addr::UNSPECIFIED),
node.info.rpc_pubsub.port(), node.info.rpc().unwrap().port(),
),
SocketAddr::new(
IpAddr::V4(Ipv4Addr::UNSPECIFIED),
node.info.rpc_pubsub().unwrap().port(),
), ),
)), )),
rpc_config: config.rpc_config.clone(), rpc_config: config.rpc_config.clone(),
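The setter side of the new API mirrors the getters: set_rpc and friends accept anything convertible into a SocketAddr, here an (IpAddr, u16) pair, and return a Result, hence the unwrap()s above. A hypothetical mock of that shape:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Hypothetical mock: the setter validates before storing, hence the Result.
struct ContactInfo {
    rpc: Option<SocketAddr>,
}

impl ContactInfo {
    fn set_rpc(&mut self, addr: impl Into<SocketAddr>) -> Result<(), String> {
        let addr = addr.into();
        if addr.port() == 0 {
            return Err("invalid port".to_string());
        }
        self.rpc = Some(addr);
        Ok(())
    }
}

fn main() {
    let mut info = ContactInfo { rpc: None };
    let ip = IpAddr::V4(Ipv4Addr::LOCALHOST);
    // (IpAddr, u16) implements Into<SocketAddr>, matching the call sites above.
    info.set_rpc((ip, 8899)).unwrap();
    assert_eq!(info.rpc.unwrap().port(), 8899);
}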

View File

@ -707,7 +707,7 @@ pub mod test {
let account_keypair_refs: Vec<_> = account_keypairs.iter().collect(); let account_keypair_refs: Vec<_> = account_keypairs.iter().collect();
let mut start = Measure::start("total accounts run"); let mut start = Measure::start("total accounts run");
run_transactions_dos( run_transactions_dos(
cluster.entry_point_info.rpc, cluster.entry_point_info.rpc().unwrap(),
faucet_addr, faucet_addr,
&[&cluster.funding_keypair], &[&cluster.funding_keypair],
iterations, iterations,

View File

@ -9,9 +9,7 @@ use {
solana_core::{ solana_core::{
consensus::Tower, tower_storage::TowerStorage, validator::ValidatorStartProgress, consensus::Tower, tower_storage::TowerStorage, validator::ValidatorStartProgress,
}, },
solana_gossip::{ solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
cluster_info::ClusterInfo, legacy_contact_info::LegacyContactInfo as ContactInfo,
},
solana_rpc::rpc::verify_pubkey, solana_rpc::rpc::verify_pubkey,
solana_rpc_client_api::{config::RpcAccountIndex, custom_error::RpcCustomError}, solana_rpc_client_api::{config::RpcAccountIndex, custom_error::RpcCustomError},
solana_runtime::{accounts_index::AccountIndex, bank_forks::BankForks}, solana_runtime::{accounts_index::AccountIndex, bank_forks::BankForks},
@ -24,7 +22,7 @@ use {
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
error, error,
fmt::{self, Display}, fmt::{self, Display},
net::SocketAddr, net::{IpAddr, Ipv4Addr, SocketAddr},
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::{Arc, RwLock}, sync::{Arc, RwLock},
thread::{self, Builder}, thread::{self, Builder},
@ -91,36 +89,28 @@ pub struct AdminRpcRepairWhitelist {
} }
impl From<ContactInfo> for AdminRpcContactInfo { impl From<ContactInfo> for AdminRpcContactInfo {
fn from(contact_info: ContactInfo) -> Self { fn from(node: ContactInfo) -> Self {
let ContactInfo { macro_rules! unwrap_socket {
id, ($name:ident) => {
gossip, node.$name().unwrap_or_else(|_| {
tvu, SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), /*port:*/ 0u16)
tvu_forwards, })
repair, };
tpu, }
tpu_forwards,
tpu_vote,
rpc,
rpc_pubsub,
serve_repair,
wallclock,
shred_version,
} = contact_info;
Self { Self {
id: id.to_string(), id: node.pubkey().to_string(),
last_updated_timestamp: wallclock, last_updated_timestamp: node.wallclock(),
gossip, gossip: unwrap_socket!(gossip),
tvu, tvu: unwrap_socket!(tvu),
tvu_forwards, tvu_forwards: unwrap_socket!(tvu_forwards),
repair, repair: unwrap_socket!(repair),
tpu, tpu: unwrap_socket!(tpu),
tpu_forwards, tpu_forwards: unwrap_socket!(tpu_forwards),
tpu_vote, tpu_vote: unwrap_socket!(tpu_vote),
rpc, rpc: unwrap_socket!(rpc),
rpc_pubsub, rpc_pubsub: unwrap_socket!(rpc_pubsub),
serve_repair, serve_repair: unwrap_socket!(serve_repair),
shred_version, shred_version: node.shred_version(),
} }
} }
} }
@ -693,10 +683,11 @@ mod tests {
fn start_with_config(config: TestConfig) -> Self { fn start_with_config(config: TestConfig) -> Self {
let keypair = Arc::new(Keypair::new()); let keypair = Arc::new(Keypair::new());
let cluster_info = Arc::new(ClusterInfo::new( let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo { ContactInfo::new(
id: keypair.pubkey(), keypair.pubkey(),
..ContactInfo::default() solana_sdk::timing::timestamp(), // wallclock
}, 0u16, // shred_version
),
keypair, keypair,
SocketAddrSpace::Unspecified, SocketAddrSpace::Unspecified,
)); ));
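The unwrap_socket! macro keeps AdminRpcContactInfo total over partial contact info: any socket the node did not publish degrades to the unspecified address rather than failing the whole conversion. A self-contained sketch of the same fallback, with stand-in getters:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

struct Node;

impl Node {
    // Stand-in fallible getters, as in the From<ContactInfo> impl above.
    fn gossip(&self) -> Result<SocketAddr, ()> {
        Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8000))
    }
    fn rpc(&self) -> Result<SocketAddr, ()> {
        Err(()) // pretend this socket is unset
    }
}

fn main() {
    let node = Node;
    // Same shape as unwrap_socket!: a missing socket becomes 0.0.0.0:0.
    macro_rules! unwrap_socket {
        ($name:ident) => {
            node.$name().unwrap_or_else(|_| {
                SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), /*port:*/ 0u16)
            })
        };
    }
    assert_eq!(unwrap_socket!(gossip).port(), 8000);
    assert_eq!(unwrap_socket!(rpc).port(), 0);
}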

View File

@ -60,38 +60,43 @@ fn verify_reachable_ports(
validator_config: &ValidatorConfig, validator_config: &ValidatorConfig,
socket_addr_space: &SocketAddrSpace, socket_addr_space: &SocketAddrSpace,
) -> bool { ) -> bool {
let verify_address = |addr: &Option<SocketAddr>| -> bool {
addr.as_ref()
.map(|addr| socket_addr_space.check(addr))
.unwrap_or_default()
};
let mut udp_sockets = vec![&node.sockets.gossip, &node.sockets.repair]; let mut udp_sockets = vec![&node.sockets.gossip, &node.sockets.repair];
if ContactInfo::is_valid_address(&node.info.serve_repair, socket_addr_space) { if verify_address(&node.info.serve_repair().ok()) {
udp_sockets.push(&node.sockets.serve_repair); udp_sockets.push(&node.sockets.serve_repair);
} }
if ContactInfo::is_valid_address(&node.info.tpu, socket_addr_space) { if verify_address(&node.info.tpu().ok()) {
udp_sockets.extend(node.sockets.tpu.iter()); udp_sockets.extend(node.sockets.tpu.iter());
udp_sockets.push(&node.sockets.tpu_quic); udp_sockets.push(&node.sockets.tpu_quic);
} }
if ContactInfo::is_valid_address(&node.info.tpu_forwards, socket_addr_space) { if verify_address(&node.info.tpu_forwards().ok()) {
udp_sockets.extend(node.sockets.tpu_forwards.iter()); udp_sockets.extend(node.sockets.tpu_forwards.iter());
udp_sockets.push(&node.sockets.tpu_forwards_quic); udp_sockets.push(&node.sockets.tpu_forwards_quic);
} }
if ContactInfo::is_valid_address(&node.info.tpu_vote, socket_addr_space) { if verify_address(&node.info.tpu_vote().ok()) {
udp_sockets.extend(node.sockets.tpu_vote.iter()); udp_sockets.extend(node.sockets.tpu_vote.iter());
} }
if ContactInfo::is_valid_address(&node.info.tvu, socket_addr_space) { if verify_address(&node.info.tvu().ok()) {
udp_sockets.extend(node.sockets.tvu.iter()); udp_sockets.extend(node.sockets.tvu.iter());
udp_sockets.extend(node.sockets.broadcast.iter()); udp_sockets.extend(node.sockets.broadcast.iter());
udp_sockets.extend(node.sockets.retransmit_sockets.iter()); udp_sockets.extend(node.sockets.retransmit_sockets.iter());
} }
if ContactInfo::is_valid_address(&node.info.tvu_forwards, socket_addr_space) { if verify_address(&node.info.tvu_forwards().ok()) {
udp_sockets.extend(node.sockets.tvu_forwards.iter()); udp_sockets.extend(node.sockets.tvu_forwards.iter());
} }
let mut tcp_listeners = vec![]; let mut tcp_listeners = vec![];
if let Some((rpc_addr, rpc_pubsub_addr)) = validator_config.rpc_addrs { if let Some((rpc_addr, rpc_pubsub_addr)) = validator_config.rpc_addrs {
for (purpose, bind_addr, public_addr) in &[ for (purpose, bind_addr, public_addr) in &[
("RPC", rpc_addr, &node.info.rpc), ("RPC", rpc_addr, node.info.rpc()),
("RPC pubsub", rpc_pubsub_addr, &node.info.rpc_pubsub), ("RPC pubsub", rpc_pubsub_addr, node.info.rpc_pubsub()),
] { ] {
if ContactInfo::is_valid_address(public_addr, socket_addr_space) { if verify_address(&public_addr.as_ref().ok().copied()) {
tcp_listeners.push(( tcp_listeners.push((
bind_addr.port(), bind_addr.port(),
TcpListener::bind(bind_addr).unwrap_or_else(|err| { TcpListener::bind(bind_addr).unwrap_or_else(|err| {
@ -501,7 +506,10 @@ pub fn rpc_bootstrap(
identity_keypair.clone(), identity_keypair.clone(),
cluster_entrypoints, cluster_entrypoints,
ledger_path, ledger_path,
&node.info.gossip, &node
.info
.gossip()
.expect("Operator must spin up node with valid gossip address"),
node.sockets.gossip.try_clone().unwrap(), node.sockets.gossip.try_clone().unwrap(),
validator_config.expected_shred_version, validator_config.expected_shred_version,
validator_config.gossip_validators.clone(), validator_config.gossip_validators.clone(),
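verify_address is the Option-level analogue of the old is_valid_address call: a socket that is simply absent (Err from the getter, hence None after .ok()) fails the reachability check without panicking, because unwrap_or_default() on a missing bool yields false. In miniature:

use std::net::SocketAddr;

fn main() {
    // Stand-in for socket_addr_space.check(addr).
    let check = |addr: &SocketAddr| addr.port() != 0;
    let verify_address = |addr: &Option<SocketAddr>| -> bool {
        addr.as_ref().map(|addr| check(addr)).unwrap_or_default()
    };

    let live: Option<SocketAddr> = Some("127.0.0.1:8001".parse().unwrap());
    let missing: Option<SocketAddr> = None;
    assert!(verify_address(&live));
    assert!(!verify_address(&missing)); // absent socket => not reachable
}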

View File

@ -1599,27 +1599,39 @@ pub fn main() {
); );
if restricted_repair_only_mode { if restricted_repair_only_mode {
let any = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
// When --restricted_repair_only_mode is enabled, only the gossip and repair ports // When --restricted_repair_only_mode is enabled, only the gossip and repair ports
// need to be reachable by the entrypoint to respond to gossip pull requests and repair // need to be reachable by the entrypoint to respond to gossip pull requests and repair
// requests initiated by the node. All other ports are unused. // requests initiated by the node. All other ports are unused.
node.info.tpu = any; node.info.remove_tpu();
node.info.tpu_forwards = any; node.info.remove_tpu_forwards();
node.info.tvu = any; node.info.remove_tvu();
node.info.tvu_forwards = any; node.info.remove_tvu_forwards();
node.info.serve_repair = any; node.info.remove_serve_repair();
// A node in this configuration shouldn't be an entrypoint to other nodes // A node in this configuration shouldn't be an entrypoint to other nodes
node.sockets.ip_echo = None; node.sockets.ip_echo = None;
} }
if !private_rpc { if !private_rpc {
macro_rules! set_socket {
($method:ident, $addr:expr, $name:literal) => {
node.info.$method($addr).expect(&format!(
"Operator must spin up node with valid {} address",
$name
))
};
}
if let Some(public_rpc_addr) = public_rpc_addr { if let Some(public_rpc_addr) = public_rpc_addr {
node.info.rpc = public_rpc_addr; set_socket!(set_rpc, public_rpc_addr, "RPC");
node.info.rpc_pubsub = public_rpc_addr; set_socket!(set_rpc_pubsub, public_rpc_addr, "RPC-pubsub");
} else if let Some((rpc_addr, rpc_pubsub_addr)) = validator_config.rpc_addrs { } else if let Some((rpc_addr, rpc_pubsub_addr)) = validator_config.rpc_addrs {
node.info.rpc = SocketAddr::new(node.info.gossip.ip(), rpc_addr.port()); let addr = node
node.info.rpc_pubsub = SocketAddr::new(node.info.gossip.ip(), rpc_pubsub_addr.port()); .info
.gossip()
.expect("Operator must spin up node with valid gossip address")
.ip();
set_socket!(set_rpc, (addr, rpc_addr.port()), "RPC");
set_socket!(set_rpc_pubsub, (addr, rpc_pubsub_addr.port()), "RPC-pubsub");
} }
} }
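set_socket! is the operator-facing counterpart of unwrap_socket!: where the admin RPC conversion tolerates a missing socket, startup treats a rejected RPC address as fatal, with the offending socket's name baked into the panic message. A minimal sketch, with a hypothetical one-field node info:

use std::net::{IpAddr, Ipv4Addr};

// Hypothetical mock mirroring the set_socket! pattern above: the setter is
// fallible, and startup converts a failure into a labeled panic.
struct NodeInfo {
    rpc_port: Option<u16>,
}

impl NodeInfo {
    fn set_rpc(&mut self, (_, port): (IpAddr, u16)) -> Result<(), String> {
        (port != 0)
            .then(|| self.rpc_port = Some(port))
            .ok_or_else(|| "invalid port".to_string())
    }
}

fn main() {
    let mut info = NodeInfo { rpc_port: None };
    macro_rules! set_socket {
        ($method:ident, $addr:expr, $name:literal) => {
            info.$method($addr).expect(&format!(
                "Operator must spin up node with valid {} address",
                $name
            ))
        };
    }
    set_socket!(set_rpc, (IpAddr::V4(Ipv4Addr::LOCALHOST), 8899), "RPC");
    assert_eq!(info.rpc_port, Some(8899));
}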