makes sockets in LegacyContactInfo private (#31248)
Working towards the LegacyContactInfo => ContactInfo migration, this commit hides some implementation details of LegacyContactInfo and expands its API parity with the new ContactInfo.
parent b2e1d736f9
commit cb65a785bc
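The net effect of the patch: LegacyContactInfo's socket fields become private, and callers go through sanitizing getters and setters that return Result. A minimal, self-contained sketch of the pattern (the `Node` struct here is an illustrative stand-in, not the crate's full API):

    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    #[derive(Debug)]
    enum Error {
        InvalidPort(u16),
    }

    // Mirrors the patch's sanitize_socket: a zero port marks an unset address.
    fn sanitize_socket(socket: &SocketAddr) -> Result<(), Error> {
        if socket.port() == 0 {
            return Err(Error::InvalidPort(socket.port()));
        }
        Ok(())
    }

    struct Node {
        gossip: SocketAddr, // private; access goes through the methods below
    }

    impl Node {
        fn gossip(&self) -> Result<SocketAddr, Error> {
            sanitize_socket(&self.gossip)?;
            Ok(self.gossip)
        }

        fn set_gossip<T>(&mut self, socket: T) -> Result<(), Error>
        where
            SocketAddr: From<T>,
        {
            let socket = SocketAddr::from(socket);
            sanitize_socket(&socket)?;
            self.gossip = socket;
            Ok(())
        }
    }

    fn main() {
        let mut node = Node {
            gossip: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
        };
        assert!(node.gossip().is_err()); // an unset (port 0) address is now an Err
        node.set_gossip((Ipv4Addr::LOCALHOST, 8001)).unwrap();
        assert_eq!(node.gossip().unwrap().port(), 8001);
    }

The diff below is the mechanical fallout of that change: every `node.field` read becomes `node.field()` plus explicit handling of the Err case.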
@@ -661,7 +661,7 @@ fn main() {
         });

         info!("done found {} nodes", gossip_nodes.len());
-        gossip_nodes[0].rpc
+        gossip_nodes[0].rpc().unwrap()
     } else {
         info!("Using {:?} as the RPC address", entrypoint_addr);
         entrypoint_addr
@@ -14,10 +14,7 @@ use {
     },
     crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender},
     itertools::Itertools,
-    solana_gossip::{
-        cluster_info::{ClusterInfo, ClusterInfoError},
-        legacy_contact_info::LegacyContactInfo as ContactInfo,
-    },
+    solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError},
     solana_ledger::{blockstore::Blockstore, shred::Shred},
     solana_measure::measure::Measure,
     solana_metrics::{inc_new_counter_error, inc_new_counter_info},
@@ -412,10 +409,13 @@ pub fn broadcast_shreds(
             let cluster_nodes =
                 cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info);
             update_peer_stats(&cluster_nodes, last_datapoint_submit);
-            shreds.flat_map(move |shred| {
-                let node = cluster_nodes.get_broadcast_peer(&shred.id())?;
-                ContactInfo::is_valid_address(&node.tvu, socket_addr_space)
-                    .then(|| (shred.payload(), node.tvu))
+            shreds.filter_map(move |shred| {
+                cluster_nodes
+                    .get_broadcast_peer(&shred.id())?
+                    .tvu()
+                    .ok()
+                    .filter(|addr| socket_addr_space.check(addr))
+                    .map(|addr| (shred.payload(), addr))
             })
         })
         .collect();
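This rewrite is the recurring shape throughout the patch: the accessor's Result is turned into an Option with .ok(), screened with .filter(), and mapped into the output tuple, so invalid addresses silently drop out of the stream instead of being forwarded or panicking. A toy, self-contained rendition (the `allowed` predicate stands in for SocketAddrSpace::check):

    use std::net::{Ipv4Addr, SocketAddr};

    fn main() {
        let tvu: Result<SocketAddr, ()> = Ok(SocketAddr::from((Ipv4Addr::LOCALHOST, 8002)));
        let allowed = |addr: &SocketAddr| addr.ip().is_loopback();
        let out = tvu
            .ok()                           // Result -> Option
            .filter(allowed)                // drop disallowed addresses
            .map(|addr| ("payload", addr)); // build the (payload, addr) pair
        assert!(out.is_some());
    }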
@@ -305,7 +305,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
                 .iter()
                 .filter_map(|shred| {
                     let node = cluster_nodes.get_broadcast_peer(&shred.id())?;
-                    if !ContactInfo::is_valid_address(&node.tvu, socket_addr_space) {
+                    if !socket_addr_space.check(&node.tvu().ok()?) {
                         return None;
                     }
                     if self
@@ -338,14 +338,15 @@ impl BroadcastRun for BroadcastDuplicatesRun {
                         .iter()
                         .filter_map(|pubkey| {
                             let tvu = cluster_info
-                                .lookup_contact_info(pubkey, |contact_info| contact_info.tvu)?;
+                                .lookup_contact_info(pubkey, ContactInfo::tvu)?
+                                .ok()?;
                             Some((shred.payload(), tvu))
                         })
                         .collect(),
                 );
             }

-            Some(vec![(shred.payload(), node.tvu)])
+            Some(vec![(shred.payload(), node.tvu().ok()?)])
         })
         .flatten()
         .collect();
@@ -139,10 +139,12 @@ impl BroadcastRun for BroadcastFakeShredsRun {
             peers.iter().enumerate().for_each(|(i, peer)| {
                 if fake == (i <= self.partition) {
                     // Send fake shreds to the first N peers
+                    if let Ok(addr) = peer.tvu_forwards() {
                         data_shreds.iter().for_each(|b| {
-                            sock.send_to(b.payload(), peer.tvu_forwards).unwrap();
+                            sock.send_to(b.payload(), addr).unwrap();
                         });
                     }
+                }
             });
         }
         Ok(())
@@ -177,39 +177,42 @@ impl ClusterNodes<RetransmitStage> {
             frwds,
         } = self.get_retransmit_peers(slot_leader, shred, root_bank, fanout)?;
         if neighbors.is_empty() {
-            let peers = children
-                .into_iter()
-                .filter_map(Node::contact_info)
-                .filter(|node| addrs.get(&node.tvu) == Some(&node.id))
-                .map(|node| node.tvu)
-                .collect();
-            return Ok((root_distance, peers));
+            let peers = children.into_iter().filter_map(|node| {
+                node.contact_info()?
+                    .tvu()
+                    .ok()
+                    .filter(|addr| addrs.get(addr) == Some(&node.pubkey()))
+            });
+            return Ok((root_distance, peers.collect()));
         }
         // If the node is on the critical path (i.e. the first node in each
         // neighborhood), it should send the packet to tvu socket of its
         // children and also tvu_forward socket of its neighbors. Otherwise it
         // should only forward to tvu_forwards socket of its children.
         if neighbors[0].pubkey() != self.pubkey {
-            let peers = children
-                .into_iter()
-                .filter_map(Node::contact_info)
-                .filter(|node| frwds.get(&node.tvu_forwards) == Some(&node.id))
-                .map(|node| node.tvu_forwards);
+            let peers = children.into_iter().filter_map(|node| {
+                node.contact_info()?
+                    .tvu_forwards()
+                    .ok()
+                    .filter(|addr| frwds.get(addr) == Some(&node.pubkey()))
+            });
             return Ok((root_distance, peers.collect()));
         }
         // First neighbor is this node itself, so skip it.
         let peers = neighbors[1..]
             .iter()
-            .filter_map(|node| node.contact_info())
-            .filter(|node| frwds.get(&node.tvu_forwards) == Some(&node.id))
-            .map(|node| node.tvu_forwards)
-            .chain(
-                children
-                    .into_iter()
-                    .filter_map(Node::contact_info)
-                    .filter(|node| addrs.get(&node.tvu) == Some(&node.id))
-                    .map(|node| node.tvu),
-            );
+            .filter_map(|node| {
+                node.contact_info()?
+                    .tvu_forwards()
+                    .ok()
+                    .filter(|addr| frwds.get(addr) == Some(&node.pubkey()))
+            })
+            .chain(children.into_iter().filter_map(|node| {
+                node.contact_info()?
+                    .tvu()
+                    .ok()
+                    .filter(|addr| addrs.get(addr) == Some(&node.pubkey()))
+            }));
        Ok((root_distance, peers.collect()))
     }
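The retransmit closures above thread two fallible layers through one filter_map: `?` peels the Option from contact_info(), .ok() converts the Result from the socket getter, and .filter() applies the addrs/frwds consistency check. A self-contained sketch of that shape, with `contact` standing in for the contact_info()-then-tvu() chain:

    use std::net::{Ipv4Addr, SocketAddr};

    struct Peer {
        contact: Option<Result<SocketAddr, ()>>, // stand-in for contact_info()?.tvu()
    }

    fn main() {
        let peers = vec![
            Peer { contact: Some(Ok(SocketAddr::from((Ipv4Addr::LOCALHOST, 8001)))) },
            Peer { contact: None },      // no contact info
            Peer { contact: Some(Err(())) }, // invalid socket
        ];
        let out: Vec<SocketAddr> = peers
            .into_iter()
            .filter_map(|peer| peer.contact?.ok().filter(|addr| addr.ip().is_loopback()))
            .collect();
        assert_eq!(out.len(), 1); // only the valid, allowed peer survives
    }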
@@ -241,9 +244,13 @@ impl ClusterNodes<RetransmitStage> {
             .map(|index| &self.nodes[index])
             .inspect(|node| {
                 if let Some(node) = node.contact_info() {
-                    addrs.entry(node.tvu).or_insert(node.id);
+                    if let Ok(addr) = node.tvu() {
+                        addrs.entry(addr).or_insert(node.id);
+                    }
                     if !drop_redundant_turbine_path {
-                        frwds.entry(node.tvu_forwards).or_insert(node.id);
+                        if let Ok(addr) = node.tvu_forwards() {
+                            frwds.entry(addr).or_insert(node.id);
+                        }
                     }
                 }
             })
@@ -11,40 +11,37 @@ pub(crate) fn next_leader_tpu(
     cluster_info: &ClusterInfo,
     poh_recorder: &RwLock<PohRecorder>,
 ) -> Option<(Pubkey, SocketAddr)> {
-    next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu)
+    next_leader_x(cluster_info, poh_recorder, ContactInfo::tpu)
 }

 pub(crate) fn next_leader_tpu_forwards(
     cluster_info: &ClusterInfo,
     poh_recorder: &RwLock<PohRecorder>,
 ) -> Option<(Pubkey, SocketAddr)> {
-    next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_forwards)
+    next_leader_x(cluster_info, poh_recorder, ContactInfo::tpu_forwards)
 }

 pub(crate) fn next_leader_tpu_vote(
     cluster_info: &ClusterInfo,
     poh_recorder: &RwLock<PohRecorder>,
 ) -> Option<(Pubkey, SocketAddr)> {
-    next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_vote)
+    next_leader_x(cluster_info, poh_recorder, ContactInfo::tpu_vote)
 }

-fn next_leader_x<F>(
+fn next_leader_x<F, E>(
     cluster_info: &ClusterInfo,
     poh_recorder: &RwLock<PohRecorder>,
     port_selector: F,
 ) -> Option<(Pubkey, SocketAddr)>
 where
-    F: FnOnce(&ContactInfo) -> SocketAddr,
+    F: FnOnce(&ContactInfo) -> Result<SocketAddr, E>,
 {
     let leader_pubkey = poh_recorder
         .read()
         .unwrap()
-        .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET);
-    if let Some(leader_pubkey) = leader_pubkey {
-        cluster_info
-            .lookup_contact_info(&leader_pubkey, port_selector)
-            .map(|addr| (leader_pubkey, addr))
-    } else {
-        None
-    }
+        .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET)?;
+    cluster_info
+        .lookup_contact_info(&leader_pubkey, port_selector)?
+        .ok()
+        .map(|addr| (leader_pubkey, addr))
 }
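Note the selector now returns Result<SocketAddr, E>, and next_leader_x stays generic over the error type because it only ever discards it with .ok(). A runnable, self-contained sketch of that signature change (names here are illustrative):

    use std::net::{Ipv4Addr, SocketAddr};

    // Generic over E: any selector error just collapses to None.
    fn next_leader<F, E>(leader: Option<&str>, port_selector: F) -> Option<(String, SocketAddr)>
    where
        F: FnOnce(&str) -> Result<SocketAddr, E>,
    {
        let leader = leader?;
        port_selector(leader).ok().map(|addr| (leader.to_string(), addr))
    }

    fn main() {
        let got = next_leader(Some("leader-pubkey"), |_| {
            Ok::<_, ()>(SocketAddr::from((Ipv4Addr::LOCALHOST, 8003)))
        });
        assert_eq!(got.unwrap().1.port(), 8003);
    }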
@@ -2,7 +2,7 @@

 use {
     crate::serve_repair::RepairVerifyError,
-    solana_gossip::{cluster_info, gossip_error::GossipError},
+    solana_gossip::{cluster_info, contact_info, gossip_error::GossipError},
     solana_ledger::blockstore,
     thiserror::Error,
 };
@@ -16,6 +16,8 @@ pub enum Error {
     #[error(transparent)]
     Gossip(#[from] GossipError),
     #[error(transparent)]
+    InvalidContactInfo(#[from] contact_info::Error),
+    #[error(transparent)]
     Io(#[from] std::io::Error),
     #[error("ReadyTimeout")]
     ReadyTimeout,
@@ -338,17 +338,21 @@ pub(crate) struct RepairPeers {

 impl RepairPeers {
     fn new(asof: Instant, peers: &[ContactInfo], weights: &[u64]) -> Result<Self> {
-        if peers.is_empty() {
-            return Err(Error::from(ClusterInfoError::NoPeers));
-        }
         if peers.len() != weights.len() {
             return Err(Error::from(WeightedError::InvalidWeight));
         }
-        let weighted_index = WeightedIndex::new(weights)?;
-        let peers = peers
+        let (peers, weights): (Vec<_>, Vec<u64>) = peers
             .iter()
-            .map(|peer| (peer.id, peer.serve_repair))
-            .collect();
+            .zip(weights)
+            .filter_map(|(peer, &weight)| {
+                let addr = peer.serve_repair().ok()?;
+                Some(((peer.id, addr), weight))
+            })
+            .unzip();
+        if peers.is_empty() {
+            return Err(Error::from(ClusterInfoError::NoPeers));
+        }
+        let weighted_index = WeightedIndex::new(weights)?;
         Ok(Self {
             asof,
             peers,
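The emptiness check moves after the filtering on purpose: a peer list whose serve_repair addresses all fail sanitization is now reported as NoPeers, and the peer/weight pairing stays in lockstep via zip + unzip. A self-contained sketch of that pairing pattern:

    fn main() {
        // (id, fallible serve_repair port); "b" has an invalid address.
        let peers = [("a", Ok::<u16, ()>(8004)), ("b", Err(())), ("c", Ok(8005))];
        let weights = [1u64, 2, 3];
        let (peers, weights): (Vec<_>, Vec<u64>) = peers
            .into_iter()
            .zip(weights)
            .filter_map(|((id, port), weight)| Some(((id, port.ok()?), weight)))
            .unzip();
        // The dropped peer's weight is dropped with it, keeping the vectors aligned.
        assert_eq!(peers, vec![("a", 8004), ("c", 8005)]);
        assert_eq!(weights, vec![1, 3]);
    }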
@@ -1070,9 +1074,12 @@ impl ServeRepair {
             .unzip();
         let peers = WeightedShuffle::new("repair_request_ancestor_hashes", &weights)
             .shuffle(&mut rand::thread_rng())
-            .take(ANCESTOR_HASH_REPAIR_SAMPLE_SIZE)
             .map(|i| index[i])
-            .map(|i| (repair_peers[i].id, repair_peers[i].serve_repair))
+            .filter_map(|i| {
+                let addr = repair_peers[i].serve_repair().ok()?;
+                Some((repair_peers[i].id, addr))
+            })
+            .take(ANCESTOR_HASH_REPAIR_SAMPLE_SIZE)
             .collect();
         Ok(peers)
     }
@@ -1093,7 +1100,7 @@ impl ServeRepair {
             .unzip();
         let k = WeightedIndex::new(weights)?.sample(&mut rand::thread_rng());
         let n = index[k];
-        Ok((repair_peers[n].id, repair_peers[n].serve_repair))
+        Ok((repair_peers[n].id, repair_peers[n].serve_repair()?))
     }

     pub(crate) fn map_repair_request(
@@ -4,7 +4,7 @@
 use {
     rand::{thread_rng, Rng},
     solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
-    solana_gossip::cluster_info::ClusterInfo,
+    solana_gossip::{cluster_info::ClusterInfo, contact_info::LegacyContactInfo as ContactInfo},
     solana_poh::poh_recorder::PohRecorder,
     std::{
         sync::{
@@ -46,8 +46,8 @@ impl WarmQuicCacheService {
                     .map_or(true, |last_leader| last_leader != leader_pubkey)
                 {
                     maybe_last_leader = Some(leader_pubkey);
-                    if let Some(addr) = cluster_info
-                        .lookup_contact_info(&leader_pubkey, |leader| leader.tpu)
+                    if let Some(Ok(addr)) = cluster_info
+                        .lookup_contact_info(&leader_pubkey, ContactInfo::tpu)
                     {
                         let conn = connection_cache.get_connection(&addr);
                         if let Err(err) = conn.send_data(&[0u8]) {
@@ -427,16 +427,16 @@ fn get_target(
     info!("ADDR = {}", entrypoint_addr);

     for node in nodes {
-        if node.gossip == entrypoint_addr {
-            info!("{}", node.gossip);
+        if node.gossip().ok() == Some(entrypoint_addr) {
+            info!("{:?}", node.gossip());
             target = match mode {
-                Mode::Gossip => Some((node.id, node.gossip)),
-                Mode::Tvu => Some((node.id, node.tvu)),
-                Mode::TvuForwards => Some((node.id, node.tvu_forwards)),
-                Mode::Tpu => Some((node.id, node.tpu)),
-                Mode::TpuForwards => Some((node.id, node.tpu_forwards)),
-                Mode::Repair => Some((node.id, node.repair)),
-                Mode::ServeRepair => Some((node.id, node.serve_repair)),
+                Mode::Gossip => Some((node.id, node.gossip().unwrap())),
+                Mode::Tvu => Some((node.id, node.tvu().unwrap())),
+                Mode::TvuForwards => Some((node.id, node.tvu_forwards().unwrap())),
+                Mode::Tpu => Some((node.id, node.tpu().unwrap())),
+                Mode::TpuForwards => Some((node.id, node.tpu_forwards().unwrap())),
+                Mode::Repair => Some((node.id, node.repair().unwrap())),
+                Mode::ServeRepair => Some((node.id, node.serve_repair().unwrap())),
                 Mode::Rpc => None,
             };
             break;
@@ -457,9 +457,9 @@ fn get_rpc_client(

     // find target node
     for node in nodes {
-        if node.gossip == entrypoint_addr {
-            info!("{}", node.gossip);
-            return Ok(RpcClient::new_socket(node.rpc));
+        if node.gossip().ok() == Some(entrypoint_addr) {
+            info!("{:?}", node.gossip());
+            return Ok(RpcClient::new_socket(node.rpc().unwrap()));
         }
     }
     Err("Node with entrypoint_addr was not found")
@@ -813,7 +813,7 @@ pub mod test {
             &solana_sdk::pubkey::new_rand(),
             timestamp(),
         )];
-        let entrypoint_addr = nodes[0].gossip;
+        let entrypoint_addr = nodes[0].gossip().unwrap();

         run_dos_no_client(
             &nodes,
@@ -549,7 +549,7 @@ impl ClusterInfo {
             .read()
             .unwrap()
             .iter()
-            .map(|contact_info| contact_info.gossip)
+            .filter_map(|node| node.gossip().ok())
             .collect::<HashSet<_>>();
         let self_pubkey = self.id();
         let gossip_crds = self.gossip.crds.read().unwrap();
@@ -564,7 +564,10 @@ impl ClusterInfo {
                 // definition that information is already available
                 let contact_info = v.value.contact_info().unwrap();
                 if contact_info.id != self_pubkey
-                    && !entrypoint_gossip_addrs.contains(&contact_info.gossip)
+                    && contact_info
+                        .gossip()
+                        .map(|addr| !entrypoint_gossip_addrs.contains(&addr))
+                        .unwrap_or_default()
                 {
                     return Some(v.value.clone());
                 }
@@ -698,7 +701,9 @@ impl ClusterInfo {
     ) -> Option<LegacyContactInfo> {
         let gossip_crds = self.gossip.crds.read().unwrap();
         let mut nodes = gossip_crds.get_nodes_contact_info();
-        nodes.find(|node| node.gossip == *gossip_addr).cloned()
+        nodes
+            .find(|node| node.gossip().ok() == Some(*gossip_addr))
+            .cloned()
     }

     pub fn my_contact_info(&self) -> ContactInfo {
@@ -720,6 +725,18 @@ impl ClusterInfo {
             .unwrap_or_else(|| EpochSlots::new(self_pubkey, timestamp()))
     }

+    fn addr_to_string(&self, default_ip: &Option<IpAddr>, addr: &Option<SocketAddr>) -> String {
+        addr.filter(|addr| self.socket_addr_space.check(addr))
+            .map(|addr| {
+                if &Some(addr.ip()) == default_ip {
+                    addr.port().to_string()
+                } else {
+                    addr.to_string()
+                }
+            })
+            .unwrap_or_else(|| String::from("none"))
+    }
+
     pub fn rpc_info_trace(&self) -> String {
         let now = timestamp();
         let my_pubkey = self.id();
@@ -728,30 +745,17 @@ impl ClusterInfo {
             .all_peers()
             .into_iter()
             .filter_map(|(node, last_updated)| {
-                if !ContactInfo::is_valid_address(&node.rpc, &self.socket_addr_space) {
-                    return None;
-                }
+                let node_rpc = node
+                    .rpc()
+                    .ok()
+                    .filter(|addr| self.socket_addr_space.check(addr))?;
                 let node_version = self.get_node_version(&node.id);
                 if my_shred_version != 0
                     && (node.shred_version != 0 && node.shred_version != my_shred_version)
                 {
                     return None;
                 }
-                let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String {
-                    if ContactInfo::is_valid_address(addr, &self.socket_addr_space) {
-                        if &addr.ip() == default_ip {
-                            addr.port().to_string()
-                        } else {
-                            addr.to_string()
-                        }
-                    } else {
-                        "none".to_string()
-                    }
-                };
-
-                let rpc_addr = node.rpc.ip();
+                let rpc_addr = node_rpc.ip();
                 Some(format!(
                     "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {}\n",
                     rpc_addr.to_string(),
@@ -763,8 +767,8 @@ impl ClusterInfo {
                     } else {
                         "-".to_string()
                     },
-                    addr_to_string(&rpc_addr, &node.rpc),
-                    addr_to_string(&rpc_addr, &node.rpc_pubsub),
+                    self.addr_to_string(&Some(rpc_addr), &node.rpc().ok()),
+                    self.addr_to_string(&Some(rpc_addr), &node.rpc_pubsub().ok()),
                     node.shred_version,
                 ))
             })
@@ -806,25 +810,17 @@ impl ClusterInfo {
                 if is_spy_node {
                     shred_spy_nodes = shred_spy_nodes.saturating_add(1);
                 }
-                let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String {
-                    if ContactInfo::is_valid_address(addr, &self.socket_addr_space) {
-                        if &addr.ip() == default_ip {
-                            addr.port().to_string()
-                        } else {
-                            addr.to_string()
-                        }
-                    } else {
-                        "none".to_string()
-                    }
-                };
-                let ip_addr = node.gossip.ip();
+                let ip_addr = node.gossip().as_ref().map(SocketAddr::ip).ok();
                 Some(format!(
                     "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
-                    if ContactInfo::is_valid_address(&node.gossip, &self.socket_addr_space) {
-                        ip_addr.to_string()
-                    } else {
-                        "none".to_string()
-                    },
+                    node.gossip()
+                        .ok()
+                        .filter(|addr| self.socket_addr_space.check(addr))
+                        .as_ref()
+                        .map(SocketAddr::ip)
+                        .as_ref()
+                        .map(IpAddr::to_string)
+                        .unwrap_or_else(|| String::from("none")),
                     if node.id == my_pubkey { "me" } else { "" },
                     now.saturating_sub(last_updated),
                     node.id,
@@ -833,14 +829,14 @@ impl ClusterInfo {
                     } else {
                         "-".to_string()
                     },
-                    addr_to_string(&ip_addr, &node.gossip),
-                    addr_to_string(&ip_addr, &node.tpu_vote),
-                    addr_to_string(&ip_addr, &node.tpu),
-                    addr_to_string(&ip_addr, &node.tpu_forwards),
-                    addr_to_string(&ip_addr, &node.tvu),
-                    addr_to_string(&ip_addr, &node.tvu_forwards),
-                    addr_to_string(&ip_addr, &node.repair),
-                    addr_to_string(&ip_addr, &node.serve_repair),
+                    self.addr_to_string(&ip_addr, &node.gossip().ok()),
+                    self.addr_to_string(&ip_addr, &node.tpu_vote().ok()),
+                    self.addr_to_string(&ip_addr, &node.tpu().ok()),
+                    self.addr_to_string(&ip_addr, &node.tpu_forwards().ok()),
+                    self.addr_to_string(&ip_addr, &node.tvu().ok()),
+                    self.addr_to_string(&ip_addr, &node.tvu_forwards().ok()),
+                    self.addr_to_string(&ip_addr, &node.repair().ok()),
+                    self.addr_to_string(&ip_addr, &node.serve_repair().ok()),
                     node.shred_version,
                 ))
             }
@@ -1259,16 +1255,19 @@ impl ClusterInfo {
         Some(version.version.clone().into())
     }

+    fn check_socket_addr_space<E>(&self, addr: &Result<SocketAddr, E>) -> bool {
+        addr.as_ref()
+            .map(|addr| self.socket_addr_space.check(addr))
+            .unwrap_or_default()
+    }
+
     /// all validators that have a valid rpc port regardless of `shred_version`.
     pub fn all_rpc_peers(&self) -> Vec<LegacyContactInfo> {
         let self_pubkey = self.id();
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
             .get_nodes_contact_info()
-            .filter(|x| {
-                x.id != self_pubkey
-                    && ContactInfo::is_valid_address(&x.rpc, &self.socket_addr_space)
-            })
+            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.rpc()))
             .cloned()
             .collect()
     }
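The new check_socket_addr_space helper is generic over the error type because only the Ok case matters; any invalid address defaults to false via unwrap_or_default. A standalone, runnable rendition (the predicate stands in for SocketAddrSpace::check):

    use std::net::{Ipv4Addr, SocketAddr};

    fn check_addr<E>(addr: &Result<SocketAddr, E>, check: impl Fn(&SocketAddr) -> bool) -> bool {
        // Err => false; Ok(addr) => apply the socket-space predicate.
        addr.as_ref().map(|addr| check(addr)).unwrap_or_default()
    }

    fn main() {
        let loopback_only = |addr: &SocketAddr| addr.ip().is_loopback();
        let good: Result<SocketAddr, ()> = Ok(SocketAddr::from((Ipv4Addr::LOCALHOST, 8899)));
        let bad: Result<SocketAddr, ()> = Err(());
        assert!(check_addr(&good, loopback_only));
        assert!(!check_addr(&bad, loopback_only));
    }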
@@ -1288,9 +1287,7 @@ impl ClusterInfo {
         gossip_crds
             .get_nodes_contact_info()
             // shred_version not considered for gossip peers (ie, spy nodes do not set shred_version)
-            .filter(|x| {
-                x.id != me && ContactInfo::is_valid_address(&x.gossip, &self.socket_addr_space)
-            })
+            .filter(|node| node.id != me && self.check_socket_addr_space(&node.gossip()))
             .cloned()
             .collect()
     }
@@ -1300,10 +1297,7 @@ impl ClusterInfo {
         let self_pubkey = self.id();
         self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
             .get_nodes_contact_info()
-            .filter(|x| {
-                ContactInfo::is_valid_address(&x.tvu, &self.socket_addr_space)
-                    && x.id != self_pubkey
-            })
+            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tvu()))
             .cloned()
             .collect()
     }
@@ -1317,7 +1311,7 @@ impl ClusterInfo {
             .filter(|node| {
                 node.id != self_pubkey
                     && node.shred_version == self_shred_version
-                    && ContactInfo::is_valid_address(&node.tvu, &self.socket_addr_space)
+                    && self.check_socket_addr_space(&node.tvu())
             })
             .cloned()
             .collect()
@@ -1334,8 +1328,8 @@ impl ClusterInfo {
             .filter(|node| {
                 node.id != self_pubkey
                     && node.shred_version == self_shred_version
-                    && ContactInfo::is_valid_address(&node.tvu, &self.socket_addr_space)
-                    && ContactInfo::is_valid_address(&node.serve_repair, &self.socket_addr_space)
+                    && self.check_socket_addr_space(&node.tvu())
+                    && self.check_socket_addr_space(&node.serve_repair())
                     && match gossip_crds.get::<&LowestSlot>(node.id) {
                         None => true, // fallback to legacy behavior
                         Some(lowest_slot) => lowest_slot.lowest <= slot,
@@ -1345,10 +1339,13 @@ impl ClusterInfo {
             .collect()
     }

-    fn is_spy_node(contact_info: &LegacyContactInfo, socket_addr_space: &SocketAddrSpace) -> bool {
-        !ContactInfo::is_valid_address(&contact_info.tpu, socket_addr_space)
-            || !ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
-            || !ContactInfo::is_valid_address(&contact_info.tvu, socket_addr_space)
+    fn is_spy_node(node: &LegacyContactInfo, socket_addr_space: &SocketAddrSpace) -> bool {
+        ![node.tpu(), node.gossip(), node.tvu()]
+            .into_iter()
+            .all(|addr| {
+                addr.map(|addr| socket_addr_space.check(&addr))
+                    .unwrap_or_default()
+            })
     }

     /// compute broadcast table
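The de Morgan rewrite above reads: a node is a spy unless its tpu, gossip, and tvu sockets are all present and pass the socket-space check; an Err from any accessor counts as failing. A self-contained sketch:

    use std::net::{Ipv4Addr, SocketAddr};

    fn main() {
        let check = |addr: &SocketAddr| addr.port() != 0;
        let addrs: [Result<SocketAddr, ()>; 3] = [
            Ok(SocketAddr::from((Ipv4Addr::LOCALHOST, 8000))), // tpu
            Ok(SocketAddr::from((Ipv4Addr::LOCALHOST, 8001))), // gossip
            Err(()),                                           // tvu unset
        ];
        let is_spy = !addrs
            .into_iter()
            .all(|addr| addr.map(|addr| check(&addr)).unwrap_or_default());
        assert!(is_spy); // one missing socket is enough
    }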
@@ -1357,10 +1354,7 @@ impl ClusterInfo {
         let gossip_crds = self.gossip.crds.read().unwrap();
         gossip_crds
             .get_nodes_contact_info()
-            .filter(|x| {
-                x.id != self_pubkey
-                    && ContactInfo::is_valid_address(&x.tpu, &self.socket_addr_space)
-            })
+            .filter(|node| node.id != self_pubkey && self.check_socket_addr_space(&node.tpu()))
             .cloned()
             .collect()
     }
@@ -1403,14 +1397,16 @@ impl ClusterInfo {
                 return;
             }
             entrypoint.wallclock = now;
+            if let Ok(entrypoint_gossip) = entrypoint.gossip() {
                 if self
                     .time_gossip_read_lock("entrypoint", &self.stats.entrypoint)
                     .get_nodes_contact_info()
-                    .any(|node| node.gossip == entrypoint.gossip)
+                    .any(|node| node.gossip().ok() == Some(entrypoint_gossip))
                 {
                     return; // Found the entrypoint, no need to pull from it
                 }
             }
+            }
             entrypoint.clone()
         };
         let filters = if pulls.is_empty() {
@@ -1513,7 +1509,8 @@ impl ClusterInfo {
         let self_info = CrdsValue::new_signed(self_info, &self.keypair());
         let pulls = pulls
             .into_iter()
-            .flat_map(|(peer, filters)| repeat(peer.gossip).zip(filters))
+            .filter_map(|(peer, filters)| Some((peer.gossip().ok()?, filters)))
+            .flat_map(|(addr, filters)| repeat(addr).zip(filters))
             .map(|(gossip_addr, filter)| {
                 let request = Protocol::PullRequest(filter, self_info.clone());
                 (gossip_addr, request)
@@ -1563,7 +1560,7 @@ impl ClusterInfo {
             .into_iter()
             .filter_map(|(pubkey, messages)| {
                 let peer: &LegacyContactInfo = gossip_crds.get(pubkey)?;
-                Some((peer.gossip, messages))
+                Some((peer.gossip().ok()?, messages))
             })
             .collect()
     };
@@ -1655,8 +1652,10 @@ impl ClusterInfo {
         for entrypoint in entrypoints.iter_mut() {
             if entrypoint.id == Pubkey::default() {
                 // If a pull from the entrypoint was successful it should exist in the CRDS table
-                if let Some(entrypoint_from_gossip) =
-                    self.lookup_contact_info_by_gossip_addr(&entrypoint.gossip)
+                if let Some(entrypoint_from_gossip) = entrypoint
+                    .gossip()
+                    .ok()
+                    .and_then(|addr| self.lookup_contact_info_by_gossip_addr(&addr))
                 {
                     // Update the entrypoint's id so future entrypoint pulls correctly reference it
                     *entrypoint = entrypoint_from_gossip;
@@ -2337,7 +2336,7 @@ impl ClusterInfo {
                     };
                     prune_data.sign(&self.keypair());
                     let prune_message = Protocol::PruneMessage(self_pubkey, prune_data);
-                    Some((peer.gossip, prune_message))
+                    Some((peer.gossip().ok()?, prune_message))
                 })
                 .collect()
             })
@@ -4109,7 +4108,7 @@ RPC Enabled Nodes: 1"#;
         assert!(pings.is_empty());
         assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS);
         for (addr, msg) in pulls {
-            assert_eq!(addr, entrypoint.gossip);
+            assert_eq!(addr, entrypoint.gossip().unwrap());
             match msg {
                 Protocol::PullRequest(_, value) => {
                     assert!(value.verify());
@@ -4490,12 +4489,12 @@ RPC Enabled Nodes: 1"#;
         // address
         let entrypoint1_gossip_addr = socketaddr!("127.0.0.2:1234");
         let mut entrypoint1 = LegacyContactInfo::new_localhost(&Pubkey::default(), timestamp());
-        entrypoint1.gossip = entrypoint1_gossip_addr;
+        entrypoint1.set_gossip(entrypoint1_gossip_addr).unwrap();
         assert_eq!(entrypoint1.shred_version, 0);

         let entrypoint2_gossip_addr = socketaddr!("127.0.0.2:5678");
         let mut entrypoint2 = LegacyContactInfo::new_localhost(&Pubkey::default(), timestamp());
-        entrypoint2.gossip = entrypoint2_gossip_addr;
+        entrypoint2.set_gossip(entrypoint2_gossip_addr).unwrap();
         assert_eq!(entrypoint2.shred_version, 0);
         cluster_info.set_entrypoints(vec![entrypoint1, entrypoint2]);
@@ -4582,7 +4581,7 @@ RPC Enabled Nodes: 1"#;
         // address
         let entrypoint_gossip_addr = socketaddr!("127.0.0.2:1234");
         let mut entrypoint = LegacyContactInfo::new_localhost(&Pubkey::default(), timestamp());
-        entrypoint.gossip = entrypoint_gossip_addr;
+        entrypoint.set_gossip(entrypoint_gossip_addr).unwrap();
         assert_eq!(entrypoint.shred_version, 0);
         cluster_info.set_entrypoint(entrypoint);
@@ -441,41 +441,12 @@ impl Sanitize for ContactInfo {
     }
 }

-impl TryFrom<&ContactInfo> for LegacyContactInfo {
-    type Error = Error;
-
-    fn try_from(node: &ContactInfo) -> Result<Self, Self::Error> {
-        macro_rules! unwrap_socket {
-            ($name:ident) => {
-                node.$name().ok().unwrap_or_else(socket_addr_unspecified)
-            };
-        }
-        sanitize_quic_offset(&node.tpu().ok(), &node.tpu_quic().ok())?;
-        sanitize_quic_offset(&node.tpu_forwards().ok(), &node.tpu_forwards_quic().ok())?;
-        Ok(Self {
-            id: *node.pubkey(),
-            gossip: unwrap_socket!(gossip),
-            tvu: unwrap_socket!(tvu),
-            tvu_forwards: unwrap_socket!(tvu_forwards),
-            repair: unwrap_socket!(repair),
-            tpu: unwrap_socket!(tpu),
-            tpu_forwards: unwrap_socket!(tpu_forwards),
-            tpu_vote: unwrap_socket!(tpu_vote),
-            rpc: unwrap_socket!(rpc),
-            rpc_pubsub: unwrap_socket!(rpc_pubsub),
-            serve_repair: unwrap_socket!(serve_repair),
-            wallclock: node.wallclock(),
-            shred_version: node.shred_version(),
-        })
-    }
-}
-
 // Workaround until feature(const_socketaddr) is stable.
-fn socket_addr_unspecified() -> SocketAddr {
+pub(crate) fn socket_addr_unspecified() -> SocketAddr {
     SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), /*port:*/ 0u16)
 }

-fn sanitize_socket(socket: &SocketAddr) -> Result<(), Error> {
+pub(crate) fn sanitize_socket(socket: &SocketAddr) -> Result<(), Error> {
     if socket.port() == 0u16 {
         return Err(Error::InvalidPort(socket.port()));
     }
@@ -540,7 +511,7 @@ fn sanitize_entries(addrs: &[IpAddr], sockets: &[SocketEntry]) -> Result<(), Err
 }

 // Verifies that the other socket is at QUIC_PORT_OFFSET from the first one.
-fn sanitize_quic_offset(
+pub(crate) fn sanitize_quic_offset(
     socket: &Option<SocketAddr>, // udp
     other: &Option<SocketAddr>, // quic: udp + QUIC_PORT_OFFSET
 ) -> Result<(), Error> {
@@ -802,27 +773,30 @@ mod tests {

     fn cross_verify_with_legacy(node: &ContactInfo) {
         let old = LegacyContactInfo::try_from(node).unwrap();
-        assert_eq!(old.gossip, node.gossip().unwrap());
-        assert_eq!(old.repair, node.repair().unwrap());
-        assert_eq!(old.rpc, node.rpc().unwrap());
-        assert_eq!(old.rpc_pubsub, node.rpc_pubsub().unwrap());
-        assert_eq!(old.serve_repair, node.serve_repair().unwrap());
-        assert_eq!(old.tpu, node.tpu().unwrap());
-        assert_eq!(old.tpu_forwards, node.tpu_forwards().unwrap());
+        assert_eq!(old.gossip().unwrap(), node.gossip().unwrap());
+        assert_eq!(old.repair().unwrap(), node.repair().unwrap());
+        assert_eq!(old.rpc().unwrap(), node.rpc().unwrap());
+        assert_eq!(old.rpc_pubsub().unwrap(), node.rpc_pubsub().unwrap());
+        assert_eq!(old.serve_repair().unwrap(), node.serve_repair().unwrap());
+        assert_eq!(old.tpu().unwrap(), node.tpu().unwrap());
+        assert_eq!(old.tpu_forwards().unwrap(), node.tpu_forwards().unwrap());
         assert_eq!(
             node.tpu_forwards_quic().unwrap(),
             SocketAddr::new(
-                old.tpu_forwards.ip(),
-                old.tpu_forwards.port() + QUIC_PORT_OFFSET
+                old.tpu_forwards().unwrap().ip(),
+                old.tpu_forwards().unwrap().port() + QUIC_PORT_OFFSET
             )
         );
         assert_eq!(
             node.tpu_quic().unwrap(),
-            SocketAddr::new(old.tpu.ip(), old.tpu.port() + QUIC_PORT_OFFSET)
+            SocketAddr::new(
+                old.tpu().unwrap().ip(),
+                old.tpu().unwrap().port() + QUIC_PORT_OFFSET
+            )
         );
-        assert_eq!(old.tpu_vote, node.tpu_vote().unwrap());
-        assert_eq!(old.tvu, node.tvu().unwrap());
-        assert_eq!(old.tvu_forwards, node.tvu_forwards().unwrap());
+        assert_eq!(old.tpu_vote().unwrap(), node.tpu_vote().unwrap());
+        assert_eq!(old.tvu().unwrap(), node.tvu().unwrap());
+        assert_eq!(old.tvu_forwards().unwrap(), node.tvu_forwards().unwrap());
     }

     #[test]
@@ -719,10 +719,7 @@ impl CrdsStats {
 mod tests {
     use {
         super::*,
-        crate::{
-            crds_value::{new_rand_timestamp, LegacySnapshotHashes, NodeInstance},
-            socketaddr,
-        },
+        crate::crds_value::{new_rand_timestamp, LegacySnapshotHashes, NodeInstance},
         rand::{thread_rng, Rng, SeedableRng},
         rand_chacha::ChaChaRng,
         rayon::ThreadPoolBuilder,
@@ -1447,7 +1444,7 @@ mod tests {
         let v2 = VersionedCrdsValue::new(
             {
                 let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0);
-                contact_info.rpc = socketaddr!(Ipv4Addr::UNSPECIFIED, 0);
+                contact_info.set_rpc((Ipv4Addr::LOCALHOST, 1244)).unwrap();
                 CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(contact_info))
             },
             Cursor::default(),
@@ -358,7 +358,10 @@ pub(crate) fn get_gossip_nodes<R: Rng>(
         .filter(|node| {
             &node.id != pubkey
                 && verify_shred_version(node.shred_version)
-                && ContactInfo::is_valid_address(&node.gossip, socket_addr_space)
+                && node
+                    .gossip()
+                    .map(|addr| socket_addr_space.check(&addr))
+                    .unwrap_or_default()
                 && match gossip_validators {
                     Some(nodes) => nodes.contains(&node.id),
                     None => true,
@@ -375,7 +378,8 @@ pub(crate) fn dedup_gossip_addresses(
 ) -> HashMap</*gossip:*/ SocketAddr, (/*stake:*/ u64, ContactInfo)> {
     nodes
         .into_iter()
-        .into_grouping_map_by(|node| node.gossip)
+        .filter_map(|node| Some((node.gossip().ok()?, node)))
+        .into_grouping_map()
         .aggregate(|acc, _node_gossip, node| {
             let stake = stakes.get(&node.id).copied().unwrap_or_default();
             match acc {
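The switch from into_grouping_map_by to an explicit filter_map plus into_grouping_map lets a node with an invalid gossip address drop out entirely instead of being grouped under a bogus key. A sketch of the relation between the two forms, assuming itertools is in scope (it is a dependency of this crate, though GroupingMap's exact API varies by version):

    use itertools::Itertools;

    fn main() {
        // (name, fallible gossip port); "n2" has no valid gossip address.
        let nodes = vec![("n1", Ok::<u16, ()>(8001)), ("n2", Err(())), ("n3", Ok(8001))];
        let grouped = nodes
            .into_iter()
            .filter_map(|(name, gossip)| Some((gossip.ok()?, name)))
            .into_grouping_map()
            .collect::<Vec<_>>();
        assert_eq!(grouped[&8001], vec!["n1", "n3"]); // "n2" was dropped, not mis-keyed
    }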
@@ -401,12 +405,16 @@ pub(crate) fn maybe_ping_gossip_addresses<R: Rng + CryptoRng>(
     nodes
         .into_iter()
         .filter(|node| {
+            let node_gossip = match node.gossip() {
+                Err(_) => return false,
+                Ok(addr) => addr,
+            };
             let (check, ping) = {
-                let node = (node.id, node.gossip);
+                let node = (node.id, node_gossip);
                 ping_cache.check(now, node, &mut pingf)
             };
             if let Some(ping) = ping {
-                pings.push((node.gossip, ping));
+                pings.push((node_gossip, ping));
             }
             check
         })
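The filter closure above binds the sanitized gossip address once and bails out early on error; a `return false` inside a closure exits only the closure, rejecting just that node. A self-contained illustration of the idiom:

    use std::net::{Ipv4Addr, SocketAddr};

    fn main() {
        let nodes: Vec<Result<SocketAddr, ()>> =
            vec![Ok(SocketAddr::from((Ipv4Addr::LOCALHOST, 8001))), Err(())];
        let kept: Vec<_> = nodes
            .into_iter()
            .filter(|gossip| {
                // Bind once; `return false` leaves only this closure.
                let gossip = match gossip {
                    Err(_) => return false,
                    Ok(addr) => addr,
                };
                gossip.port() != 0
            })
            .collect();
        assert_eq!(kept.len(), 1);
    }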
@@ -829,7 +829,7 @@ pub(crate) mod tests {
         ping_cache
             .lock()
             .unwrap()
-            .mock_pong(new.id, new.gossip, Instant::now());
+            .mock_pong(new.id, new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         crds.write()
             .unwrap()
@@ -893,12 +893,12 @@ pub(crate) mod tests {
         let node = CrdsGossipPull::default();
         crds.insert(entry, now, GossipRoute::LocalMessage).unwrap();
         let old = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(old.id, old.gossip, Instant::now());
+        ping_cache.mock_pong(old.id, old.gossip().unwrap(), Instant::now());
         let old = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(old));
         crds.insert(old.clone(), now, GossipRoute::LocalMessage)
             .unwrap();
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(new.id, new.gossip, Instant::now());
+        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         crds.insert(new, now, GossipRoute::LocalMessage).unwrap();
         let crds = RwLock::new(crds);
@@ -956,7 +956,7 @@ pub(crate) mod tests {
             .insert(entry, now, GossipRoute::LocalMessage)
             .unwrap();
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
-        ping_cache.mock_pong(new.id, new.gossip, Instant::now());
+        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         node_crds
             .insert(new, now, GossipRoute::LocalMessage)
@@ -1058,7 +1058,7 @@ pub(crate) mod tests {
             128, // capacity
         );
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(new.id, new.gossip, Instant::now());
+        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();
         let node_crds = RwLock::new(node_crds);
@@ -1118,14 +1118,14 @@ pub(crate) mod tests {
             128, // capacity
         );
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 1);
-        ping_cache.mock_pong(new.id, new.gossip, Instant::now());
+        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         node_crds.insert(new, 0, GossipRoute::LocalMessage).unwrap();

         let mut dest_crds = Crds::default();
         let new_id = solana_sdk::pubkey::new_rand();
         let new = ContactInfo::new_localhost(&new_id, 1);
-        ping_cache.mock_pong(new.id, new.gossip, Instant::now());
+        ping_cache.mock_pong(new.id, new.gossip().unwrap(), Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
         dest_crds
             .insert(new.clone(), 0, GossipRoute::LocalMessage)
@@ -1134,7 +1134,7 @@ pub(crate) mod tests {

         // node contains a key from the dest node, but at an older local timestamp
         let same_key = ContactInfo::new_localhost(&new_id, 0);
-        ping_cache.mock_pong(same_key.id, same_key.gossip, Instant::now());
+        ping_cache.mock_pong(same_key.id, same_key.gossip().unwrap(), Instant::now());
         let same_key = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(same_key));
         assert_eq!(same_key.label(), new.label());
         assert!(same_key.wallclock() < new.wallclock());
@@ -398,7 +398,7 @@ mod tests {
         let push = CrdsGossipPush::default();
         let mut ping_cache = new_ping_cache();
         let peer = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
-        ping_cache.mock_pong(peer.id, peer.gossip, Instant::now());
+        ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
         let peer = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer));
         assert_eq!(
             crds.insert(peer.clone(), now, GossipRoute::LocalMessage),
@@ -450,7 +450,7 @@ mod tests {
             .map(|wallclock| {
                 let mut peer = ContactInfo::new_rand(&mut rng, /*pubkey=*/ None);
                 peer.wallclock = wallclock;
-                ping_cache.mock_pong(peer.id, peer.gossip, Instant::now());
+                ping_cache.mock_pong(peer.id, peer.gossip().unwrap(), Instant::now());
                 CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(peer))
             })
             .collect();
@@ -260,7 +260,9 @@ fn spy(
         };

         let found_node_by_gossip_addr = if let Some(gossip_addr) = find_node_by_gossip_addr {
-            all_peers.iter().any(|x| x.gossip == *gossip_addr)
+            all_peers
+                .iter()
+                .any(|node| node.gossip().ok() == Some(*gossip_addr))
         } else {
             false
         };
@@ -1,5 +1,10 @@
 use {
-    crate::crds_value::MAX_WALLCLOCK,
+    crate::{
+        contact_info::{
+            sanitize_quic_offset, sanitize_socket, socket_addr_unspecified, ContactInfo, Error,
+        },
+        crds_value::MAX_WALLCLOCK,
+    },
     solana_sdk::{
         pubkey::Pubkey,
         sanitize::{Sanitize, SanitizeError},
@@ -16,25 +21,25 @@ use {
 pub struct LegacyContactInfo {
     pub id: Pubkey,
     /// gossip address
-    pub gossip: SocketAddr,
+    gossip: SocketAddr,
     /// address to connect to for replication
-    pub tvu: SocketAddr,
+    tvu: SocketAddr,
     /// address to forward shreds to
-    pub tvu_forwards: SocketAddr,
+    tvu_forwards: SocketAddr,
     /// address to send repair responses to
-    pub repair: SocketAddr,
+    repair: SocketAddr,
     /// transactions address
-    pub tpu: SocketAddr,
+    tpu: SocketAddr,
     /// address to forward unprocessed transactions to
-    pub tpu_forwards: SocketAddr,
+    tpu_forwards: SocketAddr,
     /// address to which to send bank state requests
-    pub tpu_vote: SocketAddr,
+    tpu_vote: SocketAddr,
     /// address to which to send JSON-RPC requests
-    pub rpc: SocketAddr,
+    rpc: SocketAddr,
     /// websocket for JSON-RPC push notifications
-    pub rpc_pubsub: SocketAddr,
+    rpc_pubsub: SocketAddr,
     /// address to send repair requests to
-    pub serve_repair: SocketAddr,
+    serve_repair: SocketAddr,
     /// latest wallclock picked
     pub wallclock: u64,
     /// node shred version
@@ -50,6 +55,30 @@ impl Sanitize for LegacyContactInfo {
     }
 }

+macro_rules! get_socket {
+    ($name:ident) => {
+        pub fn $name(&self) -> Result<SocketAddr, Error> {
+            let socket = &self.$name;
+            sanitize_socket(socket)?;
+            Ok(socket).copied()
+        }
+    };
+}
+
+macro_rules! set_socket {
+    ($name:ident, $key:ident) => {
+        pub fn $name<T>(&mut self, socket: T) -> Result<(), Error>
+        where
+            SocketAddr: From<T>,
+        {
+            let socket = SocketAddr::from(socket);
+            sanitize_socket(&socket)?;
+            self.$key = socket;
+            Ok(())
+        }
+    };
+}
+
 #[macro_export]
 macro_rules! socketaddr {
     ($ip:expr, $port:expr) => {
@@ -126,6 +155,20 @@ impl LegacyContactInfo {
         }
     }

+    get_socket!(gossip);
+    get_socket!(tvu);
+    get_socket!(tvu_forwards);
+    get_socket!(repair);
+    get_socket!(tpu);
+    get_socket!(tpu_forwards);
+    get_socket!(tpu_vote);
+    get_socket!(rpc);
+    get_socket!(rpc_pubsub);
+    get_socket!(serve_repair);
+
+    set_socket!(set_gossip, gossip);
+    set_socket!(set_rpc, rpc);
+
     fn is_valid_ip(addr: IpAddr) -> bool {
         !(addr.is_unspecified() || addr.is_multicast())
         // || (addr.is_loopback() && !cfg_test))
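Hand-expanding one invocation makes the generated surface concrete. This is an illustrative expansion of get_socket!(gossip) and set_socket!(set_gossip, gossip) against the macro definitions above, not literal compiler output:

    impl LegacyContactInfo {
        // get_socket!(gossip) expands roughly to:
        pub fn gossip(&self) -> Result<SocketAddr, Error> {
            let socket = &self.gossip;
            sanitize_socket(socket)?;
            Ok(socket).copied() // Result<&SocketAddr, Error> -> Result<SocketAddr, Error>
        }

        // set_socket!(set_gossip, gossip) expands roughly to:
        pub fn set_gossip<T>(&mut self, socket: T) -> Result<(), Error>
        where
            SocketAddr: From<T>,
        {
            let socket = SocketAddr::from(socket);
            sanitize_socket(&socket)?;
            self.gossip = socket;
            Ok(())
        }
    }

The From<T> bound is what lets test code write `set_gossip((Ipv4Addr::LOCALHOST, 1244))` with a bare (ip, port) tuple.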
@@ -158,6 +201,35 @@ impl LegacyContactInfo {
     }
 }
+
+impl TryFrom<&ContactInfo> for LegacyContactInfo {
+    type Error = Error;
+
+    fn try_from(node: &ContactInfo) -> Result<Self, Self::Error> {
+        macro_rules! unwrap_socket {
+            ($name:ident) => {
+                node.$name().ok().unwrap_or_else(socket_addr_unspecified)
+            };
+        }
+        sanitize_quic_offset(&node.tpu().ok(), &node.tpu_quic().ok())?;
+        sanitize_quic_offset(&node.tpu_forwards().ok(), &node.tpu_forwards_quic().ok())?;
+        Ok(Self {
+            id: *node.pubkey(),
+            gossip: unwrap_socket!(gossip),
+            tvu: unwrap_socket!(tvu),
+            tvu_forwards: unwrap_socket!(tvu_forwards),
+            repair: unwrap_socket!(repair),
+            tpu: unwrap_socket!(tpu),
+            tpu_forwards: unwrap_socket!(tpu_forwards),
+            tpu_vote: unwrap_socket!(tpu_vote),
+            rpc: unwrap_socket!(rpc),
+            rpc_pubsub: unwrap_socket!(rpc_pubsub),
+            serve_repair: unwrap_socket!(serve_repair),
+            wallclock: node.wallclock(),
+            shred_version: node.shred_version(),
+        })
+    }
+}
 
 #[cfg(test)]
 mod tests {
     use super::*;
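Usage sketch for the conversion above, assuming a node: &ContactInfo that advertises no RPC socket: TryFrom only fails on an inconsistent QUIC port offset; an individual missing socket falls back to the unspecified address, which then surfaces as an Err at the sanitizing getter.

let legacy = LegacyContactInfo::try_from(node)?;
// Conversion succeeded even though no RPC socket was advertised; the
// unspecified fallback is rejected lazily, on access (assuming
// sanitize_socket rejects the unspecified address, as above).
assert!(legacy.rpc().is_err());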
@@ -292,14 +292,15 @@ fn process_rpc_url(
 
     let rpc_addrs: Vec<_> = validators
         .iter()
-        .filter_map(|contact_info| {
-            if (any || all || Some(contact_info.gossip) == entrypoint_addr)
-                && ContactInfo::is_valid_address(&contact_info.rpc, &socket_addr_space)
-            {
-                return Some(contact_info.rpc);
-            }
-            None
+        .filter(|node| {
+            any || all
+                || node
+                    .gossip()
+                    .map(|addr| Some(addr) == entrypoint_addr)
+                    .unwrap_or_default()
         })
+        .filter_map(|node| node.rpc().ok())
+        .filter(|addr| socket_addr_space.check(addr))
         .collect();
 
     if rpc_addrs.is_empty() {
@@ -484,7 +484,7 @@ fn network_run_pull(
             if node.keypair.pubkey() != other.keypair.pubkey() {
                 ping_cache.mock_pong(
                     other.keypair.pubkey(),
-                    other.contact_info.gossip,
+                    other.contact_info.gossip().unwrap(),
                     Instant::now(),
                 );
             }
@@ -130,13 +130,13 @@ fn retransmit_to(
     let dests: Vec<_> = if forwarded {
         peers
             .iter()
-            .map(|peer| peer.tvu_forwards)
-            .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
+            .filter_map(|peer| peer.tvu_forwards().ok())
+            .filter(|addr| socket_addr_space.check(addr))
             .collect()
     } else {
         peers
             .iter()
-            .map(|peer| peer.tvu)
+            .filter_map(|peer| peer.tvu().ok())
             .filter(|addr| socket_addr_space.check(addr))
             .collect()
     };
@@ -353,7 +353,7 @@ pub fn run_cluster_partition<C>(
     // Check epochs have correct number of slots
     info!("PARTITION_TEST sleeping until partition starting condition",);
     for node in &cluster_nodes {
-        let node_client = RpcClient::new_socket(node.rpc);
+        let node_client = RpcClient::new_socket(node.rpc().unwrap());
         let epoch_info = node_client.get_epoch_info().unwrap();
         assert_eq!(epoch_info.slots_in_epoch, slots_per_epoch);
     }
@@ -33,7 +33,7 @@ impl TpuInfo for ClusterTpuInfo {
             .cluster_info
             .tpu_peers()
             .into_iter()
-            .map(|ci| (ci.id, ci.tpu))
+            .filter_map(|node| Some((node.id, node.tpu().ok()?)))
             .collect();
     }
 
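One idiom worth calling out: the ? operator inside the filter_map closure applies to the closure's own Option return, so a peer whose tpu() is Err is simply skipped rather than aborting the collection. Sketch, assuming a nodes slice of contact infos:

let peers: Vec<(Pubkey, SocketAddr)> = nodes
    .iter()
    // tpu().ok()? yields None for this element only, dropping the peer.
    .filter_map(|node| Some((node.id, node.tpu().ok()?)))
    .collect();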
@@ -3447,16 +3447,16 @@ pub mod rpc_full {
             debug!("get_cluster_nodes rpc request received");
             let cluster_info = &meta.cluster_info;
             let socket_addr_space = cluster_info.socket_addr_space();
-            let valid_address_or_none = |addr: &SocketAddr| -> Option<SocketAddr> {
-                ContactInfo::is_valid_address(addr, socket_addr_space).then_some(*addr)
-            };
             let my_shred_version = cluster_info.my_shred_version();
             Ok(cluster_info
                 .all_peers()
                 .iter()
                 .filter_map(|(contact_info, _)| {
                     if my_shred_version == contact_info.shred_version
-                        && ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
+                        && contact_info
+                            .gossip()
+                            .map(|addr| socket_addr_space.check(&addr))
+                            .unwrap_or_default()
                     {
                         let (version, feature_set) = if let Some(version) =
                             cluster_info.get_node_version(&contact_info.id)
@@ -3467,10 +3467,19 @@ pub mod rpc_full {
                     };
                     Some(RpcContactInfo {
                         pubkey: contact_info.id.to_string(),
-                        gossip: Some(contact_info.gossip),
-                        tpu: valid_address_or_none(&contact_info.tpu),
-                        rpc: valid_address_or_none(&contact_info.rpc),
-                        pubsub: valid_address_or_none(&contact_info.rpc_pubsub),
+                        gossip: contact_info.gossip().ok(),
+                        tpu: contact_info
+                            .tpu()
+                            .ok()
+                            .filter(|addr| socket_addr_space.check(addr)),
+                        rpc: contact_info
+                            .rpc()
+                            .ok()
+                            .filter(|addr| socket_addr_space.check(addr)),
+                        pubsub: contact_info
+                            .rpc_pubsub()
+                            .ok()
+                            .filter(|addr| socket_addr_space.check(addr)),
                         version,
                         feature_set,
                         shred_version: Some(my_shred_version),
@@ -1,6 +1,6 @@
 use std::net::{IpAddr, SocketAddr};
 
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy)]
 pub enum SocketAddrSpace {
     Unspecified,
     Global,
@@ -18,7 +18,7 @@ impl SocketAddrSpace {
     /// Returns true if the IP address is valid.
     #[must_use]
     pub fn check(&self, addr: &SocketAddr) -> bool {
-        if self == &SocketAddrSpace::Unspecified {
+        if matches!(self, SocketAddrSpace::Unspecified) {
             return true;
         }
         // TODO: remove these once IpAddr::is_global is stable.
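Dropping PartialEq/Eq from SocketAddrSpace means self == &SocketAddrSpace::Unspecified no longer compiles; matches! keeps identical semantics through pattern matching. A desugared sketch (is_unspecified is a hypothetical helper, not part of this change):

impl SocketAddrSpace {
    // Pattern matching needs no PartialEq impl; this is what the
    // matches! invocation above reduces to.
    fn is_unspecified(&self) -> bool {
        match self {
            SocketAddrSpace::Unspecified => true,
            SocketAddrSpace::Global => false,
        }
    }
}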
@@ -606,7 +606,7 @@ fn main() {
         });
 
         info!("done found {} nodes", gossip_nodes.len());
-        gossip_nodes[0].rpc
+        gossip_nodes[0].rpc().unwrap()
     } else {
         info!("Using {:?} as the RPC address", entrypoint_addr);
         entrypoint_addr
@@ -124,7 +124,7 @@ fn verify_reachable_ports(
     }
 
     solana_net_utils::verify_reachable_ports(
-        &cluster_entrypoint.gossip,
+        &cluster_entrypoint.gossip().unwrap(),
         tcp_listeners,
         &udp_sockets,
     )
@@ -186,8 +186,10 @@ fn get_rpc_peers(
         .unwrap_or_else(|| cluster_info.my_shred_version());
     if shred_version == 0 {
         let all_zero_shred_versions = cluster_entrypoints.iter().all(|cluster_entrypoint| {
-            cluster_info
-                .lookup_contact_info_by_gossip_addr(&cluster_entrypoint.gossip)
+            cluster_entrypoint
+                .gossip()
+                .ok()
+                .and_then(|addr| cluster_info.lookup_contact_info_by_gossip_addr(&addr))
                 .map_or(false, |entrypoint| entrypoint.shred_version == 0)
         });
 
@@ -391,7 +393,7 @@ pub fn attempt_download_genesis_and_snapshot(
     authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
 ) -> Result<(), String> {
     download_then_check_genesis_hash(
-        &rpc_contact_info.rpc,
+        &rpc_contact_info.rpc().map_err(|err| format!("{err:?}"))?,
         ledger_path,
         &mut validator_config.expected_genesis_hash,
         bootstrap_config.max_genesis_archive_unpacked_size,
@@ -485,7 +487,7 @@ fn get_vetted_rpc_nodes(
     vetted_rpc_nodes.extend(
         rpc_node_details
             .into_par_iter()
-            .map(|rpc_node_details| {
+            .filter_map(|rpc_node_details| {
                 let GetRpcNodeResult {
                     rpc_contact_info,
                     snapshot_hash,
@@ -493,14 +495,15 @@ fn get_vetted_rpc_nodes(
 
                 info!(
                     "Using RPC service from node {}: {:?}",
-                    rpc_contact_info.id, rpc_contact_info.rpc
+                    rpc_contact_info.id,
+                    rpc_contact_info.rpc()
                 );
                 let rpc_client = RpcClient::new_socket_with_timeout(
-                    rpc_contact_info.rpc,
+                    rpc_contact_info.rpc().ok()?,
                     Duration::from_secs(5),
                 );
 
-                (rpc_contact_info, snapshot_hash, rpc_client)
+                Some((rpc_contact_info, snapshot_hash, rpc_client))
             })
             .filter(|(rpc_contact_info, _snapshot_hash, rpc_client)| {
                 match rpc_client.get_version() {
@@ -1194,14 +1197,14 @@ fn download_snapshot(
 
     *start_progress.write().unwrap() = ValidatorStartProgress::DownloadingSnapshot {
         slot: desired_snapshot_hash.0,
-        rpc_addr: rpc_contact_info.rpc,
+        rpc_addr: rpc_contact_info.rpc().map_err(|err| format!("{err:?}"))?,
     };
     let desired_snapshot_hash = (
         desired_snapshot_hash.0,
         solana_runtime::snapshot_hash::SnapshotHash(desired_snapshot_hash.1),
     );
     download_snapshot_archive(
-        &rpc_contact_info.rpc,
+        &rpc_contact_info.rpc().map_err(|err| format!("{err:?}"))?,
         full_snapshot_archives_dir,
         incremental_snapshot_archives_dir,
         desired_snapshot_hash,
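These bootstrap call sites sit in functions returning Result<_, String>, so rpc() errors are mapped into formatted strings rather than unwrapped. The shape, with a hypothetical helper name:

// Hypothetical wrapper showing the map_err pattern used above.
fn rpc_addr_or_message(node: &LegacyContactInfo) -> Result<SocketAddr, String> {
    node.rpc().map_err(|err| format!("{err:?}"))
}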
|
@ -1326,22 +1329,7 @@ mod tests {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_contact_info_for_tests() -> ContactInfo {
|
fn default_contact_info_for_tests() -> ContactInfo {
|
||||||
let sock_addr = SocketAddr::from(([1, 1, 1, 1], 11_111));
|
ContactInfo::new_localhost(&Pubkey::default(), /*now:*/ 1_681_834_947_321)
|
||||||
ContactInfo {
|
|
||||||
id: Pubkey::default(),
|
|
||||||
gossip: sock_addr,
|
|
||||||
tvu: sock_addr,
|
|
||||||
tvu_forwards: sock_addr,
|
|
||||||
repair: sock_addr,
|
|
||||||
tpu: sock_addr,
|
|
||||||
tpu_forwards: sock_addr,
|
|
||||||
tpu_vote: sock_addr,
|
|
||||||
rpc: sock_addr,
|
|
||||||
rpc_pubsub: sock_addr,
|
|
||||||
serve_repair: sock_addr,
|
|
||||||
wallclock: 123456789,
|
|
||||||
shred_version: 1,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|
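With the socket fields private, the old struct-literal construction in tests cannot compile outside the gossip crate; constructors plus the set_* helpers are the supported path. A sketch, assuming LegacyContactInfo keeps its new_localhost constructor:

use std::net::Ipv4Addr;

// Hypothetical test setup replacing the deleted struct literal.
let mut info = LegacyContactInfo::new_localhost(&Pubkey::default(), /*wallclock:*/ 0);
info.set_rpc((Ipv4Addr::LOCALHOST, 8899)).unwrap();
assert_eq!(info.rpc().unwrap().port(), 8899);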