reworks weights for gossip pull-requests peer sampling (#28463)

Amplifying gossip peer sampling weights by the time since the last
pull request has the undesired consequence that a node coming back
online is hit with a huge number of pull requests all at once.
The "time since last request" term is also unnecessary in the weights,
because as long as sampling probabilities are non-zero, every node is
almost surely selected in the sample periodically.
This commit reworks peer sampling probabilities to use just (dampened)
stakes as the weights.
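
For reference, the reworked weighting (see the new_pull_request hunk below) caps each node's stake at the local node's own stake, converts it to SOL, and dampens it by squaring the bit-length. A minimal standalone sketch of that computation; `sampling_weight` is a hypothetical helper name and the lamports-per-SOL constant is inlined here:

    // Hypothetical sketch of the dampened stake weight used for pull-request
    // peer sampling; mirrors the logic added in new_pull_request below.
    fn sampling_weight(stake_lamports: u64, self_stake_lamports: u64) -> u64 {
        const LAMPORTS_PER_SOL: u64 = 1_000_000_000;
        // Cap at the local node's own stake, then convert lamports to SOL.
        let stake = stake_lamports.min(self_stake_lamports) / LAMPORTS_PER_SOL;
        // Bit-length of the capped stake: 0 for zero stake, floor(log2) + 1 otherwise.
        let bits = u64::from(u64::BITS - stake.leading_zeros());
        // +1 keeps zero-stake nodes at weight 1 (still sampled); squaring mildly
        // favors higher-staked nodes without letting them dominate.
        bits.saturating_add(1).saturating_pow(2)
    }

Because every weight is at least 1, unstaked nodes are still sampled with non-zero probability, which is what makes the "time since last request" term unnecessary.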
behzad nouri 2023-01-14 15:44:38 +00:00 committed by GitHub
parent 71713a92c1
commit d4ce59eee7
6 changed files with 90 additions and 202 deletions


@@ -1487,12 +1487,6 @@ impl ClusterInfo {
         self.append_entrypoint_to_pulls(thread_pool, &mut pulls);
         let num_requests = pulls.values().map(Vec::len).sum::<usize>() as u64;
         self.stats.new_pull_requests_count.add_relaxed(num_requests);
-        {
-            let _st = ScopedTimer::from(&self.stats.mark_pull_request);
-            for peer in pulls.keys() {
-                self.gossip.mark_pull_request_creation_time(peer.id, now);
-            }
-        }
         let self_info = CrdsData::LegacyContactInfo(self.my_contact_info());
         let self_info = CrdsValue::new_signed(self_info, &self.keypair());
         let pulls = pulls
@@ -4705,17 +4699,6 @@ RPC Enabled Nodes: 1"#;
             (0, 0, NO_ENTRIES),
             cluster_info.handle_pull_response(&entrypoint_pubkey, data, &timeouts)
         );
-        let now = timestamp();
-        for peer in peers {
-            cluster_info
-                .gossip
-                .mark_pull_request_creation_time(peer, now);
-        }
-        assert_eq!(
-            cluster_info.gossip.pull.pull_request_time().len(),
-            CRDS_UNIQUE_PUBKEY_CAPACITY
-        );
     }

     #[test]


@@ -123,7 +123,6 @@ pub struct GossipStats {
     pub(crate) handle_batch_pull_requests_time: Counter,
     pub(crate) handle_batch_pull_responses_time: Counter,
     pub(crate) handle_batch_push_messages_time: Counter,
-    pub(crate) mark_pull_request: Counter,
     pub(crate) new_pull_requests: Counter,
     pub(crate) new_pull_requests_count: Counter,
     pub(crate) new_pull_requests_pings_count: Counter,
@@ -373,7 +372,6 @@ pub(crate) fn submit_gossip_stats(
         ),
         ("epoch_slots_lookup", stats.epoch_slots_lookup.clear(), i64),
         ("new_pull_requests", stats.new_pull_requests.clear(), i64),
-        ("mark_pull_request", stats.mark_pull_request.clear(), i64),
         (
             "gossip_pull_request_no_budget",
             stats.gossip_pull_request_no_budget.clear(),


@@ -18,6 +18,7 @@ use {
        ping_pong::PingCache,
    },
    itertools::Itertools,
+   rand::{CryptoRng, Rng},
    rayon::ThreadPool,
    solana_ledger::shred::Shred,
    solana_sdk::{
@@ -31,7 +32,7 @@ use {
        collections::{HashMap, HashSet},
        net::SocketAddr,
        sync::{Mutex, RwLock},
-       time::Duration,
+       time::{Duration, Instant},
    },
 };
@@ -227,14 +228,6 @@ impl CrdsGossip {
         )
     }

-    /// Time when a request to `from` was initiated.
-    ///
-    /// This is used for weighted random selection during `new_pull_request`
-    /// It's important to use the local nodes request creation time as the weight
-    /// instead of the response received time otherwise failed nodes will increase their weight.
-    pub fn mark_pull_request_creation_time(&self, from: Pubkey, now: u64) {
-        self.pull.mark_pull_request_creation_time(from, now)
-    }
     /// Process a pull request and create a response.
     pub fn process_pull_requests<I>(&self, callers: I, now: u64)
     where
@@ -339,40 +332,51 @@ impl CrdsGossip {
     }
 }

-/// Computes a normalized (log of actual stake) stake.
-pub fn get_stake<S: std::hash::BuildHasher>(id: &Pubkey, stakes: &HashMap<Pubkey, u64, S>) -> f32 {
-    // cap the max balance to u32 max (it should be plenty)
-    let bal = f64::from(u32::max_value()).min(*stakes.get(id).unwrap_or(&0) as f64);
-    1_f32.max((bal as f32).ln())
-}
-
-/// Computes bounded weight given some max, a time since last selected, and a stake value.
-///
-/// The minimum stake is 1 and not 0 to allow 'time since last' picked to factor in.
-pub fn get_weight(max_weight: f32, time_since_last_selected: u32, stake: f32) -> f32 {
-    let mut weight = time_since_last_selected as f32 * stake;
-    if weight.is_infinite() {
-        weight = max_weight;
-    }
-    1.0_f32.max(weight.min(max_weight))
-}
-
-// Dedups gossip addresses, keeping only the one with the highest weight.
-pub(crate) fn dedup_gossip_addresses<I, T: PartialOrd>(
-    nodes: I,
-) -> HashMap</*gossip:*/ SocketAddr, (/*weight:*/ T, ContactInfo)>
-where
-    I: IntoIterator<Item = (/*weight:*/ T, ContactInfo)>,
-{
+// Dedups gossip addresses, keeping only the one with the highest stake.
+pub(crate) fn dedup_gossip_addresses(
+    nodes: impl IntoIterator<Item = ContactInfo>,
+    stakes: &HashMap<Pubkey, u64>,
+) -> HashMap</*gossip:*/ SocketAddr, (/*stake:*/ u64, ContactInfo)> {
     nodes
         .into_iter()
-        .into_grouping_map_by(|(_weight, node)| node.gossip)
-        .aggregate(|acc, _node_gossip, (weight, node)| match acc {
-            Some((ref w, _)) if w >= &weight => acc,
-            Some(_) | None => Some((weight, node)),
+        .into_grouping_map_by(|node| node.gossip)
+        .aggregate(|acc, _node_gossip, node| {
+            let stake = stakes.get(&node.id).copied().unwrap_or_default();
+            match acc {
+                Some((ref s, _)) if s >= &stake => acc,
+                Some(_) | None => Some((stake, node)),
+            }
         })
 }

+// Pings gossip addresses if needed.
+// Returns nodes which have recently responded to a ping message.
+#[must_use]
+pub(crate) fn maybe_ping_gossip_addresses<R: Rng + CryptoRng>(
+    rng: &mut R,
+    nodes: impl IntoIterator<Item = ContactInfo>,
+    keypair: &Keypair,
+    ping_cache: &Mutex<PingCache>,
+    pings: &mut Vec<(SocketAddr, Ping)>,
+) -> Vec<ContactInfo> {
+    let mut ping_cache = ping_cache.lock().unwrap();
+    let mut pingf = move || Ping::new_rand(rng, keypair).ok();
+    let now = Instant::now();
+    nodes
+        .into_iter()
+        .filter(|node| {
+            let (check, ping) = {
+                let node = (node.id, node.gossip);
+                ping_cache.check(now, node, &mut pingf)
+            };
+            if let Some(ping) = ping {
+                pings.push((node.gossip, ping));
+            }
+            check
+        })
+        .collect()
+}
+
 #[cfg(test)]
 mod test {
     use {


@@ -13,17 +13,16 @@
 use {
     crate::{
-        cluster_info::{Ping, CRDS_UNIQUE_PUBKEY_CAPACITY},
+        cluster_info::Ping,
         cluster_info_metrics::GossipStats,
         crds::{Crds, GossipRoute, VersionedCrdsValue},
-        crds_gossip::{self, get_stake, get_weight},
+        crds_gossip,
         crds_gossip_error::CrdsGossipError,
         crds_value::CrdsValue,
         legacy_contact_info::LegacyContactInfo as ContactInfo,
         ping_pong::PingCache,
     },
     itertools::Itertools,
-    lru::LruCache,
     rand::{
         distributions::{Distribution, WeightedIndex},
         Rng,
@@ -32,6 +31,7 @@ use {
     solana_bloom::bloom::{AtomicBloom, Bloom},
     solana_sdk::{
         hash::{hash, Hash},
+        native_token::LAMPORTS_PER_SOL,
         pubkey::Pubkey,
         signature::{Keypair, Signer},
     },
@@ -45,7 +45,7 @@ use {
             atomic::{AtomicI64, AtomicUsize, Ordering},
             Mutex, RwLock,
         },
-        time::{Duration, Instant},
+        time::Duration,
     },
 };
@@ -192,8 +192,6 @@
 }

 pub struct CrdsGossipPull {
-    /// Timestamp of last request
-    pull_request_time: RwLock<LruCache<Pubkey, /*timestamp:*/ u64>>,
     // Hash value and record time (ms) of the pull responses which failed to be
     // inserted in crds table; Preserved to stop the sender to send back the
     // same outdated payload again by adding them to the filter for the next
@@ -207,7 +205,6 @@
 impl Default for CrdsGossipPull {
     fn default() -> Self {
         Self {
-            pull_request_time: RwLock::new(LruCache::new(CRDS_UNIQUE_PUBKEY_CAPACITY)),
             failed_inserts: RwLock::default(),
             crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
             msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
@@ -232,8 +229,8 @@
         pings: &mut Vec<(SocketAddr, Ping)>,
         socket_addr_space: &SocketAddrSpace,
     ) -> Result<HashMap<ContactInfo, Vec<CrdsFilter>>, CrdsGossipError> {
-        // Gossip peers and respective sampling weights.
-        let peers = self.pull_options(
+        // Active and valid gossip nodes with matching shred-version.
+        let nodes = self.pull_options(
             crds,
             &self_keypair.pubkey(),
             self_shred_version,
@@ -242,35 +239,37 @@
             stakes,
             socket_addr_space,
         );
-        // Check for nodes which have responded to ping messages.
         let mut rng = rand::thread_rng();
-        let peers: Vec<_> = {
-            let mut ping_cache = ping_cache.lock().unwrap();
-            let mut pingf = move || Ping::new_rand(&mut rng, self_keypair).ok();
-            let now = Instant::now();
-            peers
-                .into_iter()
-                .filter(|(_weight, peer)| {
-                    let node = (peer.id, peer.gossip);
-                    let (check, ping) = ping_cache.check(now, node, &mut pingf);
-                    if let Some(ping) = ping {
-                        pings.push((peer.gossip, ping));
-                    }
-                    check
-                })
-                .collect()
-        };
-        let (weights, peers): (Vec<_>, Vec<_>) = crds_gossip::dedup_gossip_addresses(peers)
-            .into_values()
-            .unzip();
-        if peers.is_empty() {
+        // Check for nodes which have responded to ping messages.
+        let nodes = crds_gossip::maybe_ping_gossip_addresses(
+            &mut rng,
+            nodes,
+            self_keypair,
+            ping_cache,
+            pings,
+        );
+        let stake_cap = stakes
+            .get(&self_keypair.pubkey())
+            .copied()
+            .unwrap_or_default();
+        let (weights, nodes): (Vec<u64>, Vec<ContactInfo>) =
+            crds_gossip::dedup_gossip_addresses(nodes, stakes)
+                .into_values()
+                .map(|(stake, node)| {
+                    let stake = stake.min(stake_cap) / LAMPORTS_PER_SOL;
+                    let weight = u64::BITS - stake.leading_zeros();
+                    let weight = u64::from(weight).saturating_add(1).saturating_pow(2);
+                    (weight, node)
+                })
+                .unzip();
+        if nodes.is_empty() {
             return Err(CrdsGossipError::NoPeers);
         }
         // Associate each pull-request filter with a randomly selected peer.
         let filters = self.build_crds_filters(thread_pool, crds, bloom_size);
         let dist = WeightedIndex::new(&weights).unwrap();
-        let peers = repeat_with(|| peers[dist.sample(&mut rng)].clone());
-        Ok(peers.zip(filters).into_group_map())
+        let nodes = repeat_with(|| nodes[dist.sample(&mut rng)].clone());
+        Ok(nodes.zip(filters).into_group_map())
     }

     fn pull_options(
@@ -282,11 +281,9 @@
         gossip_validators: Option<&HashSet<Pubkey>>,
         stakes: &HashMap<Pubkey, u64>,
         socket_addr_space: &SocketAddrSpace,
-    ) -> Vec<(/*weight:*/ u64, ContactInfo)> {
+    ) -> Vec<ContactInfo> {
         let mut rng = rand::thread_rng();
         let active_cutoff = now.saturating_sub(PULL_ACTIVE_TIMEOUT_MS);
-        let pull_request_time = self.pull_request_time.read().unwrap();
-        // crds should be locked last after self.pull_request_time.
         let crds = crds.read().unwrap();
         crds.get_nodes()
             .filter_map(|value| {
@@ -309,31 +306,10 @@
                     && gossip_validators
                         .map_or(true, |gossip_validators| gossip_validators.contains(&v.id))
             })
-            .map(|item| {
-                let max_weight = f32::from(u16::max_value()) - 1.0;
-                let req_time: u64 = pull_request_time
-                    .peek(&item.id)
-                    .copied()
-                    .unwrap_or_default();
-                let since = (now.saturating_sub(req_time).min(3600 * 1000) / 1024) as u32;
-                let stake = get_stake(&item.id, stakes);
-                let weight = get_weight(max_weight, since, stake);
-                // Weights are bounded by max_weight defined above.
-                // So this type-cast should be safe.
-                ((weight * 100.0) as u64, item.clone())
-            })
+            .cloned()
             .collect()
     }

-    /// Time when a request to `from` was initiated.
-    ///
-    /// This is used for weighted random selection during `new_pull_request`
-    /// It's important to use the local nodes request creation time as the weight
-    /// instead of the response received time otherwise failed nodes will increase their weight.
-    pub(crate) fn mark_pull_request_creation_time(&self, from: Pubkey, now: u64) {
-        self.pull_request_time.write().unwrap().put(from, now);
-    }
     /// Process a pull request
     pub(crate) fn process_pull_requests<I>(crds: &RwLock<Crds>, callers: I, now: u64)
     where
@@ -633,27 +609,13 @@
     // Only for tests and simulations.
     pub(crate) fn mock_clone(&self) -> Self {
-        let pull_request_time = {
-            let pull_request_time = self.pull_request_time.read().unwrap();
-            let mut clone = LruCache::new(pull_request_time.cap());
-            for (k, v) in pull_request_time.iter().rev() {
-                clone.put(*k, *v);
-            }
-            clone
-        };
         let failed_inserts = self.failed_inserts.read().unwrap().clone();
         Self {
-            pull_request_time: RwLock::new(pull_request_time),
             failed_inserts: RwLock::new(failed_inserts),
             num_pulls: AtomicUsize::new(self.num_pulls.load(Ordering::Relaxed)),
             ..*self
         }
     }
-
-    #[cfg(test)]
-    pub(crate) fn pull_request_time(&self) -> std::sync::RwLockReadGuard<LruCache<Pubkey, u64>> {
-        self.pull_request_time.read().unwrap()
-    }
 }

 #[cfg(test)]
@@ -673,8 +635,8 @@
         solana_sdk::{
             hash::{hash, HASH_BYTES},
             packet::PACKET_DATA_SIZE,
-            timing::timestamp,
         },
+        std::time::Instant,
     };

     #[cfg(debug_assertions)]
@@ -739,7 +701,7 @@
         }
         let now = 1024;
         let crds = RwLock::new(crds);
-        let mut options = node.pull_options(
+        let options = node.pull_options(
             &crds,
             &me.label().pubkey(),
             0,
@@ -749,9 +711,6 @@
             &SocketAddrSpace::Unspecified,
         );
         assert!(!options.is_empty());
-        options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
-        // check that the highest stake holder is also the heaviest weighted.
-        assert_eq!(stakes[&options[0].1.id], 3000_u64);
     }

     #[test]
@@ -809,7 +768,7 @@
                 &SocketAddrSpace::Unspecified,
             )
             .iter()
-            .map(|(_, peer)| peer.id)
+            .map(|peer| peer.id)
             .collect::<Vec<_>>();
         assert_eq!(options.len(), 1);
         assert!(!options.contains(&spy.pubkey()));
@@ -827,7 +786,7 @@
                 &SocketAddrSpace::Unspecified,
             )
             .iter()
-            .map(|(_, peer)| peer.id)
+            .map(|peer| peer.id)
             .collect::<Vec<_>>();
         assert_eq!(options.len(), 3);
         assert!(options.contains(&me.pubkey()));
@@ -897,7 +856,7 @@
             &SocketAddrSpace::Unspecified,
         );
         assert_eq!(options.len(), 1);
-        assert_eq!(options[0].1.id, node_123.pubkey());
+        assert_eq!(options[0].id, node_123.pubkey());
     }

     #[test]
@@ -1079,7 +1038,6 @@
         let peers: Vec<_> = req.unwrap().into_keys().collect();
         assert_eq!(peers, vec![new.contact_info().unwrap().clone()]);
-        node.mark_pull_request_creation_time(new.contact_info().unwrap().id, now);
         let offline = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
         let offline = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(offline));
         crds.write()
@@ -1129,13 +1087,11 @@
         let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
         ping_cache.mock_pong(new.id, new.gossip, Instant::now());
         let new = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(new));
-        crds.insert(new.clone(), now, GossipRoute::LocalMessage)
-            .unwrap();
+        crds.insert(new, now, GossipRoute::LocalMessage).unwrap();
         let crds = RwLock::new(crds);
         // set request creation time to now.
         let now = now + 50_000;
-        node.mark_pull_request_creation_time(new.label().pubkey(), now);
         // odds of getting the other request should be close to 1.
         let now = now + 1_000;
@@ -1164,43 +1120,7 @@
             .take(100)
             .filter(|peer| peer != old)
             .count();
-        assert!(count < 2, "count of peer != old: {count}");
-    }
-
-    #[test]
-    fn test_pull_request_time() {
-        const NUM_REPS: usize = 2 * CRDS_UNIQUE_PUBKEY_CAPACITY;
-        let mut rng = rand::thread_rng();
-        let pubkeys: Vec<_> = repeat_with(Pubkey::new_unique).take(NUM_REPS).collect();
-        let node = CrdsGossipPull::default();
-        let mut requests = HashMap::new();
-        let now = timestamp();
-        for k in 0..NUM_REPS {
-            let pubkey = pubkeys[rng.gen_range(0, pubkeys.len())];
-            let now = now + k as u64;
-            node.mark_pull_request_creation_time(pubkey, now);
-            *requests.entry(pubkey).or_default() = now;
-        }
-        let pull_request_time = node.pull_request_time.read().unwrap();
-        assert!(pull_request_time.len() <= CRDS_UNIQUE_PUBKEY_CAPACITY);
-        // Assert that timestamps match most recent request.
-        for (pk, ts) in pull_request_time.iter() {
-            assert_eq!(*ts, requests[pk]);
-        }
-        // Assert that most recent pull timestamps are maintained.
-        let max_ts = requests
-            .iter()
-            .filter(|(pk, _)| !pull_request_time.contains(*pk))
-            .map(|(_, ts)| *ts)
-            .max()
-            .unwrap();
-        let min_ts = requests
-            .iter()
-            .filter(|(pk, _)| pull_request_time.contains(*pk))
-            .map(|(_, ts)| *ts)
-            .min()
-            .unwrap();
-        assert!(max_ts <= min_ts);
+        assert!(count < 75, "count of peer != old: {}", count);
     }

     #[test]


@@ -41,7 +41,6 @@ use {
             atomic::{AtomicUsize, Ordering},
             Mutex, RwLock,
         },
-        time::Instant,
     },
 };
@@ -259,29 +258,14 @@ impl CrdsGossipPush {
             socket_addr_space,
         );
         // Check for nodes which have responded to ping messages.
-        let nodes: Vec<_> = {
-            let mut ping_cache = ping_cache.lock().unwrap();
-            let mut pingf = move || Ping::new_rand(&mut rng, self_keypair).ok();
-            let now = Instant::now();
-            nodes
-                .into_iter()
-                .filter(|node| {
-                    let (check, ping) = {
-                        let node = (node.id, node.gossip);
-                        ping_cache.check(now, node, &mut pingf)
-                    };
-                    if let Some(ping) = ping {
-                        pings.push((node.gossip, ping));
-                    }
-                    check
-                })
-                .collect()
-        };
-        let nodes = nodes.into_iter().map(|node| {
-            let stake = stakes.get(&node.id).copied().unwrap_or_default();
-            (stake, node)
-        });
-        let nodes = crds_gossip::dedup_gossip_addresses(nodes)
+        let nodes = crds_gossip::maybe_ping_gossip_addresses(
+            &mut rng,
+            nodes,
+            self_keypair,
+            ping_cache,
+            pings,
+        );
+        let nodes = crds_gossip::dedup_gossip_addresses(nodes, stakes)
             .into_values()
             .map(|(_stake, node)| node.id)
             .collect::<Vec<_>>();
@@ -353,7 +337,7 @@ mod tests {
     use {
         super::*,
         crate::{crds_value::CrdsData, socketaddr},
-        std::time::Duration,
+        std::time::{Duration, Instant},
     };

     fn new_ping_cache() -> PingCache {


@@ -565,7 +565,6 @@ fn network_run_pull(
             bytes += serialized_size(&rsp).unwrap() as usize;
             msgs += rsp.len();
             if let Some(node) = network.get(&from) {
-                node.gossip.mark_pull_request_creation_time(from, now);
                 let mut stats = ProcessPullStats::default();
                 let (vers, vers_expired_timeout, failed_inserts) = node
                     .gossip