//! Crds Gossip
//! This module ties together Crds and the push and pull gossip overlays. The interface is
//! designed to run with a simulator or over a UDP network connection with messages up to a
//! packet::PACKET_DATA_SIZE size.
use {
|
|
|
|
crate::{
|
|
|
|
cluster_info::Ping,
|
|
|
|
contact_info::ContactInfo,
|
|
|
|
crds::Crds,
|
|
|
|
crds_gossip_error::CrdsGossipError,
|
|
|
|
crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
|
|
|
|
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
|
|
|
|
crds_value::{CrdsData, CrdsValue},
|
|
|
|
duplicate_shred::{self, DuplicateShredIndex, LeaderScheduleFn, MAX_DUPLICATE_SHREDS},
|
|
|
|
ping_pong::PingCache,
|
|
|
|
},
|
|
|
|
rayon::ThreadPool,
|
|
|
|
solana_ledger::shred::Shred,
|
|
|
|
solana_sdk::{
|
|
|
|
hash::Hash,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
signature::{Keypair, Signer},
|
|
|
|
timing::timestamp,
|
|
|
|
},
|
|
|
|
std::{
|
|
|
|
collections::{HashMap, HashSet},
|
|
|
|
net::SocketAddr,
|
|
|
|
sync::Mutex,
|
|
|
|
time::Duration,
|
|
|
|
},
|
2021-04-20 11:06:13 -07:00
|
|
|
};
|
2018-11-15 13:23:26 -08:00
|
|
|
|
2021-07-09 06:10:08 -07:00
|
|
|
/// Top-level gossip state: ties the crds store together with the push and
/// pull gossip overlay protocols.
#[derive(Default)]
pub struct CrdsGossip {
    // The cluster replicated data store shared by both overlays.
    pub crds: Crds,
    // Push-overlay state (active set, received cache, prune timeouts).
    pub push: CrdsGossipPush,
    // Pull-overlay state (pull-request bookkeeping, crds timeouts, failed inserts).
    pub pull: CrdsGossipPull,
}
|
|
|
|
|
|
|
|
impl CrdsGossip {
|
2019-06-26 00:30:16 -07:00
|
|
|
/// process a push message to the network
|
2021-07-08 18:42:13 -07:00
|
|
|
/// Returns unique origins' pubkeys of upserted values.
|
2019-06-26 00:30:16 -07:00
|
|
|
pub fn process_push_message(
|
|
|
|
&mut self,
|
|
|
|
from: &Pubkey,
|
|
|
|
values: Vec<CrdsValue>,
|
|
|
|
now: u64,
|
2021-07-08 18:42:13 -07:00
|
|
|
) -> HashSet<Pubkey> {
|
2021-07-13 07:04:25 -07:00
|
|
|
self.push
|
|
|
|
.process_push_message(&mut self.crds, from, values, now)
|
2018-11-15 13:23:26 -08:00
|
|
|
.into_iter()
|
2021-07-13 07:04:25 -07:00
|
|
|
.filter_map(Result::ok)
|
2018-12-07 19:01:28 -08:00
|
|
|
.collect()
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
|
|
|
|
2019-06-26 00:30:16 -07:00
|
|
|
/// remove redundant paths in the network
|
2021-05-13 06:50:16 -07:00
|
|
|
pub fn prune_received_cache<I>(
|
2021-07-13 07:04:25 -07:00
|
|
|
&self,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_pubkey: &Pubkey,
|
2021-05-13 06:50:16 -07:00
|
|
|
origins: I, // Unique pubkeys of crds values' owners.
|
2019-06-26 00:30:16 -07:00
|
|
|
stakes: &HashMap<Pubkey, u64>,
|
2021-05-13 06:50:16 -07:00
|
|
|
) -> HashMap</*gossip peer:*/ Pubkey, /*origins:*/ Vec<Pubkey>>
|
|
|
|
where
|
|
|
|
I: IntoIterator<Item = Pubkey>,
|
|
|
|
{
|
2021-07-13 07:04:25 -07:00
|
|
|
self.push
|
|
|
|
.prune_received_cache_many(self_pubkey, origins, stakes)
|
2019-06-26 00:30:16 -07:00
|
|
|
}
|
|
|
|
|
2020-10-13 18:10:25 -07:00
|
|
|
pub fn new_push_messages(
|
|
|
|
&mut self,
|
2021-04-27 17:15:11 -07:00
|
|
|
pending_push_messages: Vec<CrdsValue>,
|
2020-10-13 18:10:25 -07:00
|
|
|
now: u64,
|
2021-03-12 07:50:14 -08:00
|
|
|
) -> HashMap<Pubkey, Vec<CrdsValue>> {
|
2021-07-08 18:42:13 -07:00
|
|
|
for entry in pending_push_messages {
|
|
|
|
let _ = self.crds.insert(entry, now);
|
|
|
|
}
|
2021-03-12 07:50:14 -08:00
|
|
|
self.push.new_push_messages(&self.crds, now)
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
|
|
|
|
2021-01-24 07:47:43 -08:00
|
|
|
pub(crate) fn push_duplicate_shred(
|
|
|
|
&mut self,
|
|
|
|
keypair: &Keypair,
|
|
|
|
shred: &Shred,
|
|
|
|
other_payload: &[u8],
|
|
|
|
leader_schedule: Option<impl LeaderScheduleFn>,
|
|
|
|
// Maximum serialized size of each DuplicateShred chunk payload.
|
|
|
|
max_payload_size: usize,
|
|
|
|
) -> Result<(), duplicate_shred::Error> {
|
|
|
|
let pubkey = keypair.pubkey();
|
|
|
|
// Skip if there are already records of duplicate shreds for this slot.
|
|
|
|
let shred_slot = shred.slot();
|
|
|
|
if self
|
|
|
|
.crds
|
|
|
|
.get_records(&pubkey)
|
|
|
|
.any(|value| match &value.value.data {
|
|
|
|
CrdsData::DuplicateShred(_, value) => value.slot == shred_slot,
|
|
|
|
_ => false,
|
|
|
|
})
|
|
|
|
{
|
|
|
|
return Ok(());
|
|
|
|
}
|
|
|
|
let chunks = duplicate_shred::from_shred(
|
|
|
|
shred.clone(),
|
|
|
|
pubkey,
|
|
|
|
Vec::from(other_payload),
|
|
|
|
leader_schedule,
|
|
|
|
timestamp(),
|
|
|
|
max_payload_size,
|
|
|
|
)?;
|
|
|
|
// Find the index of oldest duplicate shred.
|
|
|
|
let mut num_dup_shreds = 0;
|
|
|
|
let offset = self
|
|
|
|
.crds
|
|
|
|
.get_records(&pubkey)
|
|
|
|
.filter_map(|value| match &value.value.data {
|
|
|
|
CrdsData::DuplicateShred(ix, value) => {
|
|
|
|
num_dup_shreds += 1;
|
|
|
|
Some((value.wallclock, *ix))
|
|
|
|
}
|
|
|
|
_ => None,
|
|
|
|
})
|
|
|
|
.min() // Override the oldest records.
|
|
|
|
.map(|(_ /*wallclock*/, ix)| ix)
|
|
|
|
.unwrap_or(0);
|
|
|
|
let offset = if num_dup_shreds < MAX_DUPLICATE_SHREDS {
|
|
|
|
num_dup_shreds
|
|
|
|
} else {
|
|
|
|
offset
|
|
|
|
};
|
2021-07-08 18:42:13 -07:00
|
|
|
let entries = chunks.enumerate().map(|(k, chunk)| {
|
|
|
|
let index = (offset + k as DuplicateShredIndex) % MAX_DUPLICATE_SHREDS;
|
|
|
|
let data = CrdsData::DuplicateShred(index, chunk);
|
|
|
|
CrdsValue::new_signed(data, keypair)
|
|
|
|
});
|
|
|
|
let now = timestamp();
|
|
|
|
for entry in entries {
|
|
|
|
if let Err(err) = self.crds.insert(entry, now) {
|
|
|
|
error!("push_duplicate_shred faild: {:?}", err);
|
|
|
|
}
|
|
|
|
}
|
2021-01-24 07:47:43 -08:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2018-11-15 13:23:26 -08:00
|
|
|
/// add the `from` to the peer's filter of nodes
|
2018-12-01 12:00:30 -08:00
|
|
|
pub fn process_prune_msg(
|
2020-11-05 07:42:00 -08:00
|
|
|
&self,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_pubkey: &Pubkey,
|
2019-03-09 19:28:43 -08:00
|
|
|
peer: &Pubkey,
|
|
|
|
destination: &Pubkey,
|
2018-12-01 12:00:30 -08:00
|
|
|
origin: &[Pubkey],
|
|
|
|
wallclock: u64,
|
|
|
|
now: u64,
|
|
|
|
) -> Result<(), CrdsGossipError> {
|
|
|
|
let expired = now > wallclock + self.push.prune_timeout;
|
|
|
|
if expired {
|
|
|
|
return Err(CrdsGossipError::PruneMessageTimeout);
|
|
|
|
}
|
2021-07-09 06:10:08 -07:00
|
|
|
if self_pubkey == destination {
|
|
|
|
self.push.process_prune_msg(self_pubkey, peer, origin);
|
2018-12-01 12:00:30 -08:00
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(CrdsGossipError::BadPruneDestination)
|
|
|
|
}
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// refresh the push active set
|
|
|
|
/// * ratio - number of actives to rotate
|
2020-09-11 12:00:16 -07:00
|
|
|
pub fn refresh_push_active_set(
|
2021-07-13 07:04:25 -07:00
|
|
|
&self,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_pubkey: &Pubkey,
|
|
|
|
self_shred_version: u16,
|
2020-09-11 12:00:16 -07:00
|
|
|
stakes: &HashMap<Pubkey, u64>,
|
|
|
|
gossip_validators: Option<&HashSet<Pubkey>>,
|
|
|
|
) {
|
2018-11-15 13:23:26 -08:00
|
|
|
self.push.refresh_push_active_set(
|
|
|
|
&self.crds,
|
2019-02-20 20:02:47 -08:00
|
|
|
stakes,
|
2020-09-11 12:00:16 -07:00
|
|
|
gossip_validators,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_pubkey,
|
|
|
|
self_shred_version,
|
2021-03-24 11:33:56 -07:00
|
|
|
self.crds.num_nodes(),
|
2018-11-15 13:23:26 -08:00
|
|
|
CRDS_GOSSIP_NUM_ACTIVE,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// generate a random request
|
2021-07-09 06:10:08 -07:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2018-11-15 13:23:26 -08:00
|
|
|
pub fn new_pull_request(
|
|
|
|
&self,
|
2020-09-29 16:06:02 -07:00
|
|
|
thread_pool: &ThreadPool,
|
2021-04-20 11:06:13 -07:00
|
|
|
self_keypair: &Keypair,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_shred_version: u16,
|
2018-11-15 13:23:26 -08:00
|
|
|
now: u64,
|
2020-09-11 12:00:16 -07:00
|
|
|
gossip_validators: Option<&HashSet<Pubkey>>,
|
2019-02-20 20:02:47 -08:00
|
|
|
stakes: &HashMap<Pubkey, u64>,
|
2019-08-13 18:04:14 -07:00
|
|
|
bloom_size: usize,
|
2021-04-20 11:06:13 -07:00
|
|
|
ping_cache: &Mutex<PingCache>,
|
|
|
|
pings: &mut Vec<(SocketAddr, Ping)>,
|
2021-04-28 06:19:12 -07:00
|
|
|
) -> Result<(ContactInfo, Vec<CrdsFilter>), CrdsGossipError> {
|
2020-05-05 20:15:19 -07:00
|
|
|
self.pull.new_pull_request(
|
2020-09-29 16:06:02 -07:00
|
|
|
thread_pool,
|
2020-05-05 20:15:19 -07:00
|
|
|
&self.crds,
|
2021-04-20 11:06:13 -07:00
|
|
|
self_keypair,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_shred_version,
|
2020-05-05 20:15:19 -07:00
|
|
|
now,
|
2020-09-11 12:00:16 -07:00
|
|
|
gossip_validators,
|
2020-05-05 20:15:19 -07:00
|
|
|
stakes,
|
|
|
|
bloom_size,
|
2021-04-20 11:06:13 -07:00
|
|
|
ping_cache,
|
|
|
|
pings,
|
2020-05-05 20:15:19 -07:00
|
|
|
)
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// time when a request to `from` was initiated
|
2019-03-07 17:47:23 -08:00
|
|
|
/// This is used for weighted random selection during `new_pull_request`
|
2018-11-15 13:23:26 -08:00
|
|
|
/// It's important to use the local nodes request creation time as the weight
|
2019-03-07 17:47:23 -08:00
|
|
|
/// instead of the response received time otherwise failed nodes will increase their weight.
|
2021-07-11 08:32:10 -07:00
|
|
|
pub fn mark_pull_request_creation_time(&self, from: Pubkey, now: u64) {
|
2018-11-15 13:23:26 -08:00
|
|
|
self.pull.mark_pull_request_creation_time(from, now)
|
|
|
|
}
|
|
|
|
/// process a pull request and create a response
|
2020-10-28 10:03:02 -07:00
|
|
|
pub fn process_pull_requests<I>(&mut self, callers: I, now: u64)
|
|
|
|
where
|
|
|
|
I: IntoIterator<Item = CrdsValue>,
|
|
|
|
{
|
2021-07-10 15:16:33 -07:00
|
|
|
CrdsGossipPull::process_pull_requests(&mut self.crds, callers, now);
|
2020-05-28 11:38:13 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn generate_pull_responses(
|
|
|
|
&self,
|
|
|
|
filters: &[(CrdsValue, CrdsFilter)],
|
2020-12-18 10:45:12 -08:00
|
|
|
output_size_limit: usize, // Limit number of crds values returned.
|
2020-08-11 06:26:42 -07:00
|
|
|
now: u64,
|
2020-05-28 11:38:13 -07:00
|
|
|
) -> Vec<Vec<CrdsValue>> {
|
2021-07-10 15:16:33 -07:00
|
|
|
CrdsGossipPull::generate_pull_responses(&self.crds, filters, output_size_limit, now)
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
2020-05-28 11:38:13 -07:00
|
|
|
|
2020-06-09 17:08:13 -07:00
|
|
|
pub fn filter_pull_responses(
|
|
|
|
&self,
|
2020-02-07 12:38:24 -08:00
|
|
|
timeouts: &HashMap<Pubkey, u64>,
|
2018-11-15 13:23:26 -08:00
|
|
|
response: Vec<CrdsValue>,
|
|
|
|
now: u64,
|
2020-06-09 17:08:13 -07:00
|
|
|
process_pull_stats: &mut ProcessPullStats,
|
removes delayed crds inserts when upserting gossip table (#16806)
It is crucial that VersionedCrdsValue::insert_timestamp does not go
backward in time:
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/crds.rs#L67-L79
Otherwise methods such as get_votes and get_epoch_slots_since will
break, which will break their downstream flow, including vote-listener
and optimistic confirmation:
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/cluster_info.rs#L1197-L1215
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/cluster_info.rs#L1274-L1298
For that, Crds::new_versioned is intended to be called "atomically" with
Crds::insert_verioned (as the comment already says so):
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/crds.rs#L126-L129
However, currently this is violated in the code. For example,
filter_pull_responses creates VersionedCrdsValues (with the current
timestamp), then acquires an exclusive lock on gossip, then
process_pull_responses writes those values to the crds table:
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/cluster_info.rs#L2375-L2392
Depending on the workload and lock contention, the insert_timestamps may
well be in the past when these values finally are inserted into gossip.
To avoid such scenarios, this commit:
* removes Crds::new_versioned and Crd::insert_versioned.
* makes VersionedCrdsValue constructor private, only invoked in
Crds::insert, so that insert_timestamp is populated right before
insert.
This will improve insert_timestamp monotonicity as long as Crds::insert
is not called with a stalled timestamp. Following commits may further
improve this by calling timestamp() inside Crds::insert, and/or
switching to std::time::Instant which guarantees monotonicity.
2021-04-28 04:56:13 -07:00
|
|
|
) -> (
|
|
|
|
Vec<CrdsValue>, // valid responses.
|
|
|
|
Vec<CrdsValue>, // responses with expired timestamps.
|
|
|
|
Vec<Hash>, // hash of outdated values.
|
|
|
|
) {
|
2018-11-15 13:23:26 -08:00
|
|
|
self.pull
|
2020-06-09 17:08:13 -07:00
|
|
|
.filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// process a pull response
|
|
|
|
pub fn process_pull_responses(
|
|
|
|
&mut self,
|
|
|
|
from: &Pubkey,
|
removes delayed crds inserts when upserting gossip table (#16806)
It is crucial that VersionedCrdsValue::insert_timestamp does not go
backward in time:
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/crds.rs#L67-L79
Otherwise methods such as get_votes and get_epoch_slots_since will
break, which will break their downstream flow, including vote-listener
and optimistic confirmation:
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/cluster_info.rs#L1197-L1215
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/cluster_info.rs#L1274-L1298
For that, Crds::new_versioned is intended to be called "atomically" with
Crds::insert_verioned (as the comment already says so):
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/crds.rs#L126-L129
However, currently this is violated in the code. For example,
filter_pull_responses creates VersionedCrdsValues (with the current
timestamp), then acquires an exclusive lock on gossip, then
process_pull_responses writes those values to the crds table:
https://github.com/solana-labs/solana/blob/ec37a843a/core/src/cluster_info.rs#L2375-L2392
Depending on the workload and lock contention, the insert_timestamps may
well be in the past when these values finally are inserted into gossip.
To avoid such scenarios, this commit:
* removes Crds::new_versioned and Crd::insert_versioned.
* makes VersionedCrdsValue constructor private, only invoked in
Crds::insert, so that insert_timestamp is populated right before
insert.
This will improve insert_timestamp monotonicity as long as Crds::insert
is not called with a stalled timestamp. Following commits may further
improve this by calling timestamp() inside Crds::insert, and/or
switching to std::time::Instant which guarantees monotonicity.
2021-04-28 04:56:13 -07:00
|
|
|
responses: Vec<CrdsValue>,
|
|
|
|
responses_expired_timeout: Vec<CrdsValue>,
|
2020-09-30 17:39:22 -07:00
|
|
|
failed_inserts: Vec<Hash>,
|
2020-06-09 17:08:13 -07:00
|
|
|
now: u64,
|
|
|
|
process_pull_stats: &mut ProcessPullStats,
|
|
|
|
) {
|
2021-04-30 09:57:19 -07:00
|
|
|
self.pull.process_pull_responses(
|
2020-06-09 17:08:13 -07:00
|
|
|
&mut self.crds,
|
|
|
|
from,
|
|
|
|
responses,
|
|
|
|
responses_expired_timeout,
|
2020-09-30 17:39:22 -07:00
|
|
|
failed_inserts,
|
2020-06-09 17:08:13 -07:00
|
|
|
now,
|
|
|
|
process_pull_stats,
|
2020-06-13 22:03:38 -07:00
|
|
|
);
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
2019-11-20 11:25:18 -08:00
|
|
|
|
|
|
|
pub fn make_timeouts(
|
|
|
|
&self,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_pubkey: Pubkey,
|
2019-11-20 11:25:18 -08:00
|
|
|
stakes: &HashMap<Pubkey, u64>,
|
2021-05-21 08:55:22 -07:00
|
|
|
epoch_duration: Duration,
|
2019-11-20 11:25:18 -08:00
|
|
|
) -> HashMap<Pubkey, u64> {
|
2021-07-09 06:10:08 -07:00
|
|
|
self.pull.make_timeouts(self_pubkey, stakes, epoch_duration)
|
2019-11-20 11:25:18 -08:00
|
|
|
}
|
|
|
|
|
2020-10-23 07:17:37 -07:00
|
|
|
pub fn purge(
|
|
|
|
&mut self,
|
2021-07-09 06:10:08 -07:00
|
|
|
self_pubkey: &Pubkey,
|
2020-10-23 07:17:37 -07:00
|
|
|
thread_pool: &ThreadPool,
|
|
|
|
now: u64,
|
|
|
|
timeouts: &HashMap<Pubkey, u64>,
|
|
|
|
) -> usize {
|
2019-11-20 11:25:18 -08:00
|
|
|
let mut rv = 0;
|
2018-11-15 13:23:26 -08:00
|
|
|
if now > 5 * self.push.msg_timeout {
|
|
|
|
let min = now - 5 * self.push.msg_timeout;
|
2019-06-26 00:30:16 -07:00
|
|
|
self.push.purge_old_received_cache(min);
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
|
|
|
if now > self.pull.crds_timeout {
|
2019-11-20 11:25:18 -08:00
|
|
|
//sanity check
|
2021-07-09 06:10:08 -07:00
|
|
|
assert_eq!(timeouts[self_pubkey], std::u64::MAX);
|
2021-05-21 08:55:22 -07:00
|
|
|
assert!(timeouts.contains_key(&Pubkey::default()));
|
2021-07-10 15:16:33 -07:00
|
|
|
rv = CrdsGossipPull::purge_active(thread_pool, &mut self.crds, now, timeouts);
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
2021-05-24 06:47:21 -07:00
|
|
|
self.crds
|
|
|
|
.trim_purged(now.saturating_sub(5 * self.pull.crds_timeout));
|
2020-09-30 17:39:22 -07:00
|
|
|
self.pull.purge_failed_inserts(now);
|
2019-11-20 11:25:18 -08:00
|
|
|
rv
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
2020-11-05 07:42:00 -08:00
|
|
|
|
|
|
|
// Only for tests and simulations.
|
|
|
|
pub(crate) fn mock_clone(&self) -> Self {
|
|
|
|
Self {
|
|
|
|
crds: self.crds.clone(),
|
|
|
|
push: self.push.mock_clone(),
|
2021-03-24 11:33:56 -07:00
|
|
|
pull: self.pull.mock_clone(),
|
2020-11-05 07:42:00 -08:00
|
|
|
}
|
|
|
|
}
|
2018-11-15 13:23:26 -08:00
|
|
|
}
|
|
|
|
|
2019-02-20 20:02:47 -08:00
|
|
|
/// Computes a normalized(log of actual stake) stake
|
|
|
|
pub fn get_stake<S: std::hash::BuildHasher>(id: &Pubkey, stakes: &HashMap<Pubkey, u64, S>) -> f32 {
|
|
|
|
// cap the max balance to u32 max (it should be plenty)
|
|
|
|
let bal = f64::from(u32::max_value()).min(*stakes.get(id).unwrap_or(&0) as f64);
|
|
|
|
1_f32.max((bal as f32).ln())
|
2019-02-20 17:08:56 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Computes bounded weight given some max, a time since last selected, and a stake value
/// The minimum stake is 1 and not 0 to allow 'time since last' picked to factor in.
pub fn get_weight(max_weight: f32, time_since_last_selected: u32, stake: f32) -> f32 {
    let raw = time_since_last_selected as f32 * stake;
    // An overflowing product saturates at the ceiling instead of propagating infinity.
    let weight = if raw.is_infinite() { max_weight } else { raw };
    weight.min(max_weight).max(1.0_f32)
}
|
|
|
|
|
2018-11-15 13:23:26 -08:00
|
|
|
#[cfg(test)]
mod test {
    use {
        super::*,
        crate::{contact_info::ContactInfo, crds_value::CrdsData},
        solana_sdk::{hash::hash, timing::timestamp},
    };

    /// Exercises the three outcomes of process_prune_msg: wrong destination,
    /// valid prune, and expired (timed-out) prune.
    #[test]
    fn test_prune_errors() {
        let mut crds_gossip = CrdsGossip::default();
        let id = Pubkey::new(&[0; 32]);
        let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0);
        let prune_pubkey = Pubkey::new(&[2; 32]);
        // Seed the crds table with a peer so the active set is non-empty.
        crds_gossip
            .crds
            .insert(
                CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
                0,
            )
            .unwrap();
        crds_gossip.refresh_push_active_set(
            &id,
            0,               // shred version
            &HashMap::new(), // stakes
            None,            // gossip validators
        );
        let now = timestamp();
        //incorrect dest
        let mut res = crds_gossip.process_prune_msg(
            &id,
            &ci.id,
            &Pubkey::new(hash(&[1; 32]).as_ref()),
            &[prune_pubkey],
            now,
            now,
        );
        assert_eq!(res.err(), Some(CrdsGossipError::BadPruneDestination));
        //correct dest
        res = crds_gossip.process_prune_msg(
            &id,             // self_pubkey
            &ci.id,          // peer
            &id,             // destination
            &[prune_pubkey], // origins
            now,
            now,
        );
        res.unwrap();
        //test timeout
        // A "now" far beyond the message's wallclock triggers the timeout path.
        let timeout = now + crds_gossip.push.prune_timeout * 2;
        res = crds_gossip.process_prune_msg(
            &id,             // self_pubkey
            &ci.id,          // peer
            &id,             // destination
            &[prune_pubkey], // origins
            now,
            timeout,
        );
        assert_eq!(res.err(), Some(CrdsGossipError::PruneMessageTimeout));
    }
}
|