caches staked nodes computed from vote-accounts (#13929)

behzad nouri 2020-12-17 21:22:50 +00:00 committed by GitHub
parent fd7d2f82ae
commit d6d76219b6
11 changed files with 367 additions and 78 deletions
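
In short: the node-pubkey-to-stake map that staking_utils used to rebuild from the vote accounts on every call is now cached in the runtime. Stakes keeps its vote accounts in a new VoteAccounts type that computes the map lazily (guarded by a Once) and keeps it in sync across insert/remove/add_stake/sub_stake, and Bank exposes it through staked_nodes() and epoch_staked_nodes(). A minimal usage sketch of the new accessors (the wrapper function names and the in-scope bank are illustrative, not part of the change):

    use solana_runtime::bank::Bank;
    use solana_sdk::pubkey::Pubkey;
    use std::collections::HashMap;

    // Stake per node identity in the current bank, served from the cached map.
    fn current_staked_nodes(bank: &Bank) -> HashMap<Pubkey, u64> {
        bank.staked_nodes()
    }

    // Stake per node identity for the leader-schedule epoch; None when the
    // bank carries no epoch-stakes entry for that epoch.
    fn leader_schedule_staked_nodes(bank: &Bank) -> Option<HashMap<Pubkey, u64>> {
        let epoch = bank.get_leader_schedule_epoch(bank.slot());
        bank.epoch_staked_nodes(epoch)
    }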

@@ -17,7 +17,7 @@ use crossbeam_channel::{
     Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
     Sender as CrossbeamSender,
 };
-use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
+use solana_ledger::{blockstore::Blockstore, shred::Shred};
 use solana_measure::measure::Measure;
 use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
 use solana_runtime::bank::Bank;
@@ -306,7 +306,7 @@ impl BroadcastStage {
         for (_, bank) in retransmit_slots.iter() {
             let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
-            let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
+            let stakes = bank.epoch_staked_nodes(bank_epoch);
             let stakes = stakes.map(Arc::new);
             let data_shreds = Arc::new(
                 blockstore

@@ -102,7 +102,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         blockstore_sender.send((data_shreds.clone(), None))?;
         // 4) Start broadcast step
         let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
-        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
+        let stakes = bank.epoch_staked_nodes(bank_epoch);
         let stakes = stakes.map(Arc::new);
         socket_sender.send(((stakes.clone(), data_shreds), None))?;
         if let Some((good_last_data_shred, bad_last_data_shred)) = last_shreds {

@@ -213,7 +213,7 @@ impl StandardBroadcastRun {
         let mut get_leader_schedule_time = Measure::start("broadcast_get_leader_schedule");
         let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
-        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
+        let stakes = bank.epoch_staked_nodes(bank_epoch);
         let stakes = stakes.map(Arc::new);
         // Broadcast the last shred of the interrupted slot if necessary

@@ -39,7 +39,6 @@ use itertools::Itertools;
 use rayon::prelude::*;
 use rayon::{ThreadPool, ThreadPoolBuilder};
 use serde::ser::Serialize;
-use solana_ledger::staking_utils;
 use solana_measure::measure::Measure;
 use solana_measure::thread_mem_usage;
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
@@ -1834,7 +1833,7 @@ impl ClusterInfo {
         let stakes: HashMap<_, _> = match bank_forks {
             Some(ref bank_forks) => {
-                staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
+                bank_forks.read().unwrap().working_bank().staked_nodes()
             }
             None => HashMap::new(),
         };
@@ -2492,7 +2491,7 @@ impl ClusterInfo {
                 let epoch = bank.epoch();
                 let epoch_schedule = bank.epoch_schedule();
                 epoch_time_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
-                staking_utils::staked_nodes(&bank)
+                bank.staked_nodes()
             }
             None => {
                 inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);

@@ -19,7 +19,6 @@ use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
 use solana_ledger::{
     blockstore::{Blockstore, CompletedSlotsReceiver},
     leader_schedule_cache::LeaderScheduleCache,
-    staking_utils,
 };
 use solana_measure::measure::Measure;
 use solana_metrics::inc_new_counter_error;
@@ -278,7 +277,7 @@ fn retransmit(
         drop(r_epoch_stakes_cache);
         let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
         if w_epoch_stakes_cache.epoch != bank_epoch {
-            let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
+            let stakes = r_bank.epoch_staked_nodes(bank_epoch);
             let stakes = stakes.map(Arc::new);
             w_epoch_stakes_cache.stakes = stakes;
             w_epoch_stakes_cache.epoch = bank_epoch;

@@ -1,5 +1,4 @@
 use crate::leader_schedule::LeaderSchedule;
-use crate::staking_utils;
 use solana_runtime::bank::Bank;
 use solana_sdk::{
     clock::{Epoch, Slot, NUM_CONSECUTIVE_LEADER_SLOTS},
@@ -8,7 +7,7 @@ use solana_sdk::{
 /// Return the leader schedule for the given epoch.
 pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> {
-    staking_utils::staked_nodes_at_epoch(bank, epoch).map(|stakes| {
+    bank.epoch_staked_nodes(epoch).map(|stakes| {
         let mut seed = [0u8; 32];
         seed[0..8].copy_from_slice(&epoch.to_le_bytes());
         let mut stakes: Vec<_> = stakes.into_iter().collect();
@@ -66,7 +65,7 @@ mod tests {
             .genesis_config;
         let bank = Bank::new(&genesis_config);
-        let pubkeys_and_stakes: Vec<_> = staking_utils::staked_nodes(&bank).into_iter().collect();
+        let pubkeys_and_stakes: Vec<_> = bank.staked_nodes().into_iter().collect();
         let seed = [0u8; 32];
         let leader_schedule = LeaderSchedule::new(
             &pubkeys_and_stakes,

@@ -1,9 +1,9 @@
-use solana_runtime::{bank::Bank, vote_account::ArcVoteAccount};
+use solana_runtime::bank::Bank;
 use solana_sdk::{
     clock::{Epoch, Slot},
     pubkey::Pubkey,
 };
-use std::{borrow::Borrow, collections::HashMap};
+use std::collections::HashMap;
 /// Looks through vote accounts, and finds the latest slot that has achieved
 /// supermajority lockout
@@ -24,36 +24,6 @@ pub fn vote_account_stakes(bank: &Bank) -> HashMap<Pubkey, u64> {
         .collect()
 }
-/// Collect the staked nodes, as named by staked vote accounts from the given bank
-pub fn staked_nodes(bank: &Bank) -> HashMap<Pubkey, u64> {
-    to_staked_nodes(bank.vote_accounts())
-}
-/// At the specified epoch, collect the delegate account balance and vote states for delegates
-/// that have non-zero balance in any of their managed staking accounts
-pub fn staked_nodes_at_epoch(bank: &Bank, epoch: Epoch) -> Option<HashMap<Pubkey, u64>> {
-    bank.epoch_vote_accounts(epoch).map(to_staked_nodes)
-}
-fn to_staked_nodes<I, K, V>(
-    vote_accounts: I,
-) -> HashMap<Pubkey /*VoteState.node_pubkey*/, u64 /*stake*/>
-where
-    I: IntoIterator<Item = (K /*vote pubkey*/, V)>,
-    V: Borrow<(u64 /*stake*/, ArcVoteAccount)>,
-{
-    let mut out: HashMap<Pubkey, u64> = HashMap::new();
-    for (_ /*vote pubkey*/, stake_vote_account) in vote_accounts {
-        let (stake, vote_account) = stake_vote_account.borrow();
-        if let Ok(vote_state) = vote_account.vote_state().as_ref() {
-            out.entry(vote_state.node_pubkey)
-                .and_modify(|s| *s += *stake)
-                .or_insert(*stake);
-        }
-    }
-    out
-}
 fn epoch_stakes_and_lockouts(bank: &Bank, epoch: Epoch) -> Vec<(u64, Option<u64>)> {
     bank.epoch_vote_accounts(epoch)
         .expect("Bank state for epoch is missing")
@@ -96,6 +66,7 @@ pub(crate) mod tests {
         bootstrap_validator_stake_lamports, create_genesis_config, GenesisConfigInfo,
     };
     use rand::Rng;
+    use solana_runtime::vote_account::{ArcVoteAccount, VoteAccounts};
     use solana_sdk::{
         account::{from_account, Account},
         clock::Clock,
@@ -347,7 +318,7 @@ pub(crate) mod tests {
             let vote_pubkey = Pubkey::new_unique();
             (vote_pubkey, (stake, ArcVoteAccount::from(account)))
         });
-        let result = to_staked_nodes(vote_accounts);
+        let result = vote_accounts.collect::<VoteAccounts>().staked_nodes();
         assert_eq!(result.len(), 2);
         assert_eq!(result[&node1], 3);
         assert_eq!(result[&node2], 5);

@@ -4293,6 +4293,10 @@ impl Bank {
         self.stakes.read().unwrap().stake_delegations().clone()
     }
+    pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
+        self.stakes.read().unwrap().staked_nodes()
+    }
     /// current vote accounts for this bank along with the stake
     /// attributed to each account
     /// Note: This clones the entire vote-accounts hashmap. For a single
@@ -4323,6 +4327,10 @@ impl Bank {
         &self.epoch_stakes
     }
+    pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<HashMap<Pubkey, u64>> {
+        Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
+    }
     /// vote accounts for the specific epoch along with the stake
     /// attributed to each account
     pub fn epoch_vote_accounts(

@@ -261,7 +261,7 @@ mod test_bank_serialize {
     // These some what long test harness is required to freeze the ABI of
     // Bank's serialization due to versioned nature
-    #[frozen_abi(digest = "5NHt6PLRJPWJH9FUcweSsUWgN5hXMfXj1BduDrDHH73w")]
+    #[frozen_abi(digest = "Gv3em1cZt9cjQWepg8C5aaK95deyA1fifowRfmmTuoES")]
     #[derive(Serialize, AbiExample)]
     pub struct BankAbiTestWrapperFuture {
         #[serde(serialize_with = "wrapper_future")]

@@ -1,16 +1,16 @@
 //! Stakes serve as a cache of stake and vote accounts to derive
 //! node stakes
-use crate::vote_account::ArcVoteAccount;
+use crate::vote_account::{ArcVoteAccount, VoteAccounts};
 use solana_sdk::{
     account::Account, clock::Epoch, pubkey::Pubkey, sysvar::stake_history::StakeHistory,
 };
 use solana_stake_program::stake_state::{new_stake_history_entry, Delegation, StakeState};
-use std::collections::HashMap;
+use std::{borrow::Borrow, collections::HashMap};
 #[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize, AbiExample)]
 pub struct Stakes {
     /// vote accounts
-    vote_accounts: HashMap<Pubkey, (u64, ArcVoteAccount)>,
+    vote_accounts: VoteAccounts,
     /// stake_delegations
     stake_delegations: HashMap<Pubkey, Delegation>,
@@ -52,19 +52,14 @@ impl Stakes {
         let vote_accounts_for_next_epoch = self
             .vote_accounts
             .iter()
-            .map(|(pubkey, (_stake, account))| {
-                (
-                    *pubkey,
-                    (
-                        self.calculate_stake(
-                            pubkey,
-                            next_epoch,
-                            Some(&stake_history_upto_prev_epoch),
-                            fix_stake_deactivate,
-                        ),
-                        account.clone(),
-                    ),
-                )
+            .map(|(pubkey, (_ /*stake*/, account))| {
+                let stake = self.calculate_stake(
+                    pubkey,
+                    next_epoch,
+                    Some(&stake_history_upto_prev_epoch),
+                    fix_stake_deactivate,
+                );
+                (*pubkey, (stake, account.clone()))
             })
             .collect();
@@ -179,14 +174,10 @@ impl Stakes {
         // if adjustments need to be made...
         if stake != old_stake {
             if let Some((voter_pubkey, stake)) = old_stake {
-                self.vote_accounts
-                    .entry(voter_pubkey)
-                    .and_modify(|e| e.0 -= stake);
+                self.vote_accounts.sub_stake(&voter_pubkey, stake);
             }
             if let Some((voter_pubkey, stake)) = stake {
-                self.vote_accounts
-                    .entry(voter_pubkey)
-                    .and_modify(|e| e.0 += stake);
+                self.vote_accounts.add_stake(&voter_pubkey, stake);
             }
         }
@@ -209,13 +200,17 @@
     }
     pub fn vote_accounts(&self) -> &HashMap<Pubkey, (u64, ArcVoteAccount)> {
-        &self.vote_accounts
+        self.vote_accounts.borrow()
     }
     pub fn stake_delegations(&self) -> &HashMap<Pubkey, Delegation> {
         &self.stake_delegations
     }
+    pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
+        self.vote_accounts.staked_nodes()
+    }
     pub fn highest_staked_node(&self) -> Option<Pubkey> {
         let (_pubkey, (_stake, vote_account)) = self
             .vote_accounts

@@ -1,9 +1,15 @@
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
-use solana_sdk::{account::Account, instruction::InstructionError};
+use solana_sdk::{account::Account, instruction::InstructionError, pubkey::Pubkey};
 use solana_vote_program::vote_state::VoteState;
-use std::ops::Deref;
-use std::sync::{Arc, Once, RwLock, RwLockReadGuard};
+use std::{
+    borrow::Borrow,
+    cmp::Ordering,
+    collections::{hash_map::Entry, HashMap},
+    iter::FromIterator,
+    ops::Deref,
+    sync::{Arc, Once, RwLock, RwLockReadGuard},
+};
 // The value here does not matter. It will be overwritten
 // at the first call to VoteAccount::vote_state().
@@ -20,6 +26,18 @@ pub struct VoteAccount {
     vote_state_once: Once,
 }
+#[derive(Debug, AbiExample)]
+pub struct VoteAccounts {
+    vote_accounts: HashMap<Pubkey, (u64 /*stake*/, ArcVoteAccount)>,
+    staked_nodes: RwLock<
+        HashMap<
+            Pubkey, // VoteAccount.vote_state.node_pubkey.
+            u64,    // Total stake across all vote-accounts.
+        >,
+    >,
+    staked_nodes_once: Once,
+}
 impl VoteAccount {
     pub fn lamports(&self) -> u64 {
         self.account.lamports
@@ -31,6 +49,100 @@ impl VoteAccount {
         });
         self.vote_state.read().unwrap()
     }
+    /// VoteState.node_pubkey of this vote-account.
+    fn node_pubkey(&self) -> Option<Pubkey> {
+        Some(self.vote_state().as_ref().ok()?.node_pubkey)
+    }
+}
+impl VoteAccounts {
+    pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
+        self.staked_nodes_once.call_once(|| {
+            let mut staked_nodes = HashMap::new();
+            for (stake, vote_account) in
+                self.vote_accounts.values().filter(|(stake, _)| *stake != 0)
+            {
+                if let Some(node_pubkey) = vote_account.node_pubkey() {
+                    staked_nodes
+                        .entry(node_pubkey)
+                        .and_modify(|s| *s += *stake)
+                        .or_insert(*stake);
+                }
+            }
+            *self.staked_nodes.write().unwrap() = staked_nodes
+        });
+        self.staked_nodes.read().unwrap().clone()
+    }
+    pub fn iter(&self) -> impl Iterator<Item = (&Pubkey, &(u64, ArcVoteAccount))> {
+        self.vote_accounts.iter()
+    }
+    pub fn insert(&mut self, pubkey: Pubkey, (stake, vote_account): (u64, ArcVoteAccount)) {
+        self.add_node_stake(stake, &vote_account);
+        if let Some((stake, vote_account)) =
+            self.vote_accounts.insert(pubkey, (stake, vote_account))
+        {
+            self.sub_node_stake(stake, &vote_account);
+        }
+    }
+    pub fn remove(&mut self, pubkey: &Pubkey) -> Option<(u64, ArcVoteAccount)> {
+        let value = self.vote_accounts.remove(pubkey);
+        if let Some((stake, ref vote_account)) = value {
+            self.sub_node_stake(stake, vote_account);
+        }
+        value
+    }
+    pub fn add_stake(&mut self, pubkey: &Pubkey, delta: u64) {
+        if let Some((stake, vote_account)) = self.vote_accounts.get_mut(pubkey) {
+            *stake += delta;
+            let vote_account = vote_account.clone();
+            self.add_node_stake(delta, &vote_account);
+        }
+    }
+    pub fn sub_stake(&mut self, pubkey: &Pubkey, delta: u64) {
+        if let Some((stake, vote_account)) = self.vote_accounts.get_mut(pubkey) {
+            *stake = stake
+                .checked_sub(delta)
+                .expect("subtraction value exceeds account's stake");
+            let vote_account = vote_account.clone();
+            self.sub_node_stake(delta, &vote_account);
+        }
+    }
+    fn add_node_stake(&mut self, stake: u64, vote_account: &ArcVoteAccount) {
+        if stake != 0 && self.staked_nodes_once.is_completed() {
+            if let Some(node_pubkey) = vote_account.node_pubkey() {
+                self.staked_nodes
+                    .write()
+                    .unwrap()
+                    .entry(node_pubkey)
+                    .and_modify(|s| *s += stake)
+                    .or_insert(stake);
+            }
+        }
+    }
+    fn sub_node_stake(&mut self, stake: u64, vote_account: &ArcVoteAccount) {
+        if stake != 0 && self.staked_nodes_once.is_completed() {
+            if let Some(node_pubkey) = vote_account.node_pubkey() {
+                match self.staked_nodes.write().unwrap().entry(node_pubkey) {
+                    Entry::Vacant(_) => panic!("this should not happen!"),
+                    Entry::Occupied(mut entry) => match entry.get().cmp(&stake) {
+                        Ordering::Less => panic!("subtraction value exceeds node's stake"),
+                        Ordering::Equal => {
+                            entry.remove_entry();
+                        }
+                        Ordering::Greater => *entry.get_mut() -= stake,
+                    },
+                }
+            }
+        }
+    }
 }
 impl Deref for ArcVoteAccount {
@@ -92,16 +204,104 @@ impl PartialEq<VoteAccount> for VoteAccount {
     }
 }
+impl Default for VoteAccounts {
+    fn default() -> Self {
+        Self {
+            vote_accounts: HashMap::default(),
+            staked_nodes: RwLock::default(),
+            staked_nodes_once: Once::new(),
+        }
+    }
+}
+impl Clone for VoteAccounts {
+    fn clone(&self) -> Self {
+        if self.staked_nodes_once.is_completed() {
+            let staked_nodes = self.staked_nodes.read().unwrap().clone();
+            let other = Self {
+                vote_accounts: self.vote_accounts.clone(),
+                staked_nodes: RwLock::new(staked_nodes),
+                staked_nodes_once: Once::new(),
+            };
+            other.staked_nodes_once.call_once(|| {});
+            other
+        } else {
+            Self {
+                vote_accounts: self.vote_accounts.clone(),
+                staked_nodes: RwLock::default(),
+                staked_nodes_once: Once::new(),
+            }
+        }
+    }
+}
+impl PartialEq<VoteAccounts> for VoteAccounts {
+    fn eq(&self, other: &Self) -> bool {
+        self.vote_accounts == other.vote_accounts
+    }
+}
+type VoteAccountsHashMap = HashMap<Pubkey, (u64 /*stake*/, ArcVoteAccount)>;
+impl From<VoteAccountsHashMap> for VoteAccounts {
+    fn from(vote_accounts: VoteAccountsHashMap) -> Self {
+        Self {
+            vote_accounts,
+            staked_nodes: RwLock::default(),
+            staked_nodes_once: Once::new(),
+        }
+    }
+}
+impl Borrow<VoteAccountsHashMap> for VoteAccounts {
+    fn borrow(&self) -> &VoteAccountsHashMap {
+        &self.vote_accounts
+    }
+}
+impl FromIterator<(Pubkey, (u64 /*stake*/, ArcVoteAccount))> for VoteAccounts {
+    fn from_iter<I>(iter: I) -> Self
+    where
+        I: IntoIterator<Item = (Pubkey, (u64, ArcVoteAccount))>,
+    {
+        Self::from(HashMap::from_iter(iter))
+    }
+}
+impl Serialize for VoteAccounts {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.vote_accounts.serialize(serializer)
+    }
+}
+impl<'de> Deserialize<'de> for VoteAccounts {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let vote_accounts = VoteAccountsHashMap::deserialize(deserializer)?;
+        Ok(Self::from(vote_accounts))
+    }
+}
 #[cfg(test)]
 mod tests {
     use super::*;
+    use bincode::Options;
     use rand::Rng;
     use solana_sdk::{pubkey::Pubkey, sysvar::clock::Clock};
     use solana_vote_program::vote_state::{VoteInit, VoteStateVersions};
+    use std::iter::repeat_with;
-    fn new_rand_vote_account<R: Rng>(rng: &mut R) -> (Account, VoteState) {
+    fn new_rand_vote_account<R: Rng>(
+        rng: &mut R,
+        node_pubkey: Option<Pubkey>,
+    ) -> (Account, VoteState) {
         let vote_init = VoteInit {
-            node_pubkey: Pubkey::new_unique(),
+            node_pubkey: node_pubkey.unwrap_or_else(Pubkey::new_unique),
             authorized_voter: Pubkey::new_unique(),
             authorized_withdrawer: Pubkey::new_unique(),
             commission: rng.gen(),
@@ -123,10 +323,42 @@ mod tests {
         (account, vote_state)
     }
+    fn new_rand_vote_accounts<R: Rng>(
+        rng: &mut R,
+        num_nodes: usize,
+    ) -> impl Iterator<Item = (Pubkey, (u64 /*stake*/, ArcVoteAccount))> + '_ {
+        let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(num_nodes).collect();
+        repeat_with(move || {
+            let node = nodes[rng.gen_range(0, nodes.len())];
+            let (account, _) = new_rand_vote_account(rng, Some(node));
+            let stake = rng.gen_range(0, 997);
+            (Pubkey::new_unique(), (stake, ArcVoteAccount::from(account)))
+        })
+    }
+    fn staked_nodes<'a, I>(vote_accounts: I) -> HashMap<Pubkey, u64>
+    where
+        I: IntoIterator<Item = &'a (Pubkey, (u64, ArcVoteAccount))>,
+    {
+        let mut staked_nodes = HashMap::new();
+        for (_, (stake, vote_account)) in vote_accounts
+            .into_iter()
+            .filter(|(_, (stake, _))| *stake != 0)
+        {
+            if let Some(node_pubkey) = vote_account.node_pubkey() {
+                staked_nodes
+                    .entry(node_pubkey)
+                    .and_modify(|s| *s += *stake)
+                    .or_insert(*stake);
+            }
+        }
+        staked_nodes
+    }
     #[test]
     fn test_vote_account() {
         let mut rng = rand::thread_rng();
-        let (account, vote_state) = new_rand_vote_account(&mut rng);
+        let (account, vote_state) = new_rand_vote_account(&mut rng, None);
         let lamports = account.lamports;
         let vote_account = ArcVoteAccount::from(account);
         assert_eq!(lamports, vote_account.lamports());
@@ -138,7 +370,7 @@
     #[test]
     fn test_vote_account_serialize() {
         let mut rng = rand::thread_rng();
-        let (account, vote_state) = new_rand_vote_account(&mut rng);
+        let (account, vote_state) = new_rand_vote_account(&mut rng, None);
         let vote_account = ArcVoteAccount::from(account.clone());
         assert_eq!(vote_state, *vote_account.vote_state().as_ref().unwrap());
         // Assert than ArcVoteAccount has the same wire format as Account.
@@ -151,7 +383,7 @@
     #[test]
     fn test_vote_account_deserialize() {
         let mut rng = rand::thread_rng();
-        let (account, vote_state) = new_rand_vote_account(&mut rng);
+        let (account, vote_state) = new_rand_vote_account(&mut rng, None);
         let data = bincode::serialize(&account).unwrap();
         let vote_account = ArcVoteAccount::from(account);
         assert_eq!(vote_state, *vote_account.vote_state().as_ref().unwrap());
@@ -166,7 +398,7 @@
     #[test]
     fn test_vote_account_round_trip() {
         let mut rng = rand::thread_rng();
-        let (account, vote_state) = new_rand_vote_account(&mut rng);
+        let (account, vote_state) = new_rand_vote_account(&mut rng, None);
         let vote_account = ArcVoteAccount::from(account);
         assert_eq!(vote_state, *vote_account.vote_state().as_ref().unwrap());
         let data = bincode::serialize(&vote_account).unwrap();
@@ -178,4 +410,90 @@
             *other_vote_account.vote_state().as_ref().unwrap()
         );
     }
+    #[test]
+    fn test_vote_accounts_serialize() {
+        let mut rng = rand::thread_rng();
+        let vote_accounts_hash_map: HashMap<Pubkey, (u64, ArcVoteAccount)> =
+            new_rand_vote_accounts(&mut rng, 64).take(1024).collect();
+        let vote_accounts = VoteAccounts::from(vote_accounts_hash_map.clone());
+        assert!(vote_accounts.staked_nodes().len() > 32);
+        assert_eq!(
+            bincode::serialize(&vote_accounts).unwrap(),
+            bincode::serialize(&vote_accounts_hash_map).unwrap(),
+        );
+        assert_eq!(
+            bincode::options().serialize(&vote_accounts).unwrap(),
+            bincode::options()
+                .serialize(&vote_accounts_hash_map)
+                .unwrap(),
+        )
+    }
+    #[test]
+    fn test_vote_accounts_deserialize() {
+        let mut rng = rand::thread_rng();
+        let vote_accounts_hash_map: HashMap<Pubkey, (u64, ArcVoteAccount)> =
+            new_rand_vote_accounts(&mut rng, 64).take(1024).collect();
+        let data = bincode::serialize(&vote_accounts_hash_map).unwrap();
+        let vote_accounts: VoteAccounts = bincode::deserialize(&data).unwrap();
+        assert!(vote_accounts.staked_nodes().len() > 32);
+        assert_eq!(vote_accounts.vote_accounts, vote_accounts_hash_map);
+        let data = bincode::options()
+            .serialize(&vote_accounts_hash_map)
+            .unwrap();
+        let vote_accounts: VoteAccounts = bincode::options().deserialize(&data).unwrap();
+        assert_eq!(vote_accounts.vote_accounts, vote_accounts_hash_map);
+    }
+    #[test]
+    fn test_staked_nodes() {
+        let mut rng = rand::thread_rng();
+        let mut accounts: Vec<_> = new_rand_vote_accounts(&mut rng, 64).take(1024).collect();
+        let mut vote_accounts = VoteAccounts::default();
+        // Add vote accounts.
+        for (k, (pubkey, (stake, vote_account))) in accounts.iter().enumerate() {
+            vote_accounts.insert(*pubkey, (*stake, vote_account.clone()));
+            if (k + 1) % 128 == 0 {
+                assert_eq!(
+                    staked_nodes(&accounts[..k + 1]),
+                    vote_accounts.staked_nodes()
+                );
+            }
+        }
+        // Remove some of the vote accounts.
+        for k in 0..256 {
+            let index = rng.gen_range(0, accounts.len());
+            let (pubkey, (_, _)) = accounts.swap_remove(index);
+            vote_accounts.remove(&pubkey);
+            if (k + 1) % 32 == 0 {
+                assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
+            }
+        }
+        // Modify the stakes for some of the accounts.
+        for k in 0..2048 {
+            let index = rng.gen_range(0, accounts.len());
+            let (pubkey, (stake, _)) = &mut accounts[index];
+            let new_stake = rng.gen_range(0, 997);
+            if new_stake < *stake {
+                vote_accounts.sub_stake(pubkey, *stake - new_stake);
+            } else {
+                vote_accounts.add_stake(pubkey, new_stake - *stake);
+            }
+            *stake = new_stake;
+            if (k + 1) % 128 == 0 {
+                assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
+            }
+        }
+        // Remove everything.
+        while !accounts.is_empty() {
+            let index = rng.gen_range(0, accounts.len());
+            let (pubkey, (_, _)) = accounts.swap_remove(index);
+            vote_accounts.remove(&pubkey);
+            if accounts.len() % 32 == 0 {
+                assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
+            }
+        }
+        assert!(vote_accounts.staked_nodes.read().unwrap().is_empty());
+    }
 }