obtains staked-nodes from the root-bank (#14257)

Stakes are now obtained from the root bank, as opposed to the working bank.
This commit is contained in:
behzad nouri 2020-12-27 13:28:05 +00:00 committed by GitHub
parent a267300f06
commit 49019c6613
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 33 additions and 27 deletions

View File

@ -1851,7 +1851,7 @@ impl ClusterInfo {
let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
bank_forks.read().unwrap().working_bank().staked_nodes()
bank_forks.read().unwrap().root_bank().staked_nodes()
}
None => HashMap::new(),
};
@ -2502,24 +2502,24 @@ impl ClusterInfo {
fn get_stakes_and_epoch_time(
bank_forks: Option<&Arc<RwLock<BankForks>>>,
) -> (HashMap<Pubkey, u64>, u64) {
let epoch_time_ms;
let stakes: HashMap<_, _> = match bank_forks {
) -> (
HashMap<Pubkey, u64>, // staked nodes
u64, // epoch time ms
) {
match bank_forks {
Some(ref bank_forks) => {
let bank = bank_forks.read().unwrap().working_bank();
let bank = bank_forks.read().unwrap().root_bank();
let epoch = bank.epoch();
let epoch_schedule = bank.epoch_schedule();
epoch_time_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
bank.staked_nodes()
(
bank.staked_nodes(),
bank.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT,
)
}
None => {
inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);
epoch_time_ms = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
HashMap::new()
(HashMap::new(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS)
}
};
(stakes, epoch_time_ms)
}
}
fn process_packets(

View File

@ -85,7 +85,7 @@ impl ClusterSlots {
}
fn update_peers(&self, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root_bank = bank_forks.read().unwrap().root_bank();
let root_epoch = root_bank.epoch();
let my_epoch = *self.epoch.read().unwrap();

View File

@ -26,7 +26,10 @@ use std::{
collections::{HashMap, HashSet},
fs::{self, File},
io::BufReader,
ops::Bound::{Included, Unbounded},
ops::{
Bound::{Included, Unbounded},
Deref,
},
path::{Path, PathBuf},
sync::Arc,
};
@ -183,7 +186,7 @@ impl Tower {
let root_bank = bank_forks.root_bank();
let (_progress, heaviest_subtree_fork_choice) =
crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
root_bank,
root_bank.deref(),
bank_forks.frozen_banks().values().cloned().collect(),
&my_pubkey,
&vote_account,

View File

@ -23,7 +23,7 @@ pub struct OptimisticallyConfirmedBank {
impl OptimisticallyConfirmedBank {
pub fn locked_from_bank_forks_root(bank_forks: &Arc<RwLock<BankForks>>) -> Arc<RwLock<Self>> {
Arc::new(RwLock::new(Self {
bank: bank_forks.read().unwrap().root_bank().clone(),
bank: bank_forks.read().unwrap().root_bank(),
}))
}
}

View File

@ -616,7 +616,7 @@ impl ReplayStage {
let (root_bank, frozen_banks) = {
let bank_forks = bank_forks.read().unwrap();
(
bank_forks.root_bank().clone(),
bank_forks.root_bank(),
bank_forks.frozen_banks().values().cloned().collect(),
)
};
@ -630,7 +630,7 @@ impl ReplayStage {
}
pub(crate) fn initialize_progress_and_fork_choice(
root_bank: &Arc<Bank>,
root_bank: &Bank,
mut frozen_banks: Vec<Arc<Bank>>,
my_pubkey: &Pubkey,
vote_account: &Pubkey,
@ -3120,7 +3120,7 @@ pub(crate) mod tests {
) {
let stake = 10_000;
let (bank_forks, _, _) = initialize_state(&all_keypairs, stake);
let root_bank = bank_forks.root_bank().clone();
let root_bank = bank_forks.root_bank();
let mut propagated_stats = PropagatedStats {
total_epoch_stake: stake * all_keypairs.len() as u64,
..PropagatedStats::default()
@ -3805,7 +3805,7 @@ pub(crate) mod tests {
..
} = replay_blockstore_components();
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root_bank = bank_forks.read().unwrap().root_bank();
let my_pubkey = leader_schedule_cache
.slot_leader_at(root_bank.slot(), Some(&root_bank))
.unwrap();

View File

@ -187,7 +187,7 @@ impl JsonRpcRequestProcessor {
"Bank with {:?} not found at slot: {:?}",
commitment_level, slot
);
r_bank_forks.root_bank().clone()
r_bank_forks.root_bank()
})
}

View File

@ -49,7 +49,7 @@ impl SamplePerformanceService {
exit: Arc<AtomicBool>,
) {
let forks = bank_forks.read().unwrap();
let bank = forks.root_bank().clone();
let bank = forks.root_bank();
let highest_slot = forks.highest_slot();
drop(forks);

View File

@ -61,6 +61,7 @@ use std::time::Instant;
use std::{
collections::HashSet,
net::SocketAddr,
ops::Deref,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::Receiver,
@ -536,7 +537,9 @@ impl Validator {
"New shred signal for the TVU should be the same as the clear bank signal."
);
let vote_tracker = Arc::new(VoteTracker::new(bank_forks.read().unwrap().root_bank()));
let vote_tracker = Arc::new(VoteTracker::new(
bank_forks.read().unwrap().root_bank().deref(),
));
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
let (verified_vote_sender, verified_vote_receiver) = unbounded();

View File

@ -119,8 +119,8 @@ impl BankForks {
self.banks.get(&bank_slot)
}
pub fn root_bank(&self) -> &Arc<Bank> {
&self[self.root()]
pub fn root_bank(&self) -> Arc<Bank> {
self[self.root()].clone()
}
pub fn new_from_banks(initial_forks: &[Arc<Bank>], root: Slot) -> Self {
@ -219,7 +219,7 @@ impl BankForks {
if self.snapshot_config.is_some()
&& accounts_background_request_sender.is_snapshot_creation_enabled()
{
let snapshot_root_bank = self.root_bank().clone();
let snapshot_root_bank = self.root_bank();
let root_slot = snapshot_root_bank.slot();
if let Err(e) =
accounts_background_request_sender.send_snapshot_request(SnapshotRequest {