From d5c2dacd07f13a39ff9c79d873b96d816c40743d Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Mon, 28 Aug 2023 21:31:51 +0000
Subject: [PATCH] revises turbine epoch stakes for shreds propagation (#32743)

Bank::get_leader_schedule_epoch returns 1 + EpochSchedule::get_epoch.
As a result, at epoch boundaries, when the propagated shred is from the
next epoch, we look up epoch stakes for 2 epochs ahead of the root or
working bank's epoch. However, the bank structure only contains epoch
stakes for one epoch ahead. This results in shreds propagated at epoch
boundaries having unknown epoch stakes.
---
 sdk/src/feature_set.rs       |  5 +++++
 turbine/src/cluster_nodes.rs | 16 ++++++++++++++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index cd768991c..5f563427a 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -687,6 +687,10 @@ pub mod reduce_stake_warmup_cooldown {
     }
 }
 
+pub mod revise_turbine_epoch_stakes {
+    solana_sdk::declare_id!("BTWmtJC8U5ZLMbBUUA1k6As62sYjPEjAiNAT55xYGdJU");
+}
+
 lazy_static! {
     /// Map of feature identifiers to user-visible description
     pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [
@@ -851,6 +855,7 @@ lazy_static! {
         (bpf_account_data_direct_mapping::id(), "use memory regions to map account data into the rbpf vm instead of copying the data"),
         (last_restart_slot_sysvar::id(), "enable new sysvar last_restart_slot"),
         (reduce_stake_warmup_cooldown::id(), "reduce stake warmup cooldown from 25% to 9%"),
+        (revise_turbine_epoch_stakes::id(), "revise turbine epoch stakes"),
         /*************** ADD NEW FEATURES HERE ***************/
     ]
     .iter()
diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs
index 64fabd65e..124d6d8c9 100644
--- a/turbine/src/cluster_nodes.rs
+++ b/turbine/src/cluster_nodes.rs
@@ -362,7 +362,7 @@ impl<T: 'static> ClusterNodesCache<T> {
         working_bank: &Bank,
         cluster_info: &ClusterInfo,
     ) -> Arc<ClusterNodes<T>> {
-        let epoch = root_bank.get_leader_schedule_epoch(shred_slot);
+        let epoch = get_epoch(shred_slot, root_bank);
         let entry = self.get_cache_entry(epoch);
         if let Some((_, nodes)) = entry
             .read()
@@ -383,7 +383,7 @@ impl<T: 'static> ClusterNodesCache<T> {
             .find_map(|bank| bank.epoch_staked_nodes(epoch));
         if epoch_staked_nodes.is_none() {
             inc_new_counter_debug!("cluster_nodes-unknown_epoch_staked_nodes", 1);
-            if epoch != root_bank.get_leader_schedule_epoch(root_bank.slot()) {
+            if epoch != get_epoch(root_bank.slot(), root_bank) {
                 return self.get(root_bank.slot(), root_bank, working_bank, cluster_info);
             }
             inc_new_counter_info!("cluster_nodes-unknown_epoch_staked_nodes_root", 1);
@@ -397,6 +397,18 @@ impl<T: 'static> ClusterNodesCache<T> {
     }
 }
 
+fn get_epoch(shred_slot: Slot, root_bank: &Bank) -> Epoch {
+    if check_feature_activation(
+        &feature_set::revise_turbine_epoch_stakes::id(),
+        shred_slot,
+        root_bank,
+    ) {
+        root_bank.epoch_schedule().get_epoch(shred_slot)
+    } else {
+        root_bank.get_leader_schedule_epoch(shred_slot)
+    }
+}
+
 impl From<ContactInfo> for NodeId {
     fn from(node: ContactInfo) -> Self {
         NodeId::ContactInfo(node)
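
Below is a minimal sketch of the off-by-one described in the commit
message, separate from the patch itself. It exercises the EpochSchedule
API from solana-sdk; the 32-slot epoch configuration and the chosen
slots are assumptions made for illustration, not values taken from the
patch.

use solana_sdk::epoch_schedule::EpochSchedule;

fn main() {
    // 32 slots per epoch, leader schedule offset of one full epoch, no
    // warmup. Illustrative numbers only.
    let epoch_schedule = EpochSchedule::custom(32, 32, false);

    // Epoch-boundary case from the commit message: the root bank sits at
    // the last slot of epoch 2 and receives a shred from the first slot
    // of epoch 3.
    let root_slot = epoch_schedule.get_last_slot_in_epoch(2);
    let shred_slot = epoch_schedule.get_first_slot_in_epoch(3);
    assert_eq!(epoch_schedule.get_epoch(root_slot), 2);

    // Old lookup: get_leader_schedule_epoch(slot) == get_epoch(slot) + 1,
    // so the shred maps to epoch 4, two epochs ahead of the root bank's
    // epoch 2. Banks carry epoch stakes only one epoch ahead, so that
    // lookup came up empty.
    assert_eq!(epoch_schedule.get_leader_schedule_epoch(shred_slot), 4);

    // New lookup (revise_turbine_epoch_stakes active): the shred slot's
    // own epoch, for which the bank does have stakes.
    assert_eq!(epoch_schedule.get_epoch(shred_slot), 3);
}

The patch routes both lookups through the new get_epoch helper behind
the revise_turbine_epoch_stakes feature gate, keyed on the shred slot,
presumably so that all validators switch to the new epoch computation
at the same point rather than mid-cluster.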