includes shred-type when computing turbine broadcast seed (#25556)

Indices for code and data shreds of the same slot overlap, and so they
will have the same random-number-generator seed when shuffling cluster
nodes for turbine broadcast.

This results in the same propagation path for code and data shreds of
the same index, and effectively a smaller sample size of re-transmitter
nodes. For example, a 32:32 batch (32 code + 32 data shreds) is
retransmitted through _at most_ 32 unique nodes, whereas ideally we want
~64 unique re-transmitters.
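
To illustrate the collision, here is a standalone sketch (not part of
this patch; it assumes the rand and rand_chacha crates, and the variable
names are hypothetical): seeding ChaChaRng with the same 32 bytes
reproduces the exact same node shuffle, so a code shred and a data shred
with colliding seeds always pick the same re-transmitters:

    use rand::{seq::SliceRandom, SeedableRng};
    use rand_chacha::ChaChaRng;

    fn main() {
        // Same (slot, index, leader) => same 32-byte seed for both shred types.
        let seed = [7u8; 32];
        let nodes: Vec<u32> = (0..64).collect();

        let mut order_for_data_shred = nodes.clone();
        order_for_data_shred.shuffle(&mut ChaChaRng::from_seed(seed));
        let mut order_for_code_shred = nodes.clone();
        order_for_code_shred.shuffle(&mut ChaChaRng::from_seed(seed));

        // Identical shuffles: both shreds traverse the same path, so a
        // 32:32 batch reaches at most 32 unique first-hop re-transmitters.
        assert_eq!(order_for_data_shred, order_for_code_shred);
    }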

This commit adds the shred type to the seed function so that code and
data shreds of the same (slot, index) will (most likely) have different
propagation paths.
behzad nouri 2022-05-25 20:31:53 +00:00 committed by GitHub
parent 880684565c
commit cafa85bfbb
3 changed files with 39 additions and 8 deletions

core/src/cluster_nodes.rs

@@ -122,7 +122,7 @@ impl ClusterNodes<BroadcastStage> {
         socket_addr_space: &SocketAddrSpace,
     ) -> Vec<SocketAddr> {
         const MAX_CONTACT_INFO_AGE: Duration = Duration::from_secs(2 * 60);
-        let shred_seed = shred.seed(self.pubkey);
+        let shred_seed = shred.seed(self.pubkey, root_bank);
         let mut rng = ChaChaRng::from_seed(shred_seed);
         let index = match self.weighted_shuffle.first(&mut rng) {
             None => return Vec::default(),
@@ -219,7 +219,7 @@ impl ClusterNodes<RetransmitStage> {
         Vec<&Node>, // neighbors
         Vec<&Node>, // children
     ) {
-        let shred_seed = shred.seed(slot_leader);
+        let shred_seed = shred.seed(slot_leader, root_bank);
         let mut weighted_shuffle = self.weighted_shuffle.clone();
         // Exclude slot leader from list of nodes.
         if slot_leader == self.pubkey {

ledger/src/shred.rs

@@ -57,8 +57,10 @@ use {
     serde::{Deserialize, Serialize},
     solana_entry::entry::{create_ticks, Entry},
     solana_perf::packet::Packet,
+    solana_runtime::bank::Bank,
     solana_sdk::{
         clock::Slot,
+        feature_set,
         hash::{hashv, Hash},
         packet::PACKET_DATA_SIZE,
         pubkey::Pubkey,
@@ -416,12 +418,21 @@ impl Shred {
         self.set_signature(signature);
     }

-    pub fn seed(&self, leader_pubkey: Pubkey) -> [u8; 32] {
-        hashv(&[
-            &self.slot().to_le_bytes(),
-            &self.index().to_le_bytes(),
-            &leader_pubkey.to_bytes(),
-        ])
+    pub fn seed(&self, leader_pubkey: Pubkey, root_bank: &Bank) -> [u8; 32] {
+        if add_shred_type_to_shred_seed(self.slot(), root_bank) {
+            hashv(&[
+                &self.slot().to_le_bytes(),
+                &u8::from(self.shred_type()).to_le_bytes(),
+                &self.index().to_le_bytes(),
+                &leader_pubkey.to_bytes(),
+            ])
+        } else {
+            hashv(&[
+                &self.slot().to_le_bytes(),
+                &self.index().to_le_bytes(),
+                &leader_pubkey.to_bytes(),
+            ])
+        }
         .to_bytes()
     }
@@ -632,6 +643,21 @@ pub fn verify_test_data_shred(
     }
 }

+fn add_shred_type_to_shred_seed(shred_slot: Slot, bank: &Bank) -> bool {
+    let feature_slot = bank
+        .feature_set
+        .activated_slot(&feature_set::add_shred_type_to_shred_seed::id());
+    match feature_slot {
+        None => false,
+        Some(feature_slot) => {
+            let epoch_schedule = bank.epoch_schedule();
+            let feature_epoch = epoch_schedule.get_epoch(feature_slot);
+            let shred_epoch = epoch_schedule.get_epoch(shred_slot);
+            feature_epoch < shred_epoch
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use {
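
The two hash derivations above, extracted into a standalone sketch
(illustrative only, not code from this patch: the Option<u8> switch
stands in for the feature gate, and DATA/CODE mirror the shred
wire-format discriminants):

    use solana_sdk::{hash::hashv, pubkey::Pubkey};

    // Stand-ins for the ShredType::Data / ShredType::Code wire-format
    // discriminants; treat the exact values as illustrative here.
    const DATA: u8 = 0b1010_0101;
    const CODE: u8 = 0b0101_1010;

    fn seed(slot: u64, shred_type: Option<u8>, index: u32, leader: &Pubkey) -> [u8; 32] {
        match shred_type {
            // New, feature-gated derivation: the shred type is mixed into the hash.
            Some(shred_type) => hashv(&[
                &slot.to_le_bytes(),
                &shred_type.to_le_bytes(),
                &index.to_le_bytes(),
                &leader.to_bytes(),
            ]),
            // Legacy derivation: code and data shreds of the same
            // (slot, index) produce the same seed.
            None => hashv(&[
                &slot.to_le_bytes(),
                &index.to_le_bytes(),
                &leader.to_bytes(),
            ]),
        }
        .to_bytes()
    }

    fn main() {
        let leader = Pubkey::new_unique();
        // With the shred type in the hash, the two seeds diverge, giving
        // code and data shreds distinct propagation paths.
        assert_ne!(seed(42, Some(DATA), 7, &leader), seed(42, Some(CODE), 7, &leader));
    }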

sdk/src/feature_set.rs

@@ -408,6 +408,10 @@ pub mod include_account_index_in_rent_error {
     solana_sdk::declare_id!("2R72wpcQ7qV7aTJWUumdn8u5wmmTyXbK7qzEy7YSAgyY");
 }

+pub mod add_shred_type_to_shred_seed {
+    solana_sdk::declare_id!("Ds87KVeqhbv7Jw8W6avsS1mqz3Mw5J3pRTpPoDQ2QdiJ");
+}
+
 lazy_static! {
     /// Map of feature identifiers to user-visible description
     pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [
@@ -503,6 +507,7 @@ lazy_static! {
         (add_set_compute_unit_price_ix::id(), "add compute budget ix for setting a compute unit price"),
         (disable_deploy_of_alloc_free_syscall::id(), "disable new deployments of deprecated sol_alloc_free_ syscall"),
         (include_account_index_in_rent_error::id(), "include account index in rent tx error #25190"),
+        (add_shred_type_to_shred_seed::id(), "add shred-type to shred seed #25556"),
         /*************** ADD NEW FEATURES HERE ***************/
     ]
     .iter()
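
The feature id registered above is what add_shred_type_to_shred_seed
looks up via activated_slot. Note the strict feature_epoch < shred_epoch
comparison: the new seed only applies to shreds from epochs *after* the
activation epoch, so every node switches derivations at the same epoch
boundary regardless of when it observed the activation. A toy model of
that gating rule (the fixed slots-per-epoch and helper names are
illustrative stand-ins for solana_sdk's EpochSchedule, which also has a
warm-up period):

    // Simplified epoch math; real clusters use EpochSchedule.
    const SLOTS_PER_EPOCH: u64 = 432_000;

    fn epoch(slot: u64) -> u64 {
        slot / SLOTS_PER_EPOCH
    }

    fn use_new_seed(shred_slot: u64, feature_activation_slot: Option<u64>) -> bool {
        match feature_activation_slot {
            None => false, // feature not activated: keep the legacy seed
            Some(feature_slot) => epoch(feature_slot) < epoch(shred_slot),
        }
    }

    fn main() {
        let activation = 5 * SLOTS_PER_EPOCH + 123; // activated mid-epoch 5
        // Shreds in the activation epoch still use the legacy seed ...
        assert!(!use_new_seed(5 * SLOTS_PER_EPOCH + 200_000, Some(activation)));
        // ... and everything from epoch 6 onward uses the new one.
        assert!(use_new_seed(6 * SLOTS_PER_EPOCH, Some(activation)));
    }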