From b3b57a0f07844c6e9a9e3ef317bd513d7dbc84ab Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 12 Aug 2022 18:02:01 +0000 Subject: [PATCH] adjusts max coding shreds per slot (#27083) As a consequence of removing buffering when generating coding shreds: https://github.com/solana-labs/solana/pull/25807 more coding shreds are generated than data shreds, and so MAX_CODE_SHREDS_PER_SLOT needs to be adjusted accordingly. The respective value is tied to ERASURE_BATCH_SIZE. --- ledger/src/shred.rs | 2 ++ ledger/src/shred/legacy.rs | 9 +++++---- ledger/src/shred/shred_code.rs | 4 +++- ledger/src/shredder.rs | 22 +++++++++++++++++++--- 4 files changed, 29 insertions(+), 8 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 2656367db..5556b3906 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -49,6 +49,8 @@ //! So, given a) - c), we must restrict data shred's payload length such that the entire coding //! payload can fit into one coding shred / packet. +#[cfg(test)] +pub(crate) use shred_code::MAX_CODE_SHREDS_PER_SLOT; pub(crate) use shred_data::ShredData; pub use { self::stats::{ProcessShredsStats, ShredFetchStats}, diff --git a/ledger/src/shred/legacy.rs b/ledger/src/shred/legacy.rs index 71c420378..fdf97b8b1 100644 --- a/ledger/src/shred/legacy.rs +++ b/ledger/src/shred/legacy.rs @@ -325,7 +325,7 @@ impl ShredCode { mod test { use { super::*, - crate::shred::{ShredType, MAX_DATA_SHREDS_PER_SLOT}, + crate::shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredType, MAX_DATA_SHREDS_PER_SLOT}, matches::assert_matches, }; @@ -433,10 +433,10 @@ mod test { } { let mut shred = shred.clone(); - shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32; + shred.common_header.index = MAX_CODE_SHREDS_PER_SLOT as u32; assert_matches!( shred.sanitize(), - Err(Error::InvalidShredIndex(ShredType::Code, 32768)) + Err(Error::InvalidShredIndex(ShredType::Code, 557_056)) ); } // pos >= num_coding is invalid. @@ -454,7 +454,7 @@ mod test { { let mut shred = shred.clone(); shred.common_header.fec_set_index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2; - shred.coding_header.num_data_shreds = 2; + shred.coding_header.num_data_shreds = 3; shred.coding_header.num_coding_shreds = 4; shred.coding_header.position = 1; shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2; @@ -463,6 +463,7 @@ mod test { Err(Error::InvalidErasureShardIndex { .. }) ); + shred.coding_header.num_data_shreds = 2; shred.coding_header.num_coding_shreds = 2000; assert_matches!(shred.sanitize(), Err(Error::InvalidNumCodingShreds(2000))); diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index cd5d99e86..538bb2542 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -10,7 +10,9 @@ use { static_assertions::const_assert_eq, }; -pub(super) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT; +// See ERASURE_BATCH_SIZE. 
+const_assert_eq!(MAX_CODE_SHREDS_PER_SLOT, 32_768 * 17); +pub(crate) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT * 17; const_assert_eq!(ShredCode::SIZE_OF_PAYLOAD, 1228); diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index ea7d86920..d3a50cb82 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -378,9 +378,12 @@ fn get_fec_set_offsets( mod tests { use { super::*, - crate::shred::{ - self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred, - ShredType, + crate::{ + blockstore::MAX_DATA_SHREDS_PER_SLOT, + shred::{ + self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred, + ShredType, MAX_CODE_SHREDS_PER_SLOT, + }, }, bincode::serialized_size, matches::assert_matches, @@ -1105,4 +1108,17 @@ mod tests { )); } } + + #[test] + fn test_max_shreds_per_slot() { + for num_data_shreds in 0..128 { + let num_coding_shreds = get_erasure_batch_size(num_data_shreds) + .checked_sub(num_data_shreds) + .unwrap(); + assert!( + MAX_DATA_SHREDS_PER_SLOT * num_coding_shreds + <= MAX_CODE_SHREDS_PER_SLOT * num_data_shreds + ); + } + } }
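
For reference, below is a minimal standalone sketch of where the factor of 17 comes from. It assumes an ERASURE_BATCH_SIZE table equivalent to the one in ledger/src/shredder.rs and the 2x fallback used by get_erasure_batch_size; the values are copied here only for illustration and should be treated as assumptions, not as the authoritative definition. Under that assumption, the worst coding-to-data ratio occurs for a batch of a single data shred (18 - 1 = 17 coding shreds), hence MAX_CODE_SHREDS_PER_SLOT = MAX_DATA_SHREDS_PER_SLOT * 17 = 32_768 * 17 = 557_056, the same bound that the new test_max_shreds_per_slot checks directly against get_erasure_batch_size.

// Illustration only, not part of the patch above; mirrors (as an assumption)
// the ERASURE_BATCH_SIZE table and fallback behavior in ledger/src/shredder.rs.
const ERASURE_BATCH_SIZE: [usize; 33] = [
    0, 18, 20, 22, 23, 25, 27, 28, 30, // 8
    32, 33, 35, 36, 38, 39, 41, 42, // 16
    43, 45, 46, 48, 49, 51, 52, 53, // 24
    55, 56, 58, 59, 60, 62, 63, 64, // 32
];

fn erasure_batch_size(num_data_shreds: usize) -> usize {
    ERASURE_BATCH_SIZE
        .get(num_data_shreds)
        .copied()
        .unwrap_or(2 * num_data_shreds)
}

fn main() {
    const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768;
    // Worst-case number of coding shreds generated per data shred across batch sizes.
    let worst_ratio = (1..=128usize)
        .map(|n| {
            let num_coding_shreds = erasure_batch_size(n) - n;
            (num_coding_shreds + n - 1) / n // ceil(num_coding_shreds / n)
        })
        .max()
        .unwrap();
    // A single-shred batch yields 18 - 1 = 17 coding shreds.
    assert_eq!(worst_ratio, 17);
    println!(
        "MAX_CODE_SHREDS_PER_SLOT = {} * {} = {}",
        MAX_DATA_SHREDS_PER_SLOT,
        worst_ratio,
        MAX_DATA_SHREDS_PER_SLOT * worst_ratio, // 557_056
    );
}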