adjusts max coding shreds per slot (#27083)
As a consequence of removing buffering when generating coding shreds (https://github.com/solana-labs/solana/pull/25807), more coding shreds are generated than data shreds, and so MAX_CODE_SHREDS_PER_SLOT needs to be adjusted accordingly. The respective value is tied to ERASURE_BATCH_SIZE.
This commit is contained in:
parent
35c87c3888
commit
b3b57a0f07
|
@@ -49,6 +49,8 @@
|
|||
//! So, given a) - c), we must restrict data shred's payload length such that the entire coding
|
||||
//! payload can fit into one coding shred / packet.
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) use shred_code::MAX_CODE_SHREDS_PER_SLOT;
|
||||
pub(crate) use shred_data::ShredData;
|
||||
pub use {
|
||||
self::stats::{ProcessShredsStats, ShredFetchStats},
|
||||
|
|
|
@@ -325,7 +325,7 @@ impl ShredCode {
|
|||
mod test {
|
||||
use {
|
||||
super::*,
|
||||
crate::shred::{ShredType, MAX_DATA_SHREDS_PER_SLOT},
|
||||
crate::shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredType, MAX_DATA_SHREDS_PER_SLOT},
|
||||
matches::assert_matches,
|
||||
};
|
||||
|
||||
|
@@ -433,10 +433,10 @@ mod test {
|
|||
}
|
||||
{
|
||||
let mut shred = shred.clone();
|
||||
shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32;
|
||||
shred.common_header.index = MAX_CODE_SHREDS_PER_SLOT as u32;
|
||||
assert_matches!(
|
||||
shred.sanitize(),
|
||||
Err(Error::InvalidShredIndex(ShredType::Code, 32768))
|
||||
Err(Error::InvalidShredIndex(ShredType::Code, 557_056))
|
||||
);
|
||||
}
|
||||
// pos >= num_coding is invalid.
|
||||
|
@@ -454,7 +454,7 @@ mod test {
|
|||
{
|
||||
let mut shred = shred.clone();
|
||||
shred.common_header.fec_set_index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2;
|
||||
shred.coding_header.num_data_shreds = 2;
|
||||
shred.coding_header.num_data_shreds = 3;
|
||||
shred.coding_header.num_coding_shreds = 4;
|
||||
shred.coding_header.position = 1;
|
||||
shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2;
|
||||
|
@@ -463,6 +463,7 @@ mod test {
|
|||
Err(Error::InvalidErasureShardIndex { .. })
|
||||
);
|
||||
|
||||
shred.coding_header.num_data_shreds = 2;
|
||||
shred.coding_header.num_coding_shreds = 2000;
|
||||
assert_matches!(shred.sanitize(), Err(Error::InvalidNumCodingShreds(2000)));
|
||||
|
||||
|
|
|
@@ -10,7 +10,9 @@ use {
|
|||
static_assertions::const_assert_eq,
|
||||
};
|
||||
|
||||
pub(super) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT;
|
||||
// See ERASURE_BATCH_SIZE.
|
||||
const_assert_eq!(MAX_CODE_SHREDS_PER_SLOT, 32_768 * 17);
|
||||
pub(crate) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT * 17;
|
||||
|
||||
const_assert_eq!(ShredCode::SIZE_OF_PAYLOAD, 1228);
|
||||
|
||||
|
|
|
@@ -378,9 +378,12 @@ fn get_fec_set_offsets(
|
|||
mod tests {
|
||||
use {
|
||||
super::*,
|
||||
crate::shred::{
|
||||
self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred,
|
||||
ShredType,
|
||||
crate::{
|
||||
blockstore::MAX_DATA_SHREDS_PER_SLOT,
|
||||
shred::{
|
||||
self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred,
|
||||
ShredType, MAX_CODE_SHREDS_PER_SLOT,
|
||||
},
|
||||
},
|
||||
bincode::serialized_size,
|
||||
matches::assert_matches,
|
||||
|
@@ -1105,4 +1108,17 @@ mod tests {
|
|||
));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_shreds_per_slot() {
|
||||
for num_data_shreds in 0..128 {
|
||||
let num_coding_shreds = get_erasure_batch_size(num_data_shreds)
|
||||
.checked_sub(num_data_shreds)
|
||||
.unwrap();
|
||||
assert!(
|
||||
MAX_DATA_SHREDS_PER_SLOT * num_coding_shreds
|
||||
<= MAX_CODE_SHREDS_PER_SLOT * num_data_shreds
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue