diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index caf645500b..b65393fba0 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -6195,7 +6195,8 @@ pub mod tests {
         let gap: u64 = 10;
         assert!(gap > 3);
         // Create enough entries to ensure there are at least two shreds created
-        let num_entries = max_ticks_per_n_shreds(1, None) + 1;
+        let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
+        let num_entries = max_ticks_per_n_shreds(1, Some(data_buffer_size)) + 1;
         let entries = create_ticks(num_entries, 0, Hash::default());
         let mut shreds =
             entries_to_test_shreds(&entries, slot, 0, true, 0, /*merkle_variant:*/ false);
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index fc6dcf0d69..d22309610d 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -995,7 +995,9 @@ pub fn max_entries_per_n_shred(
     num_shreds: u64,
     shred_data_size: Option<usize>,
 ) -> u64 {
-    let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
+    // Default 32:32 erasure batches yield 64 shreds; log2(64) = 6.
+    let merkle_proof_size = Some(6);
+    let data_buffer_size = ShredData::capacity(merkle_proof_size).unwrap();
     let shred_data_size = shred_data_size.unwrap_or(data_buffer_size) as u64;
     let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
     let entry_size = bincode::serialized_size(entry).unwrap();
diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs
index 592d6e699d..b53d326338 100644
--- a/turbine/src/broadcast_stage/standard_broadcast_run.rs
+++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs
@@ -83,8 +83,7 @@ impl StandardBroadcastRun {
         let shredder =
             Shredder::new(state.slot, state.parent, reference_tick, self.shred_version)
                 .unwrap();
-        let merkle_variant =
-            should_use_merkle_variant(state.slot, cluster_type, self.shred_version);
+        let merkle_variant = should_use_merkle_variant(state.slot, cluster_type);
         let (mut shreds, coding_shreds) = shredder.entries_to_shreds(
             keypair,
             &[], // entries
@@ -146,7 +145,7 @@
         };
         let shredder =
             Shredder::new(slot, parent_slot, reference_tick, self.shred_version).unwrap();
-        let merkle_variant = should_use_merkle_variant(slot, cluster_type, self.shred_version);
+        let merkle_variant = should_use_merkle_variant(slot, cluster_type);
         let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
             keypair,
             entries,
@@ -507,10 +506,10 @@ impl BroadcastRun for StandardBroadcastRun {
     }
 }
 
-fn should_use_merkle_variant(slot: Slot, cluster_type: ClusterType, shred_version: u16) -> bool {
+fn should_use_merkle_variant(slot: Slot, cluster_type: ClusterType) -> bool {
     match cluster_type {
-        ClusterType::Testnet => shred_version == 28353,
-        _ => (slot % 19) == 1,
+        ClusterType::Testnet | ClusterType::Devnet | ClusterType::Development => true,
+        ClusterType::MainnetBeta => (slot % 19) == 1,
     }
 }
 
@@ -788,13 +787,13 @@ mod test {
         }
         // At least as many coding shreds as data shreds.
         assert!(shreds.len() >= 29 * 2);
-        assert_eq!(shreds.iter().filter(|shred| shred.is_data()).count(), 29);
+        assert_eq!(shreds.iter().filter(|shred| shred.is_data()).count(), 30);
         process_ticks(75);
         while let Ok((recv_shreds, _)) = brecv.recv_timeout(Duration::from_secs(1)) {
             shreds.extend(recv_shreds.deref().clone());
         }
         assert!(shreds.len() >= 33 * 2);
-        assert_eq!(shreds.iter().filter(|shred| shred.is_data()).count(), 33);
+        assert_eq!(shreds.iter().filter(|shred| shred.is_data()).count(), 34);
     }
 
     #[test]
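
Note on the hard-coded proof size in max_entries_per_n_shred: the comment in the diff states that the default 32:32 erasure batch yields 64 shreds and hence a proof size of 6. The standalone Rust sketch below is illustrative only and is not part of this diff; the helper name merkle_proof_len is hypothetical. It just shows that a Merkle tree padded to the next power of two over 64 leaves needs log2(64) = 6 sibling hashes per proof.

// Illustrative sketch only (not from this PR): proof length for a Merkle tree
// built over one erasure batch. With the default 32 data + 32 coding shreds
// there are 64 leaves, so each proof carries log2(64) = 6 sibling hashes.
fn merkle_proof_len(num_data: usize, num_coding: usize) -> u32 {
    // Pad the leaf count up to the next power of two, then take log2.
    let leaves = (num_data + num_coding).next_power_of_two();
    leaves.trailing_zeros()
}

fn main() {
    assert_eq!(merkle_proof_len(32, 32), 6);
}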