diff --git a/ledger/src/blocktree.rs b/ledger/src/blocktree.rs
index fa898d868..164a47152 100644
--- a/ledger/src/blocktree.rs
+++ b/ledger/src/blocktree.rs
@@ -4608,7 +4608,7 @@ pub mod tests {
             .iter()
             .cloned()
             .chain(
-                coding_shreds[coding_shreds.len() / 2 - 1..data_shreds.len() / 2]
+                coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2]
                     .iter()
                     .cloned(),
             )
diff --git a/ledger/src/blocktree_meta.rs b/ledger/src/blocktree_meta.rs
index 4d195de83..8105b0d4d 100644
--- a/ledger/src/blocktree_meta.rs
+++ b/ledger/src/blocktree_meta.rs
@@ -221,19 +221,17 @@ impl ErasureMeta {
             .data()
             .present_in_bounds(self.set_index..self.set_index + self.config.num_data() as u64);
 
-        let (data_missing, coding_missing) = (
-            self.config.num_data() - num_data,
-            self.config.num_coding() - num_coding,
+        let (data_missing, num_needed) = (
+            self.config.num_data().saturating_sub(num_data),
+            self.config.num_data().saturating_sub(num_data + num_coding),
         );
-        let total_missing = data_missing + coding_missing;
-
-        if data_missing > 0 && total_missing <= self.config.num_coding() {
-            CanRecover
-        } else if data_missing == 0 {
+        if data_missing == 0 {
             DataFull
+        } else if num_needed == 0 {
+            CanRecover
         } else {
-            StillNeed(total_missing - self.config.num_coding())
+            StillNeed(num_needed)
         }
     }
 
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 43656194e..100c1c37f 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -585,7 +585,14 @@ impl Shredder {
         if fec_rate != 0.0 {
             let num_data = data_shred_batch.len();
             // always generate at least 1 coding shred even if the fec_rate doesn't allow it
-            let num_coding = Self::calculate_num_coding_shreds(num_data as f32, fec_rate);
+            let num_coding = Self::calculate_num_coding_shreds(num_data as u32, fec_rate);
+            if num_coding > num_data {
+                trace!(
+                    "Generated more codes ({}) than data shreds ({})",
+                    num_coding,
+                    num_data
+                );
+            }
             let session =
                 Session::new(num_data, num_coding).expect("Failed to create erasure session");
             let start_index = data_shred_batch[0].common_header.index;
@@ -653,8 +660,8 @@
         }
     }
 
-    fn calculate_num_coding_shreds(num_data_shreds: f32, fec_rate: f32) -> usize {
-        1.max((fec_rate * num_data_shreds) as usize)
+    fn calculate_num_coding_shreds(num_data_shreds: u32, fec_rate: f32) -> usize {
+        (MAX_DATA_SHREDS_PER_FEC_BLOCK.max(num_data_shreds) as f32 * fec_rate) as usize
     }
 
     fn fill_in_missing_shreds(
@@ -951,7 +958,7 @@ pub mod tests {
         let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
         let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size;
         let num_expected_coding_shreds =
-            Shredder::calculate_num_coding_shreds(num_expected_data_shreds as f32, fec_rate);
+            Shredder::calculate_num_coding_shreds(num_expected_data_shreds as u32, fec_rate);
 
         let start_index = 0;
         let (data_shreds, coding_shreds, next_index) =
@@ -1123,9 +1130,6 @@
 
         let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
 
-        // Must have created an equal number of coding and data shreds
-        assert_eq!(data_shreds.len(), coding_shreds.len());
-
         for (i, s) in data_shreds.iter().enumerate() {
             verify_test_data_shred(
                 s,
@@ -1170,10 +1174,10 @@
 
         let serialized_entries = bincode::serialize(&entries).unwrap();
         let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
+        let num_coding_shreds = coding_shreds.len();
 
         // We should have 10 shreds now, an equal number of coding shreds
         assert_eq!(data_shreds.len(), num_data_shreds);
-        assert_eq!(coding_shreds.len(), num_data_shreds);
 
         let all_shreds = data_shreds
             .iter()
@@ -1186,7 +1190,7 @@
             Shredder::try_recovery(
                 data_shreds[..data_shreds.len() - 1].to_vec(),
                 num_data_shreds,
-                num_data_shreds,
+                num_coding_shreds,
                 0,
                 0,
                 slot
@@ -1198,7 +1202,7 @@
         let recovered_data = Shredder::try_recovery(
             data_shreds[..].to_vec(),
             num_data_shreds,
-            num_data_shreds,
+            num_coding_shreds,
             0,
             0,
             slot,
@@ -1216,7 +1220,7 @@
         let mut recovered_data = Shredder::try_recovery(
             shred_info.clone(),
             num_data_shreds,
-            num_data_shreds,
+            num_coding_shreds,
             0,
             0,
             slot,
@@ -1264,7 +1268,7 @@
         let recovered_data = Shredder::try_recovery(
             shred_info.clone(),
             num_data_shreds,
-            num_data_shreds,
+            num_coding_shreds,
             0,
             0,
             slot,
@@ -1317,10 +1321,9 @@
         // and 2 missing coding shreds. Hint: should work
         let serialized_entries = bincode::serialize(&entries).unwrap();
         let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 25);
-
-        // We should have 10 shreds now, an equal number of coding shreds
+        let num_coding_shreds = coding_shreds.len();
+        // We should have 10 shreds now
         assert_eq!(data_shreds.len(), num_data_shreds);
-        assert_eq!(coding_shreds.len(), num_data_shreds);
 
         let all_shreds = data_shreds
             .iter()
@@ -1337,7 +1340,7 @@
         let recovered_data = Shredder::try_recovery(
             shred_info.clone(),
             num_data_shreds,
-            num_data_shreds,
+            num_coding_shreds,
             25,
             25,
             slot,
@@ -1369,7 +1372,7 @@
         let recovered_data = Shredder::try_recovery(
             shred_info.clone(),
             num_data_shreds,
-            num_data_shreds,
+            num_coding_shreds,
             25,
             25,
             slot + 1,
@@ -1382,7 +1385,7 @@
             Shredder::try_recovery(
                 shred_info.clone(),
                 num_data_shreds,
-                num_data_shreds,
+                num_coding_shreds,
                 15,
                 15,
                 slot,
@@ -1392,7 +1395,7 @@
 
         // Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
         assert_matches!(
-            Shredder::try_recovery(shred_info, num_data_shreds, num_data_shreds, 35, 35, slot,),
+            Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, 35, slot,),
            Err(reed_solomon_erasure::Error::TooFewShardsPresent)
         );
     }
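
For reference, a minimal standalone sketch (not part of the patch) of the recovery-status decision introduced in the blocktree_meta.rs hunk above. The free function and its parameter names (cfg_num_data, present_data, present_coding) are illustrative assumptions; the real ErasureMeta::status() reads these counts from the erasure config and the shred index.

// Sketch of the new status logic: recovery needs at least cfg_num_data
// shreds in total, counting both data and coding shreds.
#[derive(Debug, PartialEq)]
enum Status {
    DataFull,
    CanRecover,
    StillNeed(usize),
}

fn status(cfg_num_data: usize, present_data: usize, present_coding: usize) -> Status {
    let data_missing = cfg_num_data.saturating_sub(present_data);
    let num_needed = cfg_num_data.saturating_sub(present_data + present_coding);
    if data_missing == 0 {
        Status::DataFull
    } else if num_needed == 0 {
        Status::CanRecover
    } else {
        Status::StillNeed(num_needed)
    }
}

fn main() {
    // 8 data shreds configured; 6 data + 2 coding present -> recoverable.
    assert_eq!(status(8, 6, 2), Status::CanRecover);
    // 6 data + 1 coding present -> one more shred of either kind is needed.
    assert_eq!(status(8, 6, 1), Status::StillNeed(1));
    // All data present -> nothing to recover.
    assert_eq!(status(8, 8, 0), Status::DataFull);
}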