removes first_coding_index from erasure recovery code (#16646)

first_coding_index is the same as the set_index and is thus redundant:
https://github.com/solana-labs/solana/blob/37b8587d4/ledger/src/blockstore_meta.rs#L49-L60
This commit is contained in:
behzad nouri 2021-04-23 12:00:37 +00:00 committed by GitHub
parent f59fe41abf
commit 03194145c0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 24 additions and 62 deletions

View File

@ -152,9 +152,8 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
coding_shreds[..].to_vec(), coding_shreds[..].to_vec(),
symbol_count, symbol_count,
symbol_count, symbol_count,
0, 0, // first index
0, 1, // slot
1,
) )
.unwrap(); .unwrap();
}) })

View File

@ -3516,7 +3516,6 @@ mod tests {
use itertools::izip; use itertools::izip;
use rand::{seq::SliceRandom, SeedableRng}; use rand::{seq::SliceRandom, SeedableRng};
use rand_chacha::ChaChaRng; use rand_chacha::ChaChaRng;
use serial_test::serial;
use solana_ledger::shred::Shredder; use solana_ledger::shred::Shredder;
use solana_sdk::signature::{Keypair, Signer}; use solana_sdk::signature::{Keypair, Signer};
use solana_vote_program::{vote_instruction, vote_state::Vote}; use solana_vote_program::{vote_instruction, vote_state::Vote};
@ -4787,8 +4786,7 @@ mod tests {
} }
#[test] #[test]
#[serial] #[ignore] // TODO: debug why this is flaky on buildkite!
#[ignore]
fn test_pull_request_time_pruning() { fn test_pull_request_time_pruning() {
let node = Node::new_localhost(); let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info)); let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));

View File

@ -573,8 +573,7 @@ impl Blockstore {
prev_inserted_codes: &mut HashMap<(u64, u64), Shred>, prev_inserted_codes: &mut HashMap<(u64, u64), Shred>,
code_cf: &LedgerColumn<cf::ShredCode>, code_cf: &LedgerColumn<cf::ShredCode>,
) { ) {
(erasure_meta.first_coding_index (erasure_meta.set_index..erasure_meta.set_index + erasure_meta.config.num_coding() as u64)
..erasure_meta.first_coding_index + erasure_meta.config.num_coding() as u64)
.for_each(|i| { .for_each(|i| {
if let Some(shred) = prev_inserted_codes if let Some(shred) = prev_inserted_codes
.remove(&(slot, i)) .remove(&(slot, i))
@ -645,7 +644,6 @@ impl Blockstore {
erasure_meta.config.num_data(), erasure_meta.config.num_data(),
erasure_meta.config.num_coding(), erasure_meta.config.num_coding(),
set_index as usize, set_index as usize,
erasure_meta.first_coding_index as usize,
slot, slot,
) { ) {
Self::submit_metrics( Self::submit_metrics(
@ -1069,12 +1067,10 @@ impl Blockstore {
); );
let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| { let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| {
let first_coding_index =
u64::from(shred.index()) - u64::from(shred.coding_header.position);
self.erasure_meta_cf self.erasure_meta_cf
.get((slot, set_index)) .get((slot, set_index))
.expect("Expect database get to succeed") .expect("Expect database get to succeed")
.unwrap_or_else(|| ErasureMeta::new(set_index, first_coding_index, &erasure_config)) .unwrap_or_else(|| ErasureMeta::new(set_index, erasure_config))
}); });
if erasure_config != erasure_meta.config { if erasure_config != erasure_meta.config {
@ -1128,10 +1124,10 @@ impl Blockstore {
) -> Option<Vec<u8>> { ) -> Option<Vec<u8>> {
// Search for the shred which set the initial erasure config, either inserted, // Search for the shred which set the initial erasure config, either inserted,
// or in the current batch in just_received_coding_shreds. // or in the current batch in just_received_coding_shreds.
let coding_start = erasure_meta.first_coding_index; let coding_indices = erasure_meta.set_index
let coding_end = coding_start + erasure_meta.config.num_coding() as u64; ..erasure_meta.set_index + erasure_meta.config.num_coding() as u64;
let mut conflicting_shred = None; let mut conflicting_shred = None;
for coding_index in coding_start..coding_end { for coding_index in coding_indices {
let maybe_shred = self.get_coding_shred(slot, coding_index); let maybe_shred = self.get_coding_shred(slot, coding_index);
if let Ok(Some(shred_data)) = maybe_shred { if let Ok(Some(shred_data)) = maybe_shred {
let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();

View File

@ -51,8 +51,9 @@ pub struct ShredIndex {
pub struct ErasureMeta { pub struct ErasureMeta {
/// Which erasure set in the slot this is /// Which erasure set in the slot this is
pub set_index: u64, pub set_index: u64,
/// First coding index in the FEC set /// Deprecated field.
pub first_coding_index: u64, #[serde(rename = "first_coding_index")]
__unused: u64,
/// Size of shards in this erasure set /// Size of shards in this erasure set
pub size: usize, pub size: usize,
/// Erasure configuration for this erasure set /// Erasure configuration for this erasure set
@ -184,21 +185,19 @@ impl SlotMeta {
} }
impl ErasureMeta { impl ErasureMeta {
pub fn new(set_index: u64, first_coding_index: u64, config: &ErasureConfig) -> ErasureMeta { pub fn new(set_index: u64, config: ErasureConfig) -> ErasureMeta {
ErasureMeta { ErasureMeta {
set_index, set_index,
first_coding_index, config,
size: 0, ..Self::default()
config: *config,
} }
} }
pub fn status(&self, index: &Index) -> ErasureMetaStatus { pub fn status(&self, index: &Index) -> ErasureMetaStatus {
use ErasureMetaStatus::*; use ErasureMetaStatus::*;
let num_coding = index.coding().present_in_bounds( let coding_indices = self.set_index..self.set_index + self.config.num_coding() as u64;
self.first_coding_index..self.first_coding_index + self.config.num_coding() as u64, let num_coding = index.coding().present_in_bounds(coding_indices);
);
let num_data = index let num_data = index
.data() .data()
.present_in_bounds(self.set_index..self.set_index + self.config.num_data() as u64); .present_in_bounds(self.set_index..self.set_index + self.config.num_data() as u64);
@ -263,7 +262,7 @@ mod test {
let set_index = 0; let set_index = 0;
let erasure_config = ErasureConfig::default(); let erasure_config = ErasureConfig::default();
let mut e_meta = ErasureMeta::new(set_index, set_index, &erasure_config); let mut e_meta = ErasureMeta::new(set_index, erasure_config);
let mut rng = thread_rng(); let mut rng = thread_rng();
let mut index = Index::new(0); let mut index = Index::new(0);
e_meta.size = 1; e_meta.size = 1;

View File

@ -845,7 +845,6 @@ impl Shredder {
num_data: usize, num_data: usize,
num_coding: usize, num_coding: usize,
first_index: usize, first_index: usize,
first_code_index: usize,
slot: Slot, slot: Slot,
) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> { ) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?; Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
@ -859,8 +858,8 @@ impl Shredder {
let mut shred_bufs: Vec<Vec<u8>> = shreds let mut shred_bufs: Vec<Vec<u8>> = shreds
.into_iter() .into_iter()
.flat_map(|shred| { .flat_map(|shred| {
let index = let offset = if shred.is_data() { 0 } else { num_data };
Self::get_shred_index(&shred, num_data, first_index, first_code_index); let index = offset + shred.index() as usize;
let mut blocks = Self::fill_in_missing_shreds( let mut blocks = Self::fill_in_missing_shreds(
num_data, num_data,
num_coding, num_coding,
@ -960,19 +959,6 @@ impl Shredder {
Ok(Self::reassemble_payload(num_data, data_shred_bufs)) Ok(Self::reassemble_payload(num_data, data_shred_bufs))
} }
fn get_shred_index(
shred: &Shred,
num_data: usize,
first_data_index: usize,
first_code_index: usize,
) -> usize {
if shred.is_data() {
shred.index() as usize
} else {
shred.index() as usize + num_data + first_data_index - first_code_index
}
}
fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> { fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> {
let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS; let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS;
data_shred_bufs[..num_data] data_shred_bufs[..num_data]
@ -1453,7 +1439,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
0, 0,
0,
slot slot
), ),
Err(reed_solomon_erasure::Error::TooFewShardsPresent) Err(reed_solomon_erasure::Error::TooFewShardsPresent)
@ -1465,7 +1450,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
0, 0,
0,
slot, slot,
) )
.unwrap(); .unwrap();
@ -1483,7 +1467,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
0, 0,
0,
slot, slot,
) )
.unwrap(); .unwrap();
@ -1531,7 +1514,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
0, 0,
0,
slot, slot,
) )
.unwrap(); .unwrap();
@ -1604,7 +1586,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
25, 25,
25,
slot, slot,
) )
.unwrap(); .unwrap();
@ -1636,7 +1617,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
25, 25,
25,
slot + 1, slot + 1,
) )
.unwrap(); .unwrap();
@ -1649,7 +1629,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
15, 15,
15,
slot, slot,
), ),
Err(reed_solomon_erasure::Error::TooFewShardsPresent) Err(reed_solomon_erasure::Error::TooFewShardsPresent)
@ -1657,7 +1636,7 @@ pub mod tests {
// Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds // Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
assert_matches!( assert_matches!(
Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, 35, slot,), Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, slot),
Err(reed_solomon_erasure::Error::TooFewShardsPresent) Err(reed_solomon_erasure::Error::TooFewShardsPresent)
); );
} }
@ -1721,7 +1700,6 @@ pub mod tests {
num_data_shreds, num_data_shreds,
num_coding_shreds, num_coding_shreds,
next_shred_index as usize, // first index next_shred_index as usize, // first index
next_shred_index as usize, // first code index
slot, slot,
) )
.unwrap(); .unwrap();

View File

@ -76,7 +76,6 @@ fn test_multi_fec_block_coding() {
MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
shred_start_index, shred_start_index,
shred_start_index,
slot, slot,
) )
.unwrap(); .unwrap();
@ -121,6 +120,7 @@ fn test_multi_fec_block_different_size_coding() {
for (fec_data_shreds, fec_coding_shreds) in fec_data.values().zip(fec_coding.values()) { for (fec_data_shreds, fec_coding_shreds) in fec_data.values().zip(fec_coding.values()) {
let first_data_index = fec_data_shreds.first().unwrap().index() as usize; let first_data_index = fec_data_shreds.first().unwrap().index() as usize;
let first_code_index = fec_coding_shreds.first().unwrap().index() as usize; let first_code_index = fec_coding_shreds.first().unwrap().index() as usize;
assert_eq!(first_data_index, first_code_index);
let num_data = fec_data_shreds.len(); let num_data = fec_data_shreds.len();
let num_coding = fec_coding_shreds.len(); let num_coding = fec_coding_shreds.len();
let all_shreds: Vec<Shred> = fec_data_shreds let all_shreds: Vec<Shred> = fec_data_shreds
@ -129,17 +129,9 @@ fn test_multi_fec_block_different_size_coding() {
.chain(fec_coding_shreds.iter().step_by(2)) .chain(fec_coding_shreds.iter().step_by(2))
.cloned() .cloned()
.collect(); .collect();
let recovered_data =
let recovered_data = Shredder::try_recovery( Shredder::try_recovery(all_shreds, num_data, num_coding, first_data_index, slot)
all_shreds, .unwrap();
num_data,
num_coding,
first_data_index,
first_code_index,
slot,
)
.unwrap();
// Necessary in order to ensure the last shred in the slot // Necessary in order to ensure the last shred in the slot
// is part of the recovered set, and that the below `index` // is part of the recovered set, and that the below `index`
// calculation in the loop is correct // calculation in the loop is correct