From f526c424c5db46cfb4bbcec670874f011daacd65 Mon Sep 17 00:00:00 2001 From: sakridge Date: Wed, 11 Dec 2019 11:10:21 -0800 Subject: [PATCH] Move slow shred multi_fec test to integration tests folder (#7426) --- ledger/src/shred.rs | 144 ++++++++---------------------------------- ledger/tests/shred.rs | 95 ++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+), 116 deletions(-) create mode 100644 ledger/tests/shred.rs diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index fead6eafa..5f5c6bd55 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -817,6 +817,34 @@ pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 { (shred_data_size * num_shreds - count_size) / entry_size } +pub fn verify_test_data_shred( + shred: &Shred, + index: u32, + slot: Slot, + parent: Slot, + pk: &Pubkey, + verify: bool, + is_last_in_slot: bool, + is_last_in_fec_set: bool, +) { + assert_eq!(shred.payload.len(), PACKET_DATA_SIZE); + assert!(shred.is_data()); + assert_eq!(shred.index(), index); + assert_eq!(shred.slot(), slot); + assert_eq!(shred.parent(), parent); + assert_eq!(verify, shred.verify(pk)); + if is_last_in_slot { + assert!(shred.last_in_slot()); + } else { + assert!(!shred.last_in_slot()); + } + if is_last_in_fec_set { + assert!(shred.data_complete()); + } else { + assert!(!shred.data_complete()); + } +} + #[cfg(test)] pub mod tests { use super::*; @@ -847,34 +875,6 @@ pub mod tests { ); } - fn verify_test_data_shred( - shred: &Shred, - index: u32, - slot: Slot, - parent: Slot, - pk: &Pubkey, - verify: bool, - is_last_in_slot: bool, - is_last_in_fec_set: bool, - ) { - assert_eq!(shred.payload.len(), PACKET_DATA_SIZE); - assert!(shred.is_data()); - assert_eq!(shred.index(), index); - assert_eq!(shred.slot(), slot); - assert_eq!(shred.parent(), parent); - assert_eq!(verify, shred.verify(pk)); - if is_last_in_slot { - assert!(shred.last_in_slot()); - } else { - assert!(!shred.last_in_slot()); - } - if is_last_in_fec_set { - 
assert!(shred.data_complete());
-        } else {
-            assert!(!shred.data_complete());
-        }
-    }
-
     fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) {
         assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
         assert!(!shred.is_data());
@@ -1363,94 +1363,6 @@ pub mod tests {
         );
     }
 
-    #[test]
-    fn test_multi_fec_block_coding() {
-        let keypair = Arc::new(Keypair::new());
-        let slot = 0x123456789abcdef0;
-        let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
-            .expect("Failed in creating shredder");
-
-        let num_fec_sets = 100;
-        let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize;
-        let keypair0 = Keypair::new();
-        let keypair1 = Keypair::new();
-        let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
-        let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
-        let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
-
-        let entries: Vec<_> = (0..num_entries)
-            .map(|_| {
-                let keypair0 = Keypair::new();
-                let keypair1 = Keypair::new();
-                let tx0 =
-                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
-                Entry::new(&Hash::default(), 1, vec![tx0])
-            })
-            .collect();
-
-        let serialized_entries = bincode::serialize(&entries).unwrap();
-        let (data_shreds, coding_shreds, next_index) =
-            shredder.entries_to_shreds(&entries, true, 0);
-        assert_eq!(next_index as usize, num_data_shreds);
-        assert_eq!(data_shreds.len(), num_data_shreds);
-        assert_eq!(coding_shreds.len(), num_data_shreds);
-
-        for c in &coding_shreds {
-            assert!(!c.is_data());
-        }
-
-        let mut all_shreds = vec![];
-        for i in 0..num_fec_sets {
-            let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize;
-            let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1;
-            let fec_set_shreds = data_shreds[shred_start_index..=end_index]
-                .iter()
-                .cloned()
-                .chain(coding_shreds[shred_start_index..=end_index].iter().cloned())
-                .collect::<Vec<_>>();
-
-            let mut shred_info: Vec<Shred> = fec_set_shreds
-                .iter()
-                .enumerate()
-                .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
-                .collect();
-
-            let recovered_data = Shredder::try_recovery(
-                shred_info.clone(),
-                MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
-                MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
-                shred_start_index,
-                slot,
-            )
-            .unwrap();
-
-            for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
-                let index = shred_start_index + (i * 2);
-                verify_test_data_shred(
-                    &recovered_shred,
-                    index.try_into().unwrap(),
-                    slot,
-                    slot - 5,
-                    &keypair.pubkey(),
-                    true,
-                    index == end_index,
-                    index == end_index,
-                );
-
-                shred_info.insert(i * 2, recovered_shred);
-            }
-
-            all_shreds.extend(
-                shred_info
-                    .into_iter()
-                    .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize),
-            );
-        }
-
-        let result = Shredder::deshred(&all_shreds[..]).unwrap();
-        assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
-    }
-
     #[test]
     fn test_shred_version() {
         let keypair = Arc::new(Keypair::new());
diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs
new file mode 100644
index 000000000..461fb8483
--- /dev/null
+++ b/ledger/tests/shred.rs
@@ -0,0 +1,95 @@
+use solana_ledger::entry::Entry;
+use solana_ledger::shred::{
+    max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
+};
+use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::{hash::Hash, system_transaction};
+use std::convert::TryInto;
+use std::sync::Arc;
+
+#[test]
+fn test_multi_fec_block_coding() {
+    let keypair = Arc::new(Keypair::new());
+    let slot = 0x123456789abcdef0;
+    let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
+        .expect("Failed in creating shredder");
+
+    let num_fec_sets = 100;
+    let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize;
+    let keypair0 = Keypair::new();
+    let keypair1 = Keypair::new();
+    let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1,
Hash::default());
+    let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
+    let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
+
+    let entries: Vec<_> = (0..num_entries)
+        .map(|_| {
+            let keypair0 = Keypair::new();
+            let keypair1 = Keypair::new();
+            let tx0 =
+                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
+            Entry::new(&Hash::default(), 1, vec![tx0])
+        })
+        .collect();
+
+    let serialized_entries = bincode::serialize(&entries).unwrap();
+    let (data_shreds, coding_shreds, next_index) = shredder.entries_to_shreds(&entries, true, 0);
+    assert_eq!(next_index as usize, num_data_shreds);
+    assert_eq!(data_shreds.len(), num_data_shreds);
+    assert_eq!(coding_shreds.len(), num_data_shreds);
+
+    for c in &coding_shreds {
+        assert!(!c.is_data());
+    }
+
+    let mut all_shreds = vec![];
+    for i in 0..num_fec_sets {
+        let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize;
+        let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1;
+        let fec_set_shreds = data_shreds[shred_start_index..=end_index]
+            .iter()
+            .cloned()
+            .chain(coding_shreds[shred_start_index..=end_index].iter().cloned())
+            .collect::<Vec<_>>();
+
+        let mut shred_info: Vec<Shred> = fec_set_shreds
+            .iter()
+            .enumerate()
+            .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
+            .collect();
+
+        let recovered_data = Shredder::try_recovery(
+            shred_info.clone(),
+            MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
+            MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
+            shred_start_index,
+            slot,
+        )
+        .unwrap();
+
+        for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
+            let index = shred_start_index + (i * 2);
+            verify_test_data_shred(
+                &recovered_shred,
+                index.try_into().unwrap(),
+                slot,
+                slot - 5,
+                &keypair.pubkey(),
+                true,
+                index == end_index,
+                index == end_index,
+            );
+
+            shred_info.insert(i * 2, recovered_shred);
+        }
+
+        all_shreds.extend(
+            shred_info
+                .into_iter()
+                .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize),
+ ); + } + + let result = Shredder::deshred(&all_shreds[..]).unwrap(); + assert_eq!(serialized_entries[..], result[..serialized_entries.len()]); +}