|
|
|
@ -64,11 +64,15 @@ use {
|
|
|
|
|
borrow::Cow,
|
|
|
|
|
cell::RefCell,
|
|
|
|
|
cmp,
|
|
|
|
|
collections::{hash_map::Entry as HashMapEntry, BTreeSet, HashMap, HashSet, VecDeque},
|
|
|
|
|
collections::{
|
|
|
|
|
btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, BTreeSet,
|
|
|
|
|
HashMap, HashSet, VecDeque,
|
|
|
|
|
},
|
|
|
|
|
convert::TryInto,
|
|
|
|
|
fmt::Write,
|
|
|
|
|
fs,
|
|
|
|
|
io::{Error as IoError, ErrorKind},
|
|
|
|
|
ops::Bound,
|
|
|
|
|
path::{Path, PathBuf},
|
|
|
|
|
rc::Rc,
|
|
|
|
|
sync::{
|
|
|
|
@ -128,6 +132,7 @@ pub enum PossibleDuplicateShred {
|
|
|
|
|
LastIndexConflict(/* original */ Shred, /* conflict */ Vec<u8>), // The index of this shred conflicts with `slot_meta.last_index`
|
|
|
|
|
ErasureConflict(/* original */ Shred, /* conflict */ Vec<u8>), // The coding shred has a conflict in the erasure_meta
|
|
|
|
|
MerkleRootConflict(/* original */ Shred, /* conflict */ Vec<u8>), // Merkle root conflict in the same fec set
|
|
|
|
|
ChainedMerkleRootConflict(/* original */ Shred, /* conflict */ Vec<u8>), // Merkle root chaining conflict with previous fec set
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl PossibleDuplicateShred {
|
|
|
|
@ -137,6 +142,7 @@ impl PossibleDuplicateShred {
|
|
|
|
|
Self::LastIndexConflict(shred, _) => shred.slot(),
|
|
|
|
|
Self::ErasureConflict(shred, _) => shred.slot(),
|
|
|
|
|
Self::MerkleRootConflict(shred, _) => shred.slot(),
|
|
|
|
|
Self::ChainedMerkleRootConflict(shred, _) => shred.slot(),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
@ -473,6 +479,85 @@ impl Blockstore {
|
|
|
|
|
self.erasure_meta_cf.get((slot, u64::from(fec_set_index)))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    /// Test-only helper: serializes `erasure_meta` with bincode and writes it
    /// into the erasure-meta column family under `erasure_set`'s store key
    /// (`(slot, fec_set_index as u64)`), mirroring the key used by
    /// `erasure_meta()` for reads.
    #[cfg(test)]
    fn put_erasure_meta(
        &self,
        erasure_set: ErasureSetId,
        erasure_meta: &ErasureMeta,
    ) -> Result<()> {
        let (slot, fec_set_index) = erasure_set.store_key();
        self.erasure_meta_cf.put_bytes(
            (slot, u64::from(fec_set_index)),
            // unwrap: bincode serialization of an in-memory ErasureMeta is
            // infallible in practice; a failure here would be a bug.
            &bincode::serialize(erasure_meta).unwrap(),
        )
    }
|
|
|
|
|
|
|
|
|
|
    /// Attempts to find the previous consecutive erasure set for `erasure_set`.
    ///
    /// Checks the map `erasure_metas`, if not present scans blockstore. Returns None
    /// if the previous consecutive erasure set is not present in either.
    ///
    /// "Consecutive" means the candidate set's `next_fec_set_index()` equals this
    /// set's `fec_set_index` — i.e. the candidate's data shreds end exactly where
    /// this set begins. Candidates from a different slot are never returned.
    ///
    /// Side effect: a candidate found via the blockstore scan is cached into
    /// `erasure_metas` (as `WorkingEntry::Clean`) even when it turns out not to be
    /// consecutive, so future calls avoid the blockstore lookup.
    fn previous_erasure_set(
        &self,
        erasure_set: ErasureSetId,
        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
    ) -> Result<Option<ErasureSetId>> {
        let (slot, fec_set_index) = erasure_set.store_key();

        // Check the previous entry from the in memory map to see if it is the consecutive
        // set to `erasure set`. The BTreeMap range is restricted to this slot's keys
        // strictly below `erasure_set`, and `next_back()` picks the greatest such key,
        // i.e. the closest earlier set in the same slot.
        let candidate_erasure_entry = erasure_metas
            .range((
                Bound::Included(ErasureSetId::new(slot, 0)),
                Bound::Excluded(erasure_set),
            ))
            .next_back();
        let candidate_erasure_set = candidate_erasure_entry
            .filter(|(_, candidate_erasure_meta)| {
                // Only a candidate whose shreds end exactly at our fec_set_index counts.
                candidate_erasure_meta.as_ref().next_fec_set_index() == Some(fec_set_index)
            })
            .map(|(candidate_erasure_set, _)| *candidate_erasure_set);
        if candidate_erasure_set.is_some() {
            return Ok(candidate_erasure_set);
        }

        // Consecutive set was not found in memory, scan blockstore for a potential candidate
        let Some(((_, candidate_fec_set_index), candidate_erasure_meta)) = self
            .erasure_meta_cf
            .iter(IteratorMode::From(
                (slot, u64::from(fec_set_index)),
                IteratorDirection::Reverse,
            ))?
            // `find` here, to skip the first element in case the erasure meta for fec_set_index is already present
            .find(|((_, candidate_fec_set_index), _)| {
                *candidate_fec_set_index != u64::from(fec_set_index)
            })
            // Do not consider sets from the previous slot
            .filter(|((candidate_slot, _), _)| *candidate_slot == slot)
        else {
            // No potential candidates
            return Ok(None);
        };
        let candidate_fec_set_index = u32::try_from(candidate_fec_set_index)
            .expect("fec_set_index from a previously inserted shred should fit in u32");
        let candidate_erasure_set = ErasureSetId::new(slot, candidate_fec_set_index);
        let candidate_erasure_meta: ErasureMeta = deserialize(candidate_erasure_meta.as_ref())?;

        // Add this candidate to erasure metas to avoid blockstore lookup in future
        erasure_metas.insert(
            candidate_erasure_set,
            WorkingEntry::Clean(candidate_erasure_meta),
        );

        // Check if this is actually the consecutive erasure set
        let Some(next_fec_set_index) = candidate_erasure_meta.next_fec_set_index() else {
            // An erasure meta without a computable next index indicates a corrupt /
            // invalid erasure config in the store — surface it as an error.
            return Err(BlockstoreError::InvalidErasureConfig);
        };
        if next_fec_set_index == fec_set_index {
            return Ok(Some(candidate_erasure_set));
        }
        Ok(None)
    }
|
|
|
|
|
|
|
|
|
|
fn merkle_root_meta(&self, erasure_set: ErasureSetId) -> Result<Option<MerkleRootMeta>> {
|
|
|
|
|
self.merkle_root_meta_cf.get(erasure_set.store_key())
|
|
|
|
|
}
|
|
|
|
@ -745,7 +830,7 @@ impl Blockstore {
|
|
|
|
|
|
|
|
|
|
fn try_shred_recovery(
|
|
|
|
|
&self,
|
|
|
|
|
erasure_metas: &HashMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
|
|
|
|
|
erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
|
|
|
|
|
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
|
|
|
|
|
prev_inserted_shreds: &HashMap<ShredId, Shred>,
|
|
|
|
|
reed_solomon_cache: &ReedSolomonCache,
|
|
|
|
@ -864,7 +949,7 @@ impl Blockstore {
|
|
|
|
|
let mut write_batch = self.db.batch()?;
|
|
|
|
|
|
|
|
|
|
let mut just_inserted_shreds = HashMap::with_capacity(shreds.len());
|
|
|
|
|
let mut erasure_metas = HashMap::new();
|
|
|
|
|
let mut erasure_metas = BTreeMap::new();
|
|
|
|
|
let mut merkle_root_metas = HashMap::new();
|
|
|
|
|
let mut slot_meta_working_set = HashMap::new();
|
|
|
|
|
let mut index_working_set = HashMap::new();
|
|
|
|
@ -1189,7 +1274,7 @@ impl Blockstore {
|
|
|
|
|
fn check_insert_coding_shred(
|
|
|
|
|
&self,
|
|
|
|
|
shred: Shred,
|
|
|
|
|
erasure_metas: &mut HashMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
|
|
|
|
|
erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
|
|
|
|
|
merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
|
|
|
|
|
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
|
|
|
|
|
write_batch: &mut WriteBatch,
|
|
|
|
@ -1397,7 +1482,7 @@ impl Blockstore {
|
|
|
|
|
fn check_insert_data_shred(
|
|
|
|
|
&self,
|
|
|
|
|
shred: Shred,
|
|
|
|
|
erasure_metas: &mut HashMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
|
|
|
|
|
erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
|
|
|
|
|
merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
|
|
|
|
|
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
|
|
|
|
|
slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
|
|
|
|
@ -1492,7 +1577,7 @@ impl Blockstore {
|
|
|
|
|
just_inserted_shreds.insert(shred.id(), shred);
|
|
|
|
|
index_meta_working_set_entry.did_insert_occur = true;
|
|
|
|
|
slot_meta_entry.did_insert_occur = true;
|
|
|
|
|
if let HashMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
|
|
|
|
|
if let BTreeMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) {
|
|
|
|
|
if let Some(meta) = self.erasure_meta(erasure_set).unwrap() {
|
|
|
|
|
entry.insert(WorkingEntry::Clean(meta));
|
|
|
|
|
}
|
|
|
|
@ -1608,6 +1693,177 @@ impl Blockstore {
|
|
|
|
|
false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    /// Returns true if there is no chaining conflict between
    /// the `shred` and `merkle_root_meta` of the next FEC set,
    /// or if shreds from the next set are yet to be received.
    ///
    /// Otherwise return false and add duplicate proof to
    /// `duplicate_shreds`.
    ///
    /// This is intended to be used right after `shred`'s `erasure_meta`
    /// has been created for the first time and loaded into `erasure_metas`.
    ///
    /// Panics if `shred`'s erasure set is not present in `erasure_metas`
    /// (programmer error — see the `expect` below).
    #[allow(dead_code)]
    fn check_forward_chained_merkle_root_consistency(
        &self,
        shred: &Shred,
        just_inserted_shreds: &HashMap<ShredId, Shred>,
        erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
        merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
    ) -> bool {
        let slot = shred.slot();
        let erasure_set = shred.erasure_set();
        // NOTE(review): `expect` takes a plain &str, so `{erasure_set:?}` in this
        // message is NOT interpolated — it will print literally. Consider
        // `unwrap_or_else(|| panic!("... {erasure_set:?} ..."))` if the value matters.
        let erasure_meta_entry = erasure_metas.get(&erasure_set).expect(
            "Checking chained merkle root consistency on an erasure set {erasure_set:?}
            that is not loaded in memory, programmer error",
        );
        let erasure_meta = erasure_meta_entry.as_ref();
        debug_assert!(erasure_meta.check_coding_shred(shred));

        // If a shred from the next fec set has already been inserted, check the chaining
        let Some(next_fec_set_index) = erasure_meta.next_fec_set_index() else {
            error!("Invalid erasure meta, unable to compute next fec set index {erasure_meta:?}");
            return false;
        };
        let next_erasure_set = ErasureSetId::new(slot, next_fec_set_index);
        // Load (and cache) the next set's merkle root meta; None means no shred
        // from the next set has been received, so there is nothing to check yet.
        let next_merkle_root_meta = match merkle_root_metas.entry(next_erasure_set) {
            HashMapEntry::Vacant(entry) => self
                .merkle_root_meta(next_erasure_set)
                .unwrap()
                .map(|meta| entry.insert(WorkingEntry::Clean(meta))),
            HashMapEntry::Occupied(entry) => Some(entry.into_mut()),
        };
        if let Some(next_merkle_root_meta) = next_merkle_root_meta.as_deref().map(AsRef::as_ref) {
            // Recover the first shred received for the next set; it carries the
            // chained merkle root that must match this set's merkle root.
            let next_shred_id = ShredId::new(
                slot,
                next_merkle_root_meta.first_received_shred_index(),
                next_merkle_root_meta.first_received_shred_type(),
            );
            let next_shred =
                Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
                    .expect("Shred indicated by merkle root meta must exist")
                    .into_owned();
            let merkle_root = shred.merkle_root().ok();
            let chained_merkle_root = shred::layout::get_chained_merkle_root(&next_shred);

            if !self.check_chaining(merkle_root, chained_merkle_root) {
                warn!(
                    "Received conflicting chained merkle roots for slot: {slot},
                    shred {erasure_set:?} type {:?} has merkle root {merkle_root:?}, however
                    next fec set shred {next_erasure_set:?} type {:?} chains to merkle root {chained_merkle_root:?}.
                    Reporting as duplicate",
                    shred.shred_type(),
                    next_merkle_root_meta.first_received_shred_type(),
                );

                // Only report the first duplicate proof per slot.
                if !self.has_duplicate_shreds_in_slot(shred.slot()) {
                    duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
                        shred.clone(),
                        next_shred,
                    ));
                }
                return false;
            }
        }

        true
    }
|
|
|
|
|
|
|
|
|
|
    /// Returns true if there is no chaining conflict between
    /// the `shred` and `merkle_root_meta` of the previous FEC set,
    /// or if shreds from the previous set are yet to be received.
    ///
    /// Otherwise return false and add duplicate proof to
    /// `duplicate_shreds`.
    ///
    /// This is intended to be used right after `shred`'s `merkle_root_meta`
    /// has been created for the first time.
    #[allow(dead_code)]
    fn check_backwards_chained_merkle_root_consistency(
        &self,
        shred: &Shred,
        just_inserted_shreds: &HashMap<ShredId, Shred>,
        erasure_metas: &mut BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
        merkle_root_metas: &mut HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
        duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
    ) -> bool {
        let slot = shred.slot();
        let erasure_set = shred.erasure_set();
        let fec_set_index = shred.fec_set_index();

        if fec_set_index == 0 {
            // Although the first fec set chains to the last fec set of the parent block,
            // if this chain is incorrect we do not know which block is the duplicate until votes
            // are received. We instead delay this check until the block reaches duplicate
            // confirmation.
            return true;
        }

        // If a shred from the previous fec set has already been inserted, check the chaining.
        // Since we cannot compute the previous fec set index, we check the in memory map, otherwise
        // check the previous key from blockstore to see if it is consecutive with our current set.
        let Some(prev_erasure_set) = self
            .previous_erasure_set(erasure_set, erasure_metas)
            .expect("Expect database operations to succeed")
        else {
            // No shreds from the previous erasure batch have been received,
            // so nothing to check. Once the previous erasure batch is received,
            // we will verify this chain through the forward check above.
            return true;
        };
        // Load (and cache) the previous set's merkle root meta. An erasure meta for
        // the previous set implies at least one of its shreds was inserted, so its
        // merkle root meta must exist — hence the `expect`.
        let prev_merkle_root_meta_entry = match merkle_root_metas.entry(prev_erasure_set) {
            HashMapEntry::Vacant(entry) => entry.insert(WorkingEntry::Clean(
                self.merkle_root_meta(prev_erasure_set)
                    .unwrap()
                    .expect("merkle root meta must exist for erasure meta"),
            )),
            HashMapEntry::Occupied(entry) => entry.into_mut(),
        };

        let prev_merkle_root_meta = prev_merkle_root_meta_entry.as_ref();
        // Recover the first shred received for the previous set; its merkle root
        // must match what this shred claims to chain from.
        let prev_shred_id = ShredId::new(
            slot,
            prev_merkle_root_meta.first_received_shred_index(),
            prev_merkle_root_meta.first_received_shred_type(),
        );
        let prev_shred =
            Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, prev_shred_id)
                .expect("Shred indicated by merkle root meta must exist")
                .into_owned();
        let merkle_root = shred::layout::get_merkle_root(&prev_shred);
        let chained_merkle_root = shred.chained_merkle_root().ok();

        if !self.check_chaining(merkle_root, chained_merkle_root) {
            warn!(
                "Received conflicting chained merkle roots for slot: {slot},
                shred {:?} type {:?} chains to merkle root {chained_merkle_root:?}, however
                previous fec set shred {prev_erasure_set:?} type {:?} has merkle root {merkle_root:?}.
                Reporting as duplicate",
                shred.erasure_set(),
                shred.shred_type(),
                prev_merkle_root_meta.first_received_shred_type(),
            );

            // Only report the first duplicate proof per slot.
            if !self.has_duplicate_shreds_in_slot(shred.slot()) {
                duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
                    shred.clone(),
                    prev_shred,
                ));
            }
            return false;
        }

        true
    }
|
|
|
|
|
|
|
|
|
|
/// Checks if the chained merkle root == merkle root
|
|
|
|
|
///
|
|
|
|
|
/// Returns true if no conflict, or if chained merkle roots are not enabled
|
|
|
|
|
fn check_chaining(&self, merkle_root: Option<Hash>, chained_merkle_root: Option<Hash>) -> bool {
|
|
|
|
|
chained_merkle_root.is_none() // Chained merkle roots have not been enabled yet
|
|
|
|
|
|| chained_merkle_root == merkle_root
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn should_insert_data_shred(
|
|
|
|
|
&self,
|
|
|
|
|
shred: &Shred,
|
|
|
|
@ -6884,7 +7140,7 @@ pub mod tests {
|
|
|
|
|
let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10);
|
|
|
|
|
let coding_shred = coding_shreds[index as usize].clone();
|
|
|
|
|
|
|
|
|
|
let mut erasure_metas = HashMap::new();
|
|
|
|
|
let mut erasure_metas = BTreeMap::new();
|
|
|
|
|
let mut merkle_root_metas = HashMap::new();
|
|
|
|
|
let mut index_working_set = HashMap::new();
|
|
|
|
|
let mut just_received_shreds = HashMap::new();
|
|
|
|
@ -7079,7 +7335,7 @@ pub mod tests {
|
|
|
|
|
setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index);
|
|
|
|
|
let data_shred = data_shreds[0].clone();
|
|
|
|
|
|
|
|
|
|
let mut erasure_metas = HashMap::new();
|
|
|
|
|
let mut erasure_metas = BTreeMap::new();
|
|
|
|
|
let mut merkle_root_metas = HashMap::new();
|
|
|
|
|
let mut index_working_set = HashMap::new();
|
|
|
|
|
let mut just_received_shreds = HashMap::new();
|
|
|
|
@ -7296,7 +7552,7 @@ pub mod tests {
|
|
|
|
|
0, // version
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
let mut erasure_metas = HashMap::new();
|
|
|
|
|
let mut erasure_metas = BTreeMap::new();
|
|
|
|
|
let mut merkle_root_metas = HashMap::new();
|
|
|
|
|
let mut index_working_set = HashMap::new();
|
|
|
|
|
let mut just_received_shreds = HashMap::new();
|
|
|
|
@ -9819,6 +10075,22 @@ pub mod tests {
|
|
|
|
|
parent_slot: u64,
|
|
|
|
|
num_entries: u64,
|
|
|
|
|
fec_set_index: u32,
|
|
|
|
|
) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
|
|
|
|
|
setup_erasure_shreds_with_index_and_chained_merkle(
|
|
|
|
|
slot,
|
|
|
|
|
parent_slot,
|
|
|
|
|
num_entries,
|
|
|
|
|
fec_set_index,
|
|
|
|
|
Some(Hash::new_from_array(rand::thread_rng().gen())),
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn setup_erasure_shreds_with_index_and_chained_merkle(
|
|
|
|
|
slot: u64,
|
|
|
|
|
parent_slot: u64,
|
|
|
|
|
num_entries: u64,
|
|
|
|
|
fec_set_index: u32,
|
|
|
|
|
chained_merkle_root: Option<Hash>,
|
|
|
|
|
) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
|
|
|
|
|
let entries = make_slot_entries_with_transactions(num_entries);
|
|
|
|
|
let leader_keypair = Arc::new(Keypair::new());
|
|
|
|
@ -9827,8 +10099,7 @@ pub mod tests {
|
|
|
|
|
&leader_keypair,
|
|
|
|
|
&entries,
|
|
|
|
|
true, // is_last_in_slot
|
|
|
|
|
// chained_merkle_root
|
|
|
|
|
Some(Hash::new_from_array(rand::thread_rng().gen())),
|
|
|
|
|
chained_merkle_root,
|
|
|
|
|
fec_set_index, // next_shred_index
|
|
|
|
|
fec_set_index, // next_code_index
|
|
|
|
|
true, // merkle_variant
|
|
|
|
@ -10740,4 +11011,142 @@ pub mod tests {
|
|
|
|
|
assert_eq!(read_cost, *cost_table.get(&read_key).unwrap());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    // Exercises `previous_erasure_set` across all lookup paths: empty store,
    // non-consecutive in-memory candidates, blockstore-only candidates, and the
    // slot-boundary rule (a consecutive set in another slot must NOT match).
    #[test]
    fn test_previous_erasure_set() {
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let mut erasure_metas = BTreeMap::new();

        let parent_slot = 0;
        let prev_slot = 1;
        let slot = 2;
        // Set 0: first fec set of `slot`, starting at index 0.
        let (data_shreds_0, coding_shreds_0, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, 0);
        let erasure_set_0 = ErasureSetId::new(slot, 0);
        let erasure_meta_0 =
            ErasureMeta::from_coding_shred(coding_shreds_0.first().unwrap()).unwrap();

        // "prev" set: immediately consecutive to set 0 (starts where set 0 ends).
        let prev_fec_set_index = data_shreds_0.len() as u32;
        let (data_shreds_prev, coding_shreds_prev, _) =
            setup_erasure_shreds_with_index(slot, parent_slot, 10, prev_fec_set_index);
        let erasure_set_prev = ErasureSetId::new(slot, prev_fec_set_index);
        let erasure_meta_prev =
            ErasureMeta::from_coding_shred(coding_shreds_prev.first().unwrap()).unwrap();

        // Same fec_set_index but in a *different* slot — used to test that
        // `previous_erasure_set` never crosses the slot boundary.
        let (_, coding_shreds_prev_slot, _) =
            setup_erasure_shreds_with_index(prev_slot, parent_slot, 10, prev_fec_set_index);
        let erasure_set_prev_slot = ErasureSetId::new(prev_slot, prev_fec_set_index);
        let erasure_meta_prev_slot =
            ErasureMeta::from_coding_shred(coding_shreds_prev_slot.first().unwrap()).unwrap();

        // The set under test: consecutive to the "prev" set.
        let fec_set_index = data_shreds_prev.len() as u32 + prev_fec_set_index;
        let erasure_set = ErasureSetId::new(slot, fec_set_index);

        // Blockstore is empty
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            None
        );

        // Erasure metas does not contain the previous fec set, but only the one before that
        erasure_metas.insert(erasure_set_0, WorkingEntry::Dirty(erasure_meta_0));
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            None
        );

        // Both erasure metas and blockstore contain only the previous previous fec set
        erasure_metas.insert(erasure_set_0, WorkingEntry::Clean(erasure_meta_0));
        blockstore
            .put_erasure_meta(erasure_set_0, &erasure_meta_0)
            .unwrap();
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            None
        );

        // Erasure meta contains the previous FEC set, blockstore only contains the older
        erasure_metas.insert(erasure_set_prev, WorkingEntry::Dirty(erasure_meta_prev));
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            Some(erasure_set_prev)
        );

        // Erasure meta only contains the older, blockstore has the previous fec set
        erasure_metas.remove(&erasure_set_prev);
        blockstore
            .put_erasure_meta(erasure_set_prev, &erasure_meta_prev)
            .unwrap();
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            Some(erasure_set_prev)
        );

        // Both contain the previous fec set
        erasure_metas.insert(erasure_set_prev, WorkingEntry::Clean(erasure_meta_prev));
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            Some(erasure_set_prev)
        );

        // Works even if the previous fec set has index 0
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set_prev, &mut erasure_metas)
                .unwrap(),
            Some(erasure_set_0)
        );
        erasure_metas.remove(&erasure_set_0);
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set_prev, &mut erasure_metas)
                .unwrap(),
            Some(erasure_set_0)
        );

        // Does not cross slot boundary
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        erasure_metas.clear();
        erasure_metas.insert(
            erasure_set_prev_slot,
            WorkingEntry::Dirty(erasure_meta_prev_slot),
        );
        // Sanity check: the other-slot set *would* be consecutive by index alone,
        // so a `Some` below would indicate a broken slot-boundary check.
        assert_eq!(
            erasure_meta_prev_slot.next_fec_set_index().unwrap(),
            fec_set_index
        );
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            None,
        );
        erasure_metas.insert(
            erasure_set_prev_slot,
            WorkingEntry::Clean(erasure_meta_prev_slot),
        );
        blockstore
            .put_erasure_meta(erasure_set_prev_slot, &erasure_meta_prev_slot)
            .unwrap();
        assert_eq!(
            blockstore
                .previous_erasure_set(erasure_set, &mut erasure_metas)
                .unwrap(),
            None,
        );
    }
|
|
|
|
|
}
|
|
|
|
|