removes wallclock from duplicate-shreds handler (#30187)

This commit is contained in:
behzad nouri 2023-02-08 17:29:30 +00:00 committed by GitHub
parent 5cbd6b3a74
commit 544fbded07
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 7 additions and 10 deletions

View File

@@ -1203,7 +1203,6 @@ impl ClusterInfo {
}
/// Returns duplicate-shreds inserted since the given cursor.
#[allow(dead_code)]
pub(crate) fn get_duplicate_shreds(&self, cursor: &mut Cursor) -> Vec<DuplicateShred> {
let gossip_crds = self.gossip.crds.read().unwrap();
gossip_crds

View File

@@ -43,9 +43,12 @@ pub struct DuplicateShred {
}
impl DuplicateShred {
#[inline]
pub(crate) fn num_chunks(&self) -> u8 {
self.num_chunks
}
#[inline]
pub(crate) fn chunk_index(&self) -> u8 {
self.chunk_index
}

View File

@@ -31,16 +31,14 @@ const MAX_PUBKEY_PER_SLOT: usize = 300;
struct ProofChunkMap {
num_chunks: u8,
wallclock: u64,
chunks: [Option<DuplicateShred>; MAX_NUM_CHUNKS],
}
impl ProofChunkMap {
fn new(num_chunks: u8, wallclock: u64) -> Self {
fn new(num_chunks: u8) -> Self {
Self {
num_chunks,
chunks: <[Option<DuplicateShred>; MAX_NUM_CHUNKS]>::default(),
wallclock,
}
}
}
@@ -171,10 +169,7 @@ impl DuplicateShredHandler {
// Also skip frozen slots or slots with a different proof than me.
match self.chunk_map.get(&slot) {
Some(SlotStatus::Frozen) => false,
Some(SlotStatus::UnfinishedProof(slot_map)) => match slot_map.get(&data.from) {
None => true,
Some(proof_chunkmap) => proof_chunkmap.wallclock == data.wallclock,
},
Some(SlotStatus::UnfinishedProof(_)) => true,
None => true,
}
}
@@ -211,7 +206,7 @@ impl DuplicateShredHandler {
};
let proof_chunk_map = slot_chunk_map
.entry(data.from)
.or_insert_with(|| ProofChunkMap::new(data.num_chunks(), data.wallclock));
.or_insert_with(|| ProofChunkMap::new(data.num_chunks()));
if data.num_chunks() != proof_chunk_map.num_chunks
|| data.chunk_index() >= proof_chunk_map.num_chunks
{
@@ -468,7 +463,7 @@ mod tests {
None,
start_slot,
None,
DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
DUPLICATE_SHRED_MAX_PAYLOAD_SIZE * 2,
)
.unwrap();
for chunk in chunks1 {