removes unused code from duplicate-shreds (#30329)

behzad nouri 2023-02-15 17:37:15 +00:00 committed by GitHub
parent a9ad0f2b5a
commit eede50c868
3 changed files with 20 additions and 58 deletions
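
The diff drops the one-off LeaderScheduleFn helper trait, writes the closure bound F: FnOnce(Slot) -> Option<Pubkey> directly on each function that takes a leader-schedule lookup, and deletes the unused from_duplicate_slot_proof constructor. A minimal standalone sketch of the trait-alias-to-generic-bound pattern, using simplified stand-in types rather than the Solana ones:

type Slot = u64;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Pubkey(u8); // stand-in for solana_sdk::pubkey::Pubkey

// Before: a blanket-implemented trait used only as an alias for the bound.
//   pub trait LeaderScheduleFn: FnOnce(Slot) -> Option<Pubkey> {}
//   impl<F> LeaderScheduleFn for F where F: FnOnce(Slot) -> Option<Pubkey> {}
//   fn check(leader_schedule: Option<impl LeaderScheduleFn>) -> Option<Pubkey> { ... }
//
// After: the bound is written out where it is used, so the alias can go away.
fn slot_leader<F>(leader_schedule: Option<F>, slot: Slot) -> Option<Pubkey>
where
    F: FnOnce(Slot) -> Option<Pubkey>,
{
    // The lookup is consumed at most once, which is why FnOnce is enough.
    leader_schedule.and_then(|f| f(slot))
}

fn main() {
    let schedule = |slot: Slot| Some(Pubkey(slot as u8));
    assert_eq!(slot_leader(Some(schedule), 3), Some(Pubkey(3)));
    assert_eq!(slot_leader::<fn(Slot) -> Option<Pubkey>>(None, 3), None);
}

Call sites are unaffected either way: any closure with the matching signature satisfies the written-out bound exactly as it satisfied the blanket-implemented trait.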

@@ -13,7 +13,7 @@ use {
         crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
         crds_gossip_push::CrdsGossipPush,
         crds_value::{CrdsData, CrdsValue},
-        duplicate_shred::{self, DuplicateShredIndex, LeaderScheduleFn, MAX_DUPLICATE_SHREDS},
+        duplicate_shred::{self, DuplicateShredIndex, MAX_DUPLICATE_SHREDS},
         legacy_contact_info::LegacyContactInfo as ContactInfo,
         ping_pong::PingCache,
     },
@@ -22,6 +22,7 @@ use {
     rayon::ThreadPool,
     solana_ledger::shred::Shred,
     solana_sdk::{
+        clock::Slot,
         hash::Hash,
         pubkey::Pubkey,
         signature::{Keypair, Signer},
@@ -88,15 +89,18 @@ impl CrdsGossip {
         self.push.new_push_messages(pubkey, &self.crds, now, stakes)
     }
 
-    pub(crate) fn push_duplicate_shred(
+    pub(crate) fn push_duplicate_shred<F>(
         &self,
         keypair: &Keypair,
         shred: &Shred,
         other_payload: &[u8],
-        leader_schedule: Option<impl LeaderScheduleFn>,
+        leader_schedule: Option<F>,
         // Maximum serialized size of each DuplicateShred chunk payload.
         max_payload_size: usize,
-    ) -> Result<(), duplicate_shred::Error> {
+    ) -> Result<(), duplicate_shred::Error>
+    where
+        F: FnOnce(Slot) -> Option<Pubkey>,
+    {
         let pubkey = keypair.pubkey();
         // Skip if there are already records of duplicate shreds for this slot.
         let shred_slot = shred.slot();

@@ -24,10 +24,6 @@ const DUPLICATE_SHRED_HEADER_SIZE: usize = 63;
 pub(crate) type DuplicateShredIndex = u16;
 pub(crate) const MAX_DUPLICATE_SHREDS: DuplicateShredIndex = 512;
 
-/// Function returning leader at a given slot.
-pub trait LeaderScheduleFn: FnOnce(Slot) -> Option<Pubkey> {}
-impl<F> LeaderScheduleFn for F where F: FnOnce(Slot) -> Option<Pubkey> {}
-
 #[derive(Clone, Debug, PartialEq, Eq, AbiExample, Deserialize, Serialize)]
 pub struct DuplicateShred {
     pub(crate) from: Pubkey,
@@ -93,11 +89,10 @@ pub enum Error {
 // Asserts that the two shreds can indicate duplicate proof for
 // the same triplet of (slot, shred-index, and shred-type_), and
 // that they have valid signatures from the slot leader.
-fn check_shreds(
-    leader_schedule: Option<impl LeaderScheduleFn>,
-    shred1: &Shred,
-    shred2: &Shred,
-) -> Result<(), Error> {
+fn check_shreds<F>(leader_schedule: Option<F>, shred1: &Shred, shred2: &Shred) -> Result<(), Error>
+where
+    F: FnOnce(Slot) -> Option<Pubkey>,
+{
     if shred1.slot() != shred2.slot() {
         Err(Error::SlotMismatch)
     } else if shred1.index() != shred2.index() {
@@ -120,54 +115,17 @@
     }
 }
 
-/// Splits a DuplicateSlotProof into DuplicateShred
-/// chunks with a size limit on each chunk.
-pub fn from_duplicate_slot_proof(
-    proof: &DuplicateSlotProof,
-    self_pubkey: Pubkey, // Pubkey of my node broadcasting crds value.
-    leader_schedule: Option<impl LeaderScheduleFn>,
-    wallclock: u64,
-    max_size: usize, // Maximum serialized size of each DuplicateShred.
-) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
-    if proof.shred1 == proof.shred2 {
-        return Err(Error::InvalidDuplicateSlotProof);
-    }
-    let shred1 = Shred::new_from_serialized_shred(proof.shred1.clone())?;
-    let shred2 = Shred::new_from_serialized_shred(proof.shred2.clone())?;
-    check_shreds(leader_schedule, &shred1, &shred2)?;
-    let (slot, shred_index, shred_type) = (shred1.slot(), shred1.index(), shred1.shred_type());
-    let data = bincode::serialize(proof)?;
-    let chunk_size = if DUPLICATE_SHRED_HEADER_SIZE < max_size {
-        max_size - DUPLICATE_SHRED_HEADER_SIZE
-    } else {
-        return Err(Error::InvalidSizeLimit);
-    };
-    let chunks: Vec<_> = data.chunks(chunk_size).map(Vec::from).collect();
-    let num_chunks = u8::try_from(chunks.len())?;
-    let chunks = chunks
-        .into_iter()
-        .enumerate()
-        .map(move |(i, chunk)| DuplicateShred {
-            from: self_pubkey,
-            wallclock,
-            slot,
-            shred_index,
-            shred_type,
-            num_chunks,
-            chunk_index: i as u8,
-            chunk,
-        });
-    Ok(chunks)
-}
-
-pub(crate) fn from_shred(
+pub(crate) fn from_shred<F>(
     shred: Shred,
     self_pubkey: Pubkey, // Pubkey of my node broadcasting crds value.
     other_payload: Vec<u8>,
-    leader_schedule: Option<impl LeaderScheduleFn>,
+    leader_schedule: Option<F>,
     wallclock: u64,
     max_size: usize, // Maximum serialized size of each DuplicateShred.
-) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
+) -> Result<impl Iterator<Item = DuplicateShred>, Error>
+where
+    F: FnOnce(Slot) -> Option<Pubkey>,
+{
     if shred.payload() == &other_payload {
         return Err(Error::InvalidDuplicateShreds);
     }
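
For the chunking arithmetic visible in the deleted from_duplicate_slot_proof above (each gossip entry reserves DUPLICATE_SHRED_HEADER_SIZE bytes for its header, the remainder of max_size becomes the chunk payload, and the number of chunks has to fit the u8 chunk_index), a small standalone sketch follows. It uses plain Rust types rather than the crate's DuplicateShred and Error, and assumes only the constant value shown in the earlier hunk header (const DUPLICATE_SHRED_HEADER_SIZE: usize = 63):

const DUPLICATE_SHRED_HEADER_SIZE: usize = 63; // value taken from the hunk header above

// Split serialized proof bytes into payload chunks that each fit within
// max_size once the fixed per-chunk header is added back.
fn split_into_chunks(data: &[u8], max_size: usize) -> Result<Vec<Vec<u8>>, String> {
    let chunk_size = max_size
        .checked_sub(DUPLICATE_SHRED_HEADER_SIZE)
        .filter(|&size| size > 0)
        .ok_or("max_size must exceed DUPLICATE_SHRED_HEADER_SIZE")?;
    let chunks: Vec<Vec<u8>> = data.chunks(chunk_size).map(Vec::from).collect();
    // The chunk index is carried as a u8, so more than 255 chunks cannot be encoded.
    u8::try_from(chunks.len()).map_err(|_| "too many chunks for a u8 index".to_string())?;
    Ok(chunks)
}

fn main() {
    let proof_bytes = vec![0u8; 2_000];
    let chunks = split_into_chunks(&proof_bytes, 1_024).unwrap();
    assert_eq!(chunks.len(), 3); // ceil(2000 / (1024 - 63)) = 3
    assert!(chunks.iter().all(|c| c.len() <= 1_024 - DUPLICATE_SHRED_HEADER_SIZE));
}

The proof-based constructor doing this was unused (hence its removal in this commit); the shred-based from_shred constructor stays and takes the same leader_schedule, wallclock, and max_size parameters.
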
@@ -333,7 +291,7 @@ pub(crate) mod tests {
         );
     }
 
-    pub fn new_rand_shred<R: Rng>(
+    pub(crate) fn new_rand_shred<R: Rng>(
         rng: &mut R,
         next_shred_index: u32,
         shredder: &Shredder,

@@ -85,7 +85,7 @@ mod tests {
     }
 
     impl FakeHandler {
-        pub fn new(count: Arc<AtomicU32>) -> Self {
+        fn new(count: Arc<AtomicU32>) -> Self {
             Self { count }
         }
     }