Remove extra call to serialize in shred verify (#5698)
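Shred::verify previously re-serialized the shred with bincode just to recover the signed payload, even though the window service already holds the serialized bytes straight off the wire. verify now serializes once and delegates to a new fast_verify(&self, shred_buf: &[u8], pubkey: &Pubkey) that checks the signature against a caller-supplied buffer. The buffer is threaded through should_retransmit_and_persist, the recv_window shred filter, and the WindowService filter closures, so the hot path verifies directly against packet.data with no extra serialization.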
parent 50ab34ad92
commit 2510f3d352
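For orientation, the pattern this diff applies can be sketched in a few lines. This is a simplified, self-contained illustration, not the real solana-ledger API: Pubkey, Shred, and check_signature below are stand-ins, and the toy serialize stands in for bincode::serialize. The point is the split into a slow path that serializes and a fast path that reuses bytes the caller already holds.

    // Simplified stand-ins for illustration; the real types live in the
    // shred / window_service modules touched by this diff.
    type Pubkey = [u8; 32];

    // Hypothetical signature check over the signed payload bytes.
    fn check_signature(_pubkey: &Pubkey, _signed_payload: &[u8]) -> bool {
        true // placeholder; the real code performs an ed25519 verify
    }

    struct Shred {
        wire_bytes: Vec<u8>, // toy serialized form
    }

    impl Shred {
        // Slow path: serialize, then delegate to the fast path.
        fn verify(&self, pubkey: &Pubkey) -> bool {
            let buf = self.serialize();
            self.fast_verify(&buf, pubkey)
        }

        // Fast path: verify against a buffer the caller already has,
        // e.g. the packet data the shred was just deserialized from.
        fn fast_verify(&self, shred_buf: &[u8], pubkey: &Pubkey) -> bool {
            check_signature(pubkey, shred_buf)
        }

        fn serialize(&self) -> Vec<u8> {
            self.wire_bytes.clone() // stand-in for bincode::serialize
        }
    }

    fn main() {
        let pubkey = [0u8; 32];
        let packet_data = vec![1u8, 2, 3];
        let shred = Shred { wire_bytes: packet_data.clone() };
        // Hot path: no extra serialization before the signature check.
        assert!(shred.fast_verify(&packet_data, &pubkey));
        // Cold path still works for callers without the wire bytes.
        assert!(shred.verify(&pubkey));
    }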
@@ -474,7 +474,7 @@ impl Replicator {
             repair_socket,
             &exit,
             RepairStrategy::RepairRange(repair_slot_range),
-            |_, _, _| true,
+            |_, _, _, _| true,
         );
         info!("waiting for ledger download");
         Self::wait_for_segment_download(
@@ -150,8 +150,14 @@ impl RetransmitStage {
             repair_socket,
             exit,
             repair_strategy,
-            move |id, blob, working_bank| {
-                should_retransmit_and_persist(blob, working_bank, &leader_schedule_cache, id)
+            move |id, shred, shred_buf, working_bank| {
+                should_retransmit_and_persist(
+                    shred,
+                    shred_buf,
+                    working_bank,
+                    &leader_schedule_cache,
+                    id,
+                )
             },
         );

@@ -107,6 +107,11 @@ impl Shred {
     }

     pub fn verify(&self, pubkey: &Pubkey) -> bool {
+        let shred = bincode::serialize(&self).unwrap();
+        self.fast_verify(&shred, pubkey)
+    }
+
+    pub fn fast_verify(&self, shred_buf: &[u8], pubkey: &Pubkey) -> bool {
         let signed_payload_offset = match self {
             Shred::FirstInSlot(_)
             | Shred::FirstInFECSet(_)
@@ -119,9 +124,8 @@ impl Shred {
             }
         } + bincode::serialized_size(&Signature::default()).unwrap()
             as usize;
-        let shred = bincode::serialize(&self).unwrap();
         self.signature()
-            .verify(pubkey.as_ref(), &shred[signed_payload_offset..])
+            .verify(pubkey.as_ref(), &shred_buf[signed_payload_offset..])
     }
 }

@@ -31,6 +31,7 @@ pub fn process_shreds(shreds: Vec<Shred>, blocktree: &Arc<Blocktree>) -> Result<
 /// blob's slot
 pub fn should_retransmit_and_persist(
     shred: &Shred,
+    shred_buf: &[u8],
     bank: Option<Arc<Bank>>,
     leader_schedule_cache: &Arc<LeaderScheduleCache>,
     my_pubkey: &Pubkey,
@@ -44,7 +45,7 @@ pub fn should_retransmit_and_persist(
     if leader_id == *my_pubkey {
         inc_new_counter_debug!("streamer-recv_window-circular_transmission", 1);
         false
-    } else if !shred.verify(&leader_id) {
+    } else if !shred.fast_verify(&shred_buf, &leader_id) {
         inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
         false
     } else {
@@ -64,7 +65,7 @@ fn recv_window<F>(
     shred_filter: F,
 ) -> Result<()>
 where
-    F: Fn(&Shred) -> bool,
+    F: Fn(&Shred, &[u8]) -> bool,
     F: Sync,
 {
     let timer = Duration::from_millis(200);
@@ -81,7 +82,7 @@ where
     for (i, packet) in packets.packets.iter_mut().enumerate() {
         if let Ok(s) = bincode::deserialize(&packet.data) {
             let shred: Shred = s;
-            if shred_filter(&shred) {
+            if shred_filter(&shred, &packet.data) {
                 packet.meta.slot = shred.slot();
                 packet.meta.seed = shred.seed();
                 shreds.push(shred);
@@ -157,7 +158,7 @@ impl WindowService {
     ) -> WindowService
     where
         F: 'static
-            + Fn(&Pubkey, &Shred, Option<Arc<Bank>>) -> bool
+            + Fn(&Pubkey, &Shred, &[u8], Option<Arc<Bank>>) -> bool
             + std::marker::Send
             + std::marker::Sync,
     {
@@ -196,10 +197,11 @@ impl WindowService {
                 &id,
                 &r,
                 &retransmit,
-                |shred| {
+                |shred, shred_buf| {
                     shred_filter(
                         &id,
                         shred,
+                        shred_buf,
                         bank_forks
                             .as_ref()
                             .map(|bank_forks| bank_forks.read().unwrap().working_bank()),
@@ -310,10 +312,20 @@ mod test {

         let entry = Entry::default();
         let mut shreds = local_entries_to_shred(vec![entry], &Arc::new(leader_keypair));
+        let shred_bufs: Vec<_> = shreds
+            .iter()
+            .map(|s| bincode::serialize(s).unwrap())
+            .collect();

         // with a Bank for slot 0, blob continues
         assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id),
+            should_retransmit_and_persist(
+                &shreds[0],
+                &shred_bufs[0],
+                Some(bank.clone()),
+                &cache,
+                &me_id
+            ),
             true
         );

@@ -328,7 +340,7 @@ mod test {
         // with a Bank and no idea who leader is, blob gets thrown out
         shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
         assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank), &cache, &me_id),
+            should_retransmit_and_persist(&shreds[0], &shred_bufs[0], Some(bank), &cache, &me_id),
             false
         );

@@ -388,7 +400,7 @@ mod test {
             Arc::new(leader_node.sockets.repair),
             &exit,
             repair_strategy,
-            |_, _, _| true,
+            |_, _, _, _| true,
         );
         let t_responder = {
             let (s_responder, r_responder) = channel();
@@ -477,7 +489,7 @@ mod test {
             Arc::new(leader_node.sockets.repair),
             &exit,
             repair_strategy,
-            |_, _, _| true,
+            |_, _, _, _| true,
         );
         let t_responder = {
             let (s_responder, r_responder) = channel();