2019-06-19 00:13:19 -07:00
|
|
|
use super::*;
|
2019-10-08 00:42:51 -07:00
|
|
|
use crate::shred::{Shredder, RECOMMENDED_FEC_RATE};
|
2019-06-19 00:13:19 -07:00
|
|
|
use solana_sdk::hash::Hash;
|
|
|
|
|
2019-06-20 20:15:33 -07:00
|
|
|
/// Broadcast-run implementation that deliberately corrupts the PoH hash of the
/// last entry in a slot (see `run` below) so that downstream validators fail
/// entry verification. Stateless; used for failure-injection testing.
pub(super) struct FailEntryVerificationBroadcastRun {}
|
2019-06-19 00:13:19 -07:00
|
|
|
|
|
|
|
impl FailEntryVerificationBroadcastRun {
|
|
|
|
pub(super) fn new() -> Self {
|
2019-06-20 20:15:33 -07:00
|
|
|
Self {}
|
2019-06-19 00:13:19 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
|
|
|
fn run(
|
|
|
|
&mut self,
|
|
|
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
2019-09-18 12:16:22 -07:00
|
|
|
receiver: &Receiver<WorkingBankEntry>,
|
2019-06-19 00:13:19 -07:00
|
|
|
sock: &UdpSocket,
|
|
|
|
blocktree: &Arc<Blocktree>,
|
|
|
|
) -> Result<()> {
|
|
|
|
// 1) Pull entries from banking stage
|
2019-09-18 12:16:22 -07:00
|
|
|
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
|
2019-06-19 00:13:19 -07:00
|
|
|
let bank = receive_results.bank.clone();
|
|
|
|
let last_tick = receive_results.last_tick;
|
|
|
|
|
|
|
|
// 2) Convert entries to blobs + generate coding blobs. Set a garbage PoH on the last entry
|
|
|
|
// in the slot to make verification fail on validators
|
|
|
|
if last_tick == bank.max_tick_height() {
|
2019-09-18 12:16:22 -07:00
|
|
|
let mut last_entry = receive_results.entries.last_mut().unwrap();
|
|
|
|
last_entry.hash = Hash::default();
|
2019-06-19 00:13:19 -07:00
|
|
|
}
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let keypair = cluster_info.read().unwrap().keypair.clone();
|
|
|
|
let next_shred_index = blocktree
|
2019-06-19 00:13:19 -07:00
|
|
|
.meta(bank.slot())
|
|
|
|
.expect("Database error")
|
|
|
|
.map(|meta| meta.consumed)
|
2019-10-08 00:42:51 -07:00
|
|
|
.unwrap_or(0) as u32;
|
2019-06-19 00:13:19 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let shredder = Shredder::new(
|
2019-09-18 12:16:22 -07:00
|
|
|
bank.slot(),
|
2019-09-03 21:32:51 -07:00
|
|
|
bank.parent().unwrap().slot(),
|
2019-10-08 00:42:51 -07:00
|
|
|
RECOMMENDED_FEC_RATE,
|
|
|
|
keypair.clone(),
|
|
|
|
)
|
|
|
|
.expect("Expected to create a new shredder");
|
2019-06-19 00:13:19 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
|
|
|
|
&receive_results.entries,
|
|
|
|
last_tick == bank.max_tick_height(),
|
|
|
|
next_shred_index,
|
|
|
|
);
|
2019-09-03 21:32:51 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let all_shreds = data_shreds
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.chain(coding_shreds.iter().cloned())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
let all_seeds: Vec<[u8; 32]> = all_shreds.iter().map(|s| s.seed()).collect();
|
|
|
|
blocktree
|
|
|
|
.insert_shreds(all_shreds, None)
|
|
|
|
.expect("Failed to insert shreds in blocktree");
|
2019-06-19 00:13:19 -07:00
|
|
|
|
|
|
|
// 3) Start broadcast step
|
|
|
|
let bank_epoch = bank.get_stakers_epoch(bank.slot());
|
|
|
|
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let all_shred_bufs: Vec<Vec<u8>> = data_shreds
|
|
|
|
.into_iter()
|
|
|
|
.chain(coding_shreds.into_iter())
|
|
|
|
.map(|s| s.payload)
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
// Broadcast data
|
2019-09-03 21:32:51 -07:00
|
|
|
cluster_info.read().unwrap().broadcast_shreds(
|
2019-06-19 00:13:19 -07:00
|
|
|
sock,
|
2019-10-08 00:42:51 -07:00
|
|
|
&all_shred_bufs,
|
|
|
|
&all_seeds,
|
2019-06-19 00:13:19 -07:00
|
|
|
stakes.as_ref(),
|
|
|
|
)?;
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|