use super::*;
use crate::cluster_nodes::ClusterNodes;
use solana_ledger::shred::Shredder;
use solana_sdk::hash::Hash;
use solana_sdk::signature::Keypair;
use std::{thread::sleep, time::Duration};
/// Number of leading slots for which this broadcast run intentionally
/// publishes a final shred whose entry has a zeroed PoH hash, so entry
/// verification fails on receiving validators.
pub const NUM_BAD_SLOTS: u64 = 10;
/// Slot height past which the stashed good shreds are re-inserted into
/// blockstore so validators can repair the earlier bad slots and make
/// progress.
pub const SLOT_TO_RESOLVE: u64 = 32;
#[derive(Clone)]
|
2019-11-18 18:05:02 -08:00
|
|
|
pub(super) struct FailEntryVerificationBroadcastRun {
|
|
|
|
shred_version: u16,
|
2020-05-05 14:07:21 -07:00
|
|
|
good_shreds: Vec<Shred>,
|
|
|
|
current_slot: Slot,
|
|
|
|
next_shred_index: u32,
|
2019-11-18 18:05:02 -08:00
|
|
|
}
impl FailEntryVerificationBroadcastRun {
|
2021-06-21 13:12:38 -07:00
|
|
|
pub(super) fn new(shred_version: u16) -> Self {
|
2019-12-16 17:11:18 -08:00
|
|
|
Self {
|
|
|
|
shred_version,
|
2020-05-05 14:07:21 -07:00
|
|
|
good_shreds: vec![],
|
|
|
|
current_slot: 0,
|
|
|
|
next_shred_index: 0,
|
2019-12-16 17:11:18 -08:00
|
|
|
}
|
2019-06-19 00:13:19 -07:00
|
|
|
}
|
|
|
|
}
impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
|
|
|
fn run(
|
|
|
|
&mut self,
|
2021-06-21 13:12:38 -07:00
|
|
|
keypair: &Keypair,
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore: &Arc<Blockstore>,
|
2019-12-16 17:11:18 -08:00
|
|
|
receiver: &Receiver<WorkingBankEntry>,
|
2020-04-15 15:22:16 -07:00
|
|
|
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
|
|
|
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
2019-06-19 00:13:19 -07:00
|
|
|
) -> Result<()> {
|
|
|
|
// 1) Pull entries from banking stage
|
2019-09-18 12:16:22 -07:00
|
|
|
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
|
2019-06-19 00:13:19 -07:00
|
|
|
let bank = receive_results.bank.clone();
|
2019-10-16 12:53:11 -07:00
|
|
|
let last_tick_height = receive_results.last_tick_height;
|
2019-06-19 00:13:19 -07:00
|
|
|
|
2020-05-05 14:07:21 -07:00
|
|
|
if bank.slot() != self.current_slot {
|
|
|
|
self.next_shred_index = 0;
|
|
|
|
self.current_slot = bank.slot();
|
2019-06-19 00:13:19 -07:00
|
|
|
}
|
|
|
|
|
2020-05-05 14:07:21 -07:00
|
|
|
// 2) If we're past SLOT_TO_RESOLVE, insert the correct shreds so validators can repair
|
|
|
|
// and make progress
|
|
|
|
if bank.slot() > SLOT_TO_RESOLVE && !self.good_shreds.is_empty() {
|
|
|
|
info!("Resolving bad shreds");
|
|
|
|
let mut shreds = vec![];
|
|
|
|
std::mem::swap(&mut shreds, &mut self.good_shreds);
|
|
|
|
blockstore_sender.send((Arc::new(shreds), None))?;
|
|
|
|
}
|
|
|
|
|
|
|
|
// 3) Convert entries to shreds + generate coding shreds. Set a garbage PoH on the last entry
|
|
|
|
// in the slot to make verification fail on validators
|
|
|
|
let last_entries = {
|
|
|
|
if last_tick_height == bank.max_tick_height() && bank.slot() < NUM_BAD_SLOTS {
|
|
|
|
let good_last_entry = receive_results.entries.pop().unwrap();
|
|
|
|
let mut bad_last_entry = good_last_entry.clone();
|
|
|
|
bad_last_entry.hash = Hash::default();
|
|
|
|
Some((good_last_entry, bad_last_entry))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
};
|
2019-06-19 00:13:19 -07:00
|
|
|
|
2019-10-08 00:42:51 -07:00
|
|
|
let shredder = Shredder::new(
|
2019-09-18 12:16:22 -07:00
|
|
|
bank.slot(),
|
2019-09-03 21:32:51 -07:00
|
|
|
bank.parent().unwrap().slot(),
|
2019-11-06 13:27:58 -08:00
|
|
|
(bank.tick_height() % bank.ticks_per_slot()) as u8,
|
2019-11-18 18:05:02 -08:00
|
|
|
self.shred_version,
|
2019-10-08 00:42:51 -07:00
|
|
|
)
|
|
|
|
.expect("Expected to create a new shredder");
|
2019-06-19 00:13:19 -07:00
|
|
|
|
2020-05-05 14:07:21 -07:00
|
|
|
let (data_shreds, _, _) = shredder.entries_to_shreds(
|
2021-06-21 13:12:38 -07:00
|
|
|
keypair,
|
2019-10-08 00:42:51 -07:00
|
|
|
&receive_results.entries,
|
2020-05-05 14:07:21 -07:00
|
|
|
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
|
|
|
|
self.next_shred_index,
|
2019-10-08 00:42:51 -07:00
|
|
|
);
|
2019-09-03 21:32:51 -07:00
|
|
|
|
2020-05-05 14:07:21 -07:00
|
|
|
self.next_shred_index += data_shreds.len() as u32;
|
|
|
|
let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
|
|
|
|
let (good_last_data_shred, _, _) =
|
2021-06-21 13:12:38 -07:00
|
|
|
shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index);
|
2020-05-05 14:07:21 -07:00
|
|
|
|
|
|
|
let (bad_last_data_shred, _, _) =
|
|
|
|
// Don't mark the last shred as last so that validators won't know that
|
|
|
|
// they've gotten all the shreds, and will continue trying to repair
|
2021-06-21 13:12:38 -07:00
|
|
|
shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index);
|
2020-05-05 14:07:21 -07:00
|
|
|
|
|
|
|
self.next_shred_index += 1;
|
|
|
|
(good_last_data_shred, bad_last_data_shred)
|
|
|
|
});
|
|
|
|
|
2019-12-16 17:11:18 -08:00
|
|
|
let data_shreds = Arc::new(data_shreds);
|
2020-04-15 15:22:16 -07:00
|
|
|
blockstore_sender.send((data_shreds.clone(), None))?;
|
2020-05-05 14:07:21 -07:00
|
|
|
// 4) Start broadcast step
|
2019-10-08 22:34:26 -07:00
|
|
|
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
2020-12-17 13:22:50 -08:00
|
|
|
let stakes = bank.epoch_staked_nodes(bank_epoch);
|
2019-12-16 17:11:18 -08:00
|
|
|
let stakes = stakes.map(Arc::new);
|
2020-04-15 15:22:16 -07:00
|
|
|
socket_sender.send(((stakes.clone(), data_shreds), None))?;
|
2020-05-05 14:07:21 -07:00
|
|
|
if let Some((good_last_data_shred, bad_last_data_shred)) = last_shreds {
|
|
|
|
// Stash away the good shred so we can rewrite them later
|
|
|
|
self.good_shreds.extend(good_last_data_shred.clone());
|
|
|
|
let good_last_data_shred = Arc::new(good_last_data_shred);
|
|
|
|
let bad_last_data_shred = Arc::new(bad_last_data_shred);
|
|
|
|
// Store the good shred so that blockstore will signal ClusterSlots
|
|
|
|
// that the slot is complete
|
|
|
|
blockstore_sender.send((good_last_data_shred, None))?;
|
|
|
|
loop {
|
|
|
|
// Wait for slot to be complete
|
|
|
|
if blockstore.is_full(bank.slot()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
sleep(Duration::from_millis(10));
|
|
|
|
}
|
|
|
|
// Store the bad shred so we serve bad repairs to validators catching up
|
|
|
|
blockstore_sender.send((bad_last_data_shred.clone(), None))?;
|
|
|
|
// Send bad shreds to rest of network
|
|
|
|
socket_sender.send(((stakes, bad_last_data_shred), None))?;
|
|
|
|
}
|
2019-12-16 17:11:18 -08:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
fn transmit(
|
2020-04-05 15:18:45 -07:00
|
|
|
&mut self,
|
2020-04-15 15:22:16 -07:00
|
|
|
receiver: &Arc<Mutex<TransmitReceiver>>,
|
2020-04-21 12:54:45 -07:00
|
|
|
cluster_info: &ClusterInfo,
|
2019-12-16 17:11:18 -08:00
|
|
|
sock: &UdpSocket,
|
|
|
|
) -> Result<()> {
|
2020-04-15 15:22:16 -07:00
|
|
|
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
|
2019-10-08 00:42:51 -07:00
|
|
|
// Broadcast data
|
2021-07-06 17:35:25 -07:00
|
|
|
let cluster_nodes = ClusterNodes::<BroadcastStage>::new(
|
|
|
|
cluster_info,
|
|
|
|
stakes.as_deref().unwrap_or(&HashMap::default()),
|
|
|
|
);
|
2020-04-06 17:36:22 -07:00
|
|
|
broadcast_shreds(
|
2020-04-05 15:18:45 -07:00
|
|
|
sock,
|
2020-04-06 17:36:22 -07:00
|
|
|
&shreds,
|
2021-07-06 17:35:25 -07:00
|
|
|
&cluster_nodes,
|
2020-04-14 21:21:58 -07:00
|
|
|
&Arc::new(AtomicU64::new(0)),
|
2020-06-03 08:24:05 -07:00
|
|
|
&mut TransmitShredsStats::default(),
|
2020-04-05 15:18:45 -07:00
|
|
|
)?;
|
2020-04-06 17:36:22 -07:00
|
|
|
|
2019-12-16 17:11:18 -08:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
fn record(
|
2020-04-15 15:22:16 -07:00
|
|
|
&mut self,
|
|
|
|
receiver: &Arc<Mutex<RecordReceiver>>,
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore: &Arc<Blockstore>,
|
2019-12-16 17:11:18 -08:00
|
|
|
) -> Result<()> {
|
2020-04-15 15:22:16 -07:00
|
|
|
let (all_shreds, _) = receiver.lock().unwrap().recv()?;
|
2020-01-13 13:13:52 -08:00
|
|
|
blockstore
|
2019-12-16 17:11:18 -08:00
|
|
|
.insert_shreds(all_shreds.to_vec(), None, true)
|
2020-01-13 13:13:52 -08:00
|
|
|
.expect("Failed to insert shreds in blockstore");
|
2019-06-19 00:13:19 -07:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}