2018-12-07 14:09:29 -08:00
|
|
|
//! The `replay_stage` replays transactions broadcast by the leader.
|
2018-05-22 14:26:28 -07:00
|
|
|
|
2019-02-21 11:37:48 -08:00
|
|
|
use crate::bank_forks::BankForks;
|
2019-02-07 20:52:39 -08:00
|
|
|
use crate::blocktree::Blocktree;
|
2019-02-26 21:57:45 -08:00
|
|
|
use crate::blocktree_processor;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::cluster_info::ClusterInfo;
|
2019-02-28 01:14:37 -08:00
|
|
|
use crate::entry::{Entry, EntryReceiver, EntrySender, EntrySlice};
|
2019-02-27 14:41:46 -08:00
|
|
|
use crate::leader_schedule_utils;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::packet::BlobError;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::poh_recorder::PohRecorder;
|
2019-02-26 21:57:45 -08:00
|
|
|
use crate::result;
|
2019-02-18 18:08:54 -08:00
|
|
|
use crate::rpc_subscriptions::RpcSubscriptions;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::service::Service;
|
2019-02-18 22:26:22 -08:00
|
|
|
use solana_metrics::counter::Counter;
|
|
|
|
use solana_runtime::bank::Bank;
|
2019-01-26 00:28:08 -08:00
|
|
|
use solana_sdk::hash::Hash;
|
2019-01-30 19:28:48 -08:00
|
|
|
use solana_sdk::pubkey::Pubkey;
|
2019-02-21 21:43:35 -08:00
|
|
|
use solana_sdk::signature::KeypairUtil;
|
2018-11-16 08:45:59 -08:00
|
|
|
use solana_sdk::timing::duration_as_ms;
|
2019-03-02 13:51:26 -08:00
|
|
|
use solana_vote_api::vote_transaction::VoteTransaction;
|
2019-02-26 21:57:45 -08:00
|
|
|
use std::collections::HashMap;
|
2019-02-13 20:04:20 -08:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2019-03-13 14:06:12 -07:00
|
|
|
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
|
2019-03-03 16:44:06 -08:00
|
|
|
use std::sync::{Arc, Mutex, RwLock};
|
2018-07-03 21:14:08 -07:00
|
|
|
use std::thread::{self, Builder, JoinHandle};
|
2019-02-10 16:28:52 -08:00
|
|
|
use std::time::Duration;
|
2018-09-26 20:58:06 -07:00
|
|
|
use std::time::Instant;
|
2018-05-22 14:26:28 -07:00
|
|
|
|
2018-12-13 18:43:10 -08:00
|
|
|
// Upper bound on entries pulled per replay iteration, to cap per-loop work.
// NOTE(review): not referenced anywhere in this chunk — confirm it is still
// used by callers elsewhere in the crate.
pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
// Implement a destructor for the ReplayStage thread to signal it exited
// even on panics
struct Finalizer {
    // Shared shutdown flag; set to `true` when this guard is dropped.
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    /// Wraps `exit_sender` so the flag is raised when the guard goes out of
    /// scope (including via panic unwinding).
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // `store` only needs a shared reference; the previous `.clone()` of
        // the Arc before storing was a needless refcount bump.
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
/// Stage that replays ledger entries into banks on a background thread.
pub struct ReplayStage {
    // Handle to the replay loop thread; resolves to Ok(()) on clean
    // shutdown or carries the first error that escaped the loop.
    t_replay: JoinHandle<result::Result<()>>,
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
impl ReplayStage {
|
2019-01-29 00:21:27 -08:00
|
|
|
/// Spawns the replay loop thread and returns the stage plus the receivers
/// that downstream stages listen on: completed-slot notifications and
/// verified entries forwarded after replay.
///
/// The loop repeatedly: creates banks for new fork slots found in the
/// blocktree, replays entries into active banks, votes on the newest
/// votable bank, resets PoH, and hands off to the TPU when this node is
/// the scheduled leader.
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
    my_id: &Pubkey,
    vote_account: &Pubkey,
    voting_keypair: Option<Arc<T>>,
    blocktree: Arc<Blocktree>,
    bank_forks: &Arc<RwLock<BankForks>>,
    cluster_info: Arc<RwLock<ClusterInfo>>,
    exit: &Arc<AtomicBool>,
    ledger_signal_receiver: Receiver<bool>,
    subscriptions: &Arc<RpcSubscriptions>,
    poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> (Self, Receiver<(u64, Pubkey)>, EntryReceiver)
where
    T: 'static + KeypairUtil + Send + Sync,
{
    // Channel for entries that passed verification (consumed downstream).
    let (forward_entry_sender, forward_entry_receiver) = channel();
    // Channel announcing (slot, leader) pairs for slots that filled up.
    let (slot_full_sender, slot_full_receiver) = channel();
    trace!("replay stage");
    let exit_ = exit.clone();
    let subscriptions = subscriptions.clone();
    let bank_forks = bank_forks.clone();
    let poh_recorder = poh_recorder.clone();
    let my_id = *my_id;
    let vote_account = *vote_account;
    // Lazily discovered from the first bank seen; 0 means "unknown yet".
    let mut ticks_per_slot = 0;

    // Start the replay stage loop
    let t_replay = Builder::new()
        .name("solana-replay-stage".to_string())
        .spawn(move || {
            // Raises the exit flag when this closure returns or panics.
            let _exit = Finalizer::new(exit_.clone());
            // Per-slot replay progress: slot -> (last entry hash, blob count).
            let mut progress = HashMap::new();
            loop {
                let now = Instant::now();
                // Stop getting entries if we get exit signal
                if exit_.load(Ordering::Relaxed) {
                    break;
                }
                Self::generate_new_bank_forks(&blocktree, &mut bank_forks.write().unwrap());
                let active_banks = bank_forks.read().unwrap().active_banks();
                trace!("active banks {:?}", active_banks);
                let mut votable: Vec<Arc<Bank>> = vec![];
                let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some();
                for bank_slot in &active_banks {
                    let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();
                    ticks_per_slot = bank.ticks_per_slot();
                    // Skip replaying slots this node produced itself.
                    if bank.collector_id() != my_id {
                        Self::replay_blocktree_into_bank(
                            &bank,
                            &blocktree,
                            &mut progress,
                            &forward_entry_sender,
                        )?;
                    }
                    // Last tick that belongs to this slot.
                    let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
                    if bank.tick_height() == max_tick_height {
                        Self::process_completed_bank(
                            &my_id,
                            bank,
                            &mut progress,
                            &mut votable,
                            &slot_full_sender,
                        );
                    }
                }

                // No active banks seen yet this iteration: fall back to any
                // frozen bank for the tick rate. NOTE(review): assumes at
                // least one frozen bank always exists here — confirm.
                if ticks_per_slot == 0 {
                    let frozen_banks = bank_forks.read().unwrap().frozen_banks();
                    let bank = frozen_banks.values().next().unwrap();
                    ticks_per_slot = bank.ticks_per_slot();
                }

                // TODO: fork selection
                // vote on the latest one for now
                votable.sort_by(|b1, b2| b1.slot().cmp(&b2.slot()));

                if let Some(bank) = votable.last() {
                    subscriptions.notify_subscribers(&bank);

                    if let Some(ref voting_keypair) = voting_keypair {
                        let keypair = voting_keypair.as_ref();
                        let vote = VoteTransaction::new_vote(
                            &vote_account,
                            keypair,
                            bank.slot(),
                            bank.last_blockhash(),
                            0,
                        );
                        cluster_info.write().unwrap().push_vote(vote);
                    }
                    let next_leader_slot =
                        leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
                    // Re-anchor PoH on the bank we just voted for.
                    poh_recorder.lock().unwrap().reset(
                        bank.tick_height(),
                        bank.last_blockhash(),
                        bank.slot(),
                        next_leader_slot,
                        ticks_per_slot,
                    );
                    info!(
                        "{:?} voted and reset poh at {}. next leader slot {:?}",
                        my_id,
                        bank.tick_height(),
                        next_leader_slot
                    );
                    is_tpu_bank_active = false;
                }

                let mut reached_leader_tick = false;
                if !is_tpu_bank_active {
                    // Lock guard is scoped to this block; released before
                    // the second lock below.
                    let poh = poh_recorder.lock().unwrap();
                    reached_leader_tick = poh.reached_leader_tick();

                    info!(
                        "{:?} TPU bank inactive. poh tick {}, leader {}",
                        my_id,
                        poh.tick_height(),
                        reached_leader_tick
                    );
                };

                if !is_tpu_bank_active {
                    assert!(ticks_per_slot > 0);
                    let poh_tick_height = poh_recorder.lock().unwrap().tick_height();
                    // +1: the slot the *next* tick will land in.
                    let poh_slot = leader_schedule_utils::tick_height_to_slot(
                        ticks_per_slot,
                        poh_tick_height + 1,
                    );
                    Self::start_leader(
                        &my_id,
                        &bank_forks,
                        &poh_recorder,
                        &cluster_info,
                        &blocktree,
                        poh_slot,
                        reached_leader_tick,
                    );
                }

                inc_new_counter_info!(
                    "replicate_stage-duration",
                    duration_as_ms(&now.elapsed()) as usize
                );
                // Block until the blocktree signals new data (or 100ms).
                let timer = Duration::from_millis(100);
                let result = ledger_signal_receiver.recv_timeout(timer);
                match result {
                    Err(RecvTimeoutError::Timeout) => continue,
                    Err(_) => break,
                    Ok(_) => trace!("blocktree signal"),
                };
            }
            Ok(())
        })
        .unwrap();
    (
        Self { t_replay },
        slot_full_receiver,
        forward_entry_receiver,
    )
}
|
2019-03-05 17:56:51 -08:00
|
|
|
pub fn start_leader(
|
2019-03-09 19:28:43 -08:00
|
|
|
my_id: &Pubkey,
|
2019-03-05 17:56:51 -08:00
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
|
|
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
|
|
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
2019-03-12 17:42:53 -07:00
|
|
|
blocktree: &Blocktree,
|
|
|
|
poh_slot: u64,
|
2019-03-15 13:22:16 -07:00
|
|
|
reached_leader_tick: bool,
|
2019-03-05 17:56:51 -08:00
|
|
|
) {
|
2019-03-12 17:42:53 -07:00
|
|
|
trace!("{} checking poh slot {}", my_id, poh_slot);
|
|
|
|
if blocktree.meta(poh_slot).unwrap().is_some() {
|
|
|
|
// We've already broadcasted entries for this slot, skip it
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if bank_forks.read().unwrap().get(poh_slot).is_none() {
|
|
|
|
let frozen = bank_forks.read().unwrap().frozen_banks();
|
|
|
|
let parent_slot = poh_recorder.lock().unwrap().start_slot();
|
|
|
|
assert!(frozen.contains_key(&parent_slot));
|
|
|
|
let parent = &frozen[&parent_slot];
|
2019-02-10 16:28:52 -08:00
|
|
|
|
2019-03-12 17:42:53 -07:00
|
|
|
leader_schedule_utils::slot_leader_at(poh_slot, parent)
|
|
|
|
.map(|next_leader| {
|
|
|
|
debug!(
|
|
|
|
"me: {} leader {} at poh slot {}",
|
|
|
|
my_id, next_leader, poh_slot
|
|
|
|
);
|
|
|
|
cluster_info.write().unwrap().set_leader(&next_leader);
|
2019-03-15 13:22:16 -07:00
|
|
|
if next_leader == *my_id && reached_leader_tick {
|
2019-03-12 17:42:53 -07:00
|
|
|
debug!("{} starting tpu for slot {}", my_id, poh_slot);
|
2019-03-15 13:22:16 -07:00
|
|
|
inc_new_counter_info!("replay_stage-new_leader", poh_slot as usize);
|
2019-03-12 17:42:53 -07:00
|
|
|
let tpu_bank = Bank::new_from_parent(parent, my_id, poh_slot);
|
|
|
|
bank_forks.write().unwrap().insert(poh_slot, tpu_bank);
|
|
|
|
if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
|
|
|
|
assert_eq!(
|
|
|
|
bank_forks.read().unwrap().working_bank().slot(),
|
|
|
|
tpu_bank.slot()
|
|
|
|
);
|
|
|
|
debug!(
|
|
|
|
"poh_recorder new working bank: me: {} next_slot: {} next_leader: {}",
|
|
|
|
my_id,
|
|
|
|
tpu_bank.slot(),
|
|
|
|
next_leader
|
|
|
|
);
|
|
|
|
poh_recorder.lock().unwrap().set_bank(&tpu_bank);
|
2019-03-07 15:49:07 -08:00
|
|
|
}
|
2019-03-12 17:42:53 -07:00
|
|
|
}
|
|
|
|
})
|
|
|
|
.or_else(|| {
|
|
|
|
error!("{} No next leader found", my_id);
|
|
|
|
None
|
|
|
|
});
|
2019-03-05 17:56:51 -08:00
|
|
|
}
|
|
|
|
}
|
2019-02-26 21:57:45 -08:00
|
|
|
pub fn replay_blocktree_into_bank(
|
|
|
|
bank: &Bank,
|
|
|
|
blocktree: &Blocktree,
|
|
|
|
progress: &mut HashMap<u64, (Hash, usize)>,
|
|
|
|
forward_entry_sender: &EntrySender,
|
|
|
|
) -> result::Result<()> {
|
|
|
|
let (entries, num) = Self::load_blocktree_entries(bank, blocktree, progress)?;
|
|
|
|
let len = entries.len();
|
|
|
|
let result =
|
|
|
|
Self::replay_entries_into_bank(bank, entries, progress, forward_entry_sender, num);
|
|
|
|
if result.is_ok() {
|
|
|
|
trace!("verified entries {}", len);
|
|
|
|
inc_new_counter_info!("replicate-stage_process_entries", len);
|
|
|
|
} else {
|
|
|
|
info!("debug to verify entries {}", len);
|
|
|
|
//TODO: mark this fork as failed
|
|
|
|
inc_new_counter_info!("replicate-stage_failed_process_entries", len);
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn load_blocktree_entries(
|
|
|
|
bank: &Bank,
|
|
|
|
blocktree: &Blocktree,
|
|
|
|
progress: &mut HashMap<u64, (Hash, usize)>,
|
|
|
|
) -> result::Result<(Vec<Entry>, usize)> {
|
2019-03-04 16:40:28 -08:00
|
|
|
let bank_slot = bank.slot();
|
2019-03-02 10:20:10 -08:00
|
|
|
let bank_progress = &mut progress
|
2019-03-04 16:40:28 -08:00
|
|
|
.entry(bank_slot)
|
2019-03-02 10:25:16 -08:00
|
|
|
.or_insert((bank.last_blockhash(), 0));
|
2019-03-04 16:40:28 -08:00
|
|
|
blocktree.get_slot_entries_with_blob_count(bank_slot, bank_progress.1 as u64, None)
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn replay_entries_into_bank(
|
|
|
|
bank: &Bank,
|
|
|
|
entries: Vec<Entry>,
|
|
|
|
progress: &mut HashMap<u64, (Hash, usize)>,
|
|
|
|
forward_entry_sender: &EntrySender,
|
|
|
|
num: usize,
|
|
|
|
) -> result::Result<()> {
|
2019-03-02 10:20:10 -08:00
|
|
|
let bank_progress = &mut progress
|
|
|
|
.entry(bank.slot())
|
2019-03-02 10:25:16 -08:00
|
|
|
.or_insert((bank.last_blockhash(), 0));
|
2019-02-26 21:57:45 -08:00
|
|
|
let result = Self::verify_and_process_entries(&bank, &entries, &bank_progress.0);
|
|
|
|
bank_progress.1 += num;
|
|
|
|
if let Some(last_entry) = entries.last() {
|
|
|
|
bank_progress.0 = last_entry.hash;
|
|
|
|
}
|
|
|
|
if result.is_ok() {
|
|
|
|
forward_entry_sender.send(entries)?;
|
|
|
|
}
|
|
|
|
result
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn verify_and_process_entries(
|
|
|
|
bank: &Bank,
|
|
|
|
entries: &[Entry],
|
|
|
|
last_entry: &Hash,
|
|
|
|
) -> result::Result<()> {
|
|
|
|
if !entries.verify(last_entry) {
|
|
|
|
trace!(
|
|
|
|
"entry verification failed {} {} {} {}",
|
|
|
|
entries.len(),
|
|
|
|
bank.tick_height(),
|
|
|
|
last_entry,
|
2019-03-02 10:25:16 -08:00
|
|
|
bank.last_blockhash()
|
2019-02-26 21:57:45 -08:00
|
|
|
);
|
|
|
|
return Err(result::Error::BlobError(BlobError::VerificationFailed));
|
|
|
|
}
|
|
|
|
blocktree_processor::process_entries(bank, entries)?;
|
2019-02-24 01:06:46 -08:00
|
|
|
|
2019-02-26 21:57:45 -08:00
|
|
|
Ok(())
|
2019-02-24 01:06:46 -08:00
|
|
|
}
|
|
|
|
|
2019-03-13 14:06:12 -07:00
|
|
|
fn process_completed_bank(
|
|
|
|
my_id: &Pubkey,
|
|
|
|
bank: Arc<Bank>,
|
|
|
|
progress: &mut HashMap<u64, (Hash, usize)>,
|
|
|
|
votable: &mut Vec<Arc<Bank>>,
|
|
|
|
slot_full_sender: &Sender<(u64, Pubkey)>,
|
|
|
|
) {
|
|
|
|
bank.freeze();
|
|
|
|
info!("bank frozen {}", bank.slot());
|
|
|
|
progress.remove(&bank.slot());
|
|
|
|
if let Err(e) = slot_full_sender.send((bank.slot(), bank.collector_id())) {
|
|
|
|
info!("{} slot_full alert failed: {:?}", my_id, e);
|
|
|
|
}
|
|
|
|
if bank.is_votable() {
|
|
|
|
votable.push(bank);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-26 21:57:45 -08:00
|
|
|
fn generate_new_bank_forks(blocktree: &Blocktree, forks: &mut BankForks) {
|
2019-02-07 15:10:54 -08:00
|
|
|
// Find the next slot that chains to the old slot
|
2019-02-26 21:57:45 -08:00
|
|
|
let frozen_banks = forks.frozen_banks();
|
2019-03-04 16:40:28 -08:00
|
|
|
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
|
|
|
|
trace!("frozen_banks {:?}", frozen_bank_slots);
|
2019-02-26 21:57:45 -08:00
|
|
|
let next_slots = blocktree
|
2019-03-04 16:40:28 -08:00
|
|
|
.get_slots_since(&frozen_bank_slots)
|
2019-02-26 21:57:45 -08:00
|
|
|
.expect("Db error");
|
2019-03-03 16:44:06 -08:00
|
|
|
trace!("generate new forks {:?}", next_slots);
|
2019-02-26 21:57:45 -08:00
|
|
|
for (parent_id, children) in next_slots {
|
|
|
|
let parent_bank = frozen_banks
|
|
|
|
.get(&parent_id)
|
|
|
|
.expect("missing parent in bank forks")
|
|
|
|
.clone();
|
|
|
|
for child_id in children {
|
2019-03-03 16:44:06 -08:00
|
|
|
if frozen_banks.get(&child_id).is_some() {
|
|
|
|
trace!("child already frozen {}", child_id);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if forks.get(child_id).is_some() {
|
|
|
|
trace!("child already active {}", child_id);
|
|
|
|
continue;
|
2019-02-28 19:49:22 -08:00
|
|
|
}
|
2019-03-07 15:49:07 -08:00
|
|
|
let leader = leader_schedule_utils::slot_leader_at(child_id, &parent_bank).unwrap();
|
2019-03-03 16:44:06 -08:00
|
|
|
info!("new fork:{} parent:{}", child_id, parent_id);
|
|
|
|
forks.insert(
|
|
|
|
child_id,
|
2019-03-09 19:28:43 -08:00
|
|
|
Bank::new_from_parent(&parent_bank, &leader, child_id),
|
2019-03-03 16:44:06 -08:00
|
|
|
);
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
|
|
|
}
|
2019-02-07 15:10:54 -08:00
|
|
|
}
|
2018-05-22 14:26:28 -07:00
|
|
|
}
|
2018-07-03 21:14:08 -07:00
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
impl Service for ReplayStage {
|
2019-01-26 00:28:08 -08:00
|
|
|
type JoinReturnType = ();
|
2018-10-10 16:49:41 -07:00
|
|
|
|
2019-01-26 00:28:08 -08:00
|
|
|
fn join(self) -> thread::Result<()> {
|
2019-02-26 21:57:45 -08:00
|
|
|
self.t_replay.join().map(|_| ())
|
2018-10-10 16:49:41 -07:00
|
|
|
}
|
|
|
|
}
|
2018-09-13 14:00:17 -07:00
|
|
|
|
2018-10-10 16:49:41 -07:00
|
|
|
#[cfg(test)]
|
|
|
|
mod test {
|
2019-01-29 00:21:27 -08:00
|
|
|
use super::*;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::banking_stage::create_test_recorder;
|
2019-02-26 16:35:00 -08:00
|
|
|
use crate::blocktree::create_new_tmp_ledger;
|
2019-02-07 20:52:39 -08:00
|
|
|
use crate::cluster_info::{ClusterInfo, Node};
|
2019-01-09 14:33:44 -08:00
|
|
|
use crate::entry::create_ticks;
|
2019-02-12 17:38:46 -08:00
|
|
|
use crate::entry::{next_entry_mut, Entry};
|
2019-02-21 11:37:48 -08:00
|
|
|
use crate::fullnode::new_banks_from_blocktree;
|
2019-01-26 00:28:08 -08:00
|
|
|
use crate::replay_stage::ReplayStage;
|
2019-02-26 21:57:45 -08:00
|
|
|
use crate::result::Error;
|
2019-02-18 22:26:22 -08:00
|
|
|
use solana_sdk::genesis_block::GenesisBlock;
|
2018-11-16 08:04:46 -08:00
|
|
|
use solana_sdk::hash::Hash;
|
2018-12-03 10:26:28 -08:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2018-10-17 13:42:54 -07:00
|
|
|
use std::fs::remove_dir_all;
|
2018-10-10 16:49:41 -07:00
|
|
|
use std::sync::mpsc::channel;
|
|
|
|
use std::sync::{Arc, RwLock};
|
|
|
|
|
2018-11-12 11:40:32 -08:00
|
|
|
#[test]
fn test_vote_error_replay_stage_correctness() {
    solana_logger::setup();
    // Set up dummy node to host a ReplayStage
    let my_keypair = Keypair::new();
    let my_id = my_keypair.pubkey();
    let my_node = Node::new_localhost_with_pubkey(&my_id);

    // Create keypair for the leader
    let leader_id = Keypair::new().pubkey();

    let (genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(10_000, &leader_id, 500);

    let (my_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

    // Set up the cluster info
    let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
        my_node.info.clone(),
    )));

    // Set up the replay stage
    // Inner scope so the blocktree/bank handles drop before the ledger
    // directory is removed below.
    {
        let voting_keypair = Arc::new(Keypair::new());
        let (bank_forks, _bank_forks_info, blocktree, l_receiver) =
            new_banks_from_blocktree(&my_ledger_path, None);
        let bank = bank_forks.working_bank();

        let blocktree = Arc::new(blocktree);
        let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank);
        let (replay_stage, _slot_full_receiver, ledger_writer_recv) = ReplayStage::new(
            &my_keypair.pubkey(),
            &voting_keypair.pubkey(),
            Some(voting_keypair.clone()),
            blocktree.clone(),
            &Arc::new(RwLock::new(bank_forks)),
            cluster_info_me.clone(),
            &exit,
            l_receiver,
            &Arc::new(RpcSubscriptions::default()),
            &poh_recorder,
        );

        // Push a vote for slot 0 into gossip before feeding entries.
        let keypair = voting_keypair.as_ref();
        let vote =
            VoteTransaction::new_vote(&keypair.pubkey(), keypair, 0, bank.last_blockhash(), 0);
        cluster_info_me.write().unwrap().push_vote(vote);

        info!("Send ReplayStage an entry, should see it on the ledger writer receiver");
        let next_tick = create_ticks(1, bank.last_blockhash());
        // Write the tick into slot 1 of the ledger so the replay loop
        // picks it up via the ledger signal.
        blocktree
            .write_entries(1, 0, 0, genesis_block.ticks_per_slot, next_tick.clone())
            .unwrap();

        let received_tick = ledger_writer_recv
            .recv()
            .expect("Expected to receive an entry on the ledger writer receiver");

        // The forwarded entry must be exactly the tick we wrote.
        assert_eq!(next_tick[0], received_tick[0]);

        // Orderly shutdown: raise exit, then join both services.
        exit.store(true, Ordering::Relaxed);
        replay_stage.join().unwrap();
        poh_service.join().unwrap();
    }
    let _ignored = remove_dir_all(&my_ledger_path);
}
|
|
|
|
|
2019-03-13 14:06:12 -07:00
|
|
|
/// A slot made up solely of ticks must complete (progress cleared) but
/// must not be queued for voting.
#[test]
fn test_no_vote_empty_transmission() {
    let genesis_block = GenesisBlock::new(10_000).0;
    let bank = Arc::new(Bank::new(&genesis_block));
    let mut blockhash = bank.last_blockhash();
    // Fill exactly one slot with tick-only entries.
    let tick_entries: Vec<Entry> = (0..genesis_block.ticks_per_slot)
        .map(|_| next_entry_mut(&mut blockhash, 1, vec![])) //just ticks
        .collect();
    let (slot_full_sender, _slot_full_receiver) = channel();

    let mut progress = HashMap::new();
    let (forward_entry_sender, _forward_entry_receiver) = channel();
    ReplayStage::replay_entries_into_bank(
        &bank,
        tick_entries,
        &mut progress,
        &forward_entry_sender,
        0,
    )
    .unwrap();

    let mut votable = vec![];
    ReplayStage::process_completed_bank(
        &Pubkey::default(),
        bank,
        &mut progress,
        &mut votable,
        &slot_full_sender,
    );
    assert!(progress.is_empty());
    // Don't vote on slot that only contained ticks
    assert!(votable.is_empty());
}
|
|
|
|
|
2018-11-16 15:48:10 -08:00
|
|
|
/// Well-formed tick entries must replay successfully and be forwarded on
/// the entry channel.
#[test]
fn test_replay_stage_poh_ok_entry_receiver() {
    let (forward_entry_sender, forward_entry_receiver) = channel();
    let genesis_block = GenesisBlock::new(10_000).0;
    let bank = Arc::new(Bank::new(&genesis_block));
    let mut blockhash = bank.last_blockhash();
    let tick_entries: Vec<Entry> = (0..5)
        .map(|_| next_entry_mut(&mut blockhash, 1, vec![])) //just ticks
        .collect();

    let mut progress = HashMap::new();
    let replay_result = ReplayStage::replay_entries_into_bank(
        &bank,
        tick_entries,
        &mut progress,
        &forward_entry_sender,
        0,
    );
    assert!(replay_result.is_ok(), "replay failed {:?}", replay_result);
    // The verified entries must have been forwarded downstream.
    if let Err(e) = forward_entry_receiver.try_recv() {
        panic!("Entries were not sent correctly {:?}", e);
    }
}
|
2018-11-16 15:48:10 -08:00
|
|
|
|
2019-02-26 21:57:45 -08:00
|
|
|
/// Entries whose PoH chain is broken must fail verification with
/// `BlobError::VerificationFailed` and must not be forwarded.
#[test]
fn test_replay_stage_poh_error_entry_receiver() {
    let (forward_entry_sender, forward_entry_receiver) = channel();
    // Entries built from a default hash do not chain to the bank's tip.
    let bad_entries: Vec<Entry> = (0..5)
        .map(|_| Entry::new(&mut Hash::default(), 1, vec![])) //just broken entries
        .collect();

    let genesis_block = GenesisBlock::new(10_000).0;
    let bank = Arc::new(Bank::new(&genesis_block));
    let mut progress = HashMap::new();
    let replay_result = ReplayStage::replay_entries_into_bank(
        &bank,
        bad_entries,
        &mut progress,
        &forward_entry_sender,
        0,
    );

    match replay_result {
        Ok(_) => panic!("Should have failed because entries are broken"),
        Err(Error::BlobError(BlobError::VerificationFailed)) => (),
        Err(e) => panic!(
            "Should have failed because with blob error, instead, got {:?}",
            e
        ),
    }
    // Nothing may have been forwarded for a failed slot.
    assert!(forward_entry_receiver.try_recv().is_err());
}
|
2018-07-03 21:14:08 -07:00
|
|
|
}
|