2018-12-07 14:09:29 -08:00
|
|
|
//! The `replay_stage` replays transactions broadcast by the leader.
|
2018-05-22 14:26:28 -07:00
|
|
|
|
2019-02-21 11:37:48 -08:00
|
|
|
use crate::bank_forks::BankForks;
|
2019-07-07 14:37:12 -07:00
|
|
|
use crate::blocktree::{Blocktree, BlocktreeError};
|
2019-02-26 21:57:45 -08:00
|
|
|
use crate::blocktree_processor;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::cluster_info::ClusterInfo;
|
2019-06-24 13:41:23 -07:00
|
|
|
use crate::consensus::{StakeLockout, Tower};
|
2019-05-03 16:27:53 -07:00
|
|
|
use crate::entry::{Entry, EntrySlice};
|
2019-04-19 02:39:44 -07:00
|
|
|
use crate::leader_schedule_cache::LeaderScheduleCache;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::packet::BlobError;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::poh_recorder::PohRecorder;
|
2019-04-15 13:12:28 -07:00
|
|
|
use crate::result::{Error, Result};
|
2019-02-18 18:08:54 -08:00
|
|
|
use crate::rpc_subscriptions::RpcSubscriptions;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::service::Service;
|
2019-07-31 17:58:10 -07:00
|
|
|
use crate::snapshot_package::SnapshotPackageSender;
|
2019-06-20 15:50:41 -07:00
|
|
|
use solana_metrics::{datapoint_warn, inc_new_counter_info};
|
2019-02-18 22:26:22 -08:00
|
|
|
use solana_runtime::bank::Bank;
|
2019-01-26 00:28:08 -08:00
|
|
|
use solana_sdk::hash::Hash;
|
2019-01-30 19:28:48 -08:00
|
|
|
use solana_sdk::pubkey::Pubkey;
|
2019-02-21 21:43:35 -08:00
|
|
|
use solana_sdk::signature::KeypairUtil;
|
2019-03-18 12:12:33 -07:00
|
|
|
use solana_sdk::timing::{self, duration_as_ms};
|
2019-03-25 15:08:22 -07:00
|
|
|
use solana_sdk::transaction::Transaction;
|
2019-04-10 17:52:47 -07:00
|
|
|
use solana_vote_api::vote_instruction;
|
2019-05-30 21:31:35 -07:00
|
|
|
use std::collections::HashMap;
|
2019-02-13 20:04:20 -08:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2019-03-13 14:06:12 -07:00
|
|
|
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
|
2019-03-03 16:44:06 -08:00
|
|
|
use std::sync::{Arc, Mutex, RwLock};
|
2018-07-03 21:14:08 -07:00
|
|
|
use std::thread::{self, Builder, JoinHandle};
|
2019-02-10 16:28:52 -08:00
|
|
|
use std::time::Duration;
|
2018-09-26 20:58:06 -07:00
|
|
|
use std::time::Instant;
|
2018-05-22 14:26:28 -07:00
|
|
|
|
2018-12-13 18:43:10 -08:00
|
|
|
pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
// Implement a destructor for the ReplayStage thread to signal it exited
// even on panics
struct Finalizer {
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    /// Wraps the shared exit flag; the flag is raised when this value is dropped.
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // Store directly through the shared Arc; the previous `.clone()` here
        // only bumped/dropped the refcount for no benefit.
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
pub struct ReplayStage {
|
2019-04-15 13:12:28 -07:00
|
|
|
t_replay: JoinHandle<Result<()>>,
|
2019-07-26 10:27:57 -07:00
|
|
|
t_lockouts: JoinHandle<()>,
|
2018-05-22 14:26:28 -07:00
|
|
|
}
|
|
|
|
|
2019-03-27 04:30:26 -07:00
|
|
|
#[derive(Default)]
|
|
|
|
struct ForkProgress {
|
|
|
|
last_entry: Hash,
|
|
|
|
num_blobs: usize,
|
|
|
|
started_ms: u64,
|
2019-06-20 15:50:41 -07:00
|
|
|
is_dead: bool,
|
2019-03-27 04:30:26 -07:00
|
|
|
}
|
|
|
|
impl ForkProgress {
|
|
|
|
pub fn new(last_entry: Hash) -> Self {
|
|
|
|
Self {
|
|
|
|
last_entry,
|
|
|
|
num_blobs: 0,
|
|
|
|
started_ms: timing::timestamp(),
|
2019-06-20 15:50:41 -07:00
|
|
|
is_dead: false,
|
2019-03-27 04:30:26 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
impl ReplayStage {
|
2019-06-11 18:27:47 -07:00
|
|
|
#[allow(
|
|
|
|
clippy::new_ret_no_self,
|
|
|
|
clippy::too_many_arguments,
|
|
|
|
clippy::type_complexity
|
|
|
|
)]
|
2019-02-21 21:43:35 -08:00
|
|
|
pub fn new<T>(
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey: &Pubkey,
|
2019-03-09 19:28:43 -08:00
|
|
|
vote_account: &Pubkey,
|
2019-05-15 15:19:29 -07:00
|
|
|
voting_keypair: Option<&Arc<T>>,
|
2019-02-07 20:52:39 -08:00
|
|
|
blocktree: Arc<Blocktree>,
|
2019-02-21 11:37:48 -08:00
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
2018-10-08 19:55:54 -07:00
|
|
|
cluster_info: Arc<RwLock<ClusterInfo>>,
|
2019-03-04 20:50:02 -08:00
|
|
|
exit: &Arc<AtomicBool>,
|
2019-02-04 15:33:43 -08:00
|
|
|
ledger_signal_receiver: Receiver<bool>,
|
2019-02-18 18:08:54 -08:00
|
|
|
subscriptions: &Arc<RpcSubscriptions>,
|
2019-03-03 16:44:06 -08:00
|
|
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
2019-04-19 02:39:44 -07:00
|
|
|
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
2019-07-20 13:13:55 -07:00
|
|
|
slot_full_senders: Vec<Sender<(u64, Pubkey)>>,
|
2019-07-31 17:58:10 -07:00
|
|
|
snapshot_package_sender: Option<SnapshotPackageSender>,
|
2019-07-20 13:13:55 -07:00
|
|
|
) -> (Self, Receiver<Vec<Arc<Bank>>>)
|
2019-02-21 21:43:35 -08:00
|
|
|
where
|
|
|
|
T: 'static + KeypairUtil + Send + Sync,
|
|
|
|
{
|
2019-06-11 18:27:47 -07:00
|
|
|
let (root_bank_sender, root_bank_receiver) = channel();
|
2019-02-26 21:57:45 -08:00
|
|
|
trace!("replay stage");
|
2019-02-04 15:33:43 -08:00
|
|
|
let exit_ = exit.clone();
|
2019-02-26 21:57:45 -08:00
|
|
|
let subscriptions = subscriptions.clone();
|
|
|
|
let bank_forks = bank_forks.clone();
|
2019-03-03 16:44:06 -08:00
|
|
|
let poh_recorder = poh_recorder.clone();
|
2019-08-16 17:28:07 -07:00
|
|
|
let my_pubkey = *my_pubkey;
|
2019-08-14 13:30:21 -07:00
|
|
|
let mut tower = Tower::new(&my_pubkey, &vote_account, &bank_forks.read().unwrap());
|
2019-02-24 01:06:46 -08:00
|
|
|
// Start the replay stage loop
|
2019-04-19 02:39:44 -07:00
|
|
|
let leader_schedule_cache = leader_schedule_cache.clone();
|
2019-05-20 13:32:32 -07:00
|
|
|
let vote_account = *vote_account;
|
2019-05-15 15:19:29 -07:00
|
|
|
let voting_keypair = voting_keypair.cloned();
|
2019-07-18 14:54:27 -07:00
|
|
|
|
2019-07-26 10:27:57 -07:00
|
|
|
let (lockouts_sender, t_lockouts) = aggregate_stake_lockouts(exit);
|
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
let t_replay = Builder::new()
|
|
|
|
.name("solana-replay-stage".to_string())
|
2018-09-25 15:41:29 -07:00
|
|
|
.spawn(move || {
|
2019-02-04 15:33:43 -08:00
|
|
|
let _exit = Finalizer::new(exit_.clone());
|
2019-03-04 18:40:47 -08:00
|
|
|
let mut progress = HashMap::new();
|
2019-07-23 19:19:20 -07:00
|
|
|
let mut current_leader = None;
|
2019-07-18 14:54:27 -07:00
|
|
|
|
2018-09-25 15:41:29 -07:00
|
|
|
loop {
|
2019-02-26 21:57:45 -08:00
|
|
|
let now = Instant::now();
|
2019-02-04 15:33:43 -08:00
|
|
|
// Stop getting entries if we get exit signal
|
|
|
|
if exit_.load(Ordering::Relaxed) {
|
|
|
|
break;
|
2018-10-10 16:49:41 -07:00
|
|
|
}
|
2019-03-21 11:53:18 -07:00
|
|
|
|
2019-04-19 02:39:44 -07:00
|
|
|
Self::generate_new_bank_forks(
|
|
|
|
&blocktree,
|
|
|
|
&mut bank_forks.write().unwrap(),
|
|
|
|
&leader_schedule_cache,
|
|
|
|
);
|
2019-03-21 11:53:18 -07:00
|
|
|
|
2019-07-18 14:54:27 -07:00
|
|
|
let mut tpu_has_bank = poh_recorder.lock().unwrap().has_bank();
|
|
|
|
|
2019-07-18 14:07:32 -07:00
|
|
|
let did_complete_bank = Self::replay_active_banks(
|
2019-03-21 11:53:18 -07:00
|
|
|
&blocktree,
|
|
|
|
&bank_forks,
|
2019-05-23 23:20:04 -07:00
|
|
|
&my_pubkey,
|
2019-03-21 11:53:18 -07:00
|
|
|
&mut progress,
|
2019-07-20 13:13:55 -07:00
|
|
|
&slot_full_senders,
|
2019-07-18 12:04:53 -07:00
|
|
|
);
|
2019-03-12 17:42:53 -07:00
|
|
|
|
2019-06-24 13:41:23 -07:00
|
|
|
let votable = Self::generate_votable_banks(&bank_forks, &tower, &mut progress);
|
2019-03-18 12:12:33 -07:00
|
|
|
|
2019-08-14 13:30:21 -07:00
|
|
|
if let Some((_, bank, lockouts, total_staked)) = votable.into_iter().last() {
|
2019-05-06 07:31:50 -07:00
|
|
|
subscriptions.notify_subscribers(bank.slot(), &bank_forks);
|
2019-02-26 21:57:45 -08:00
|
|
|
|
2019-07-30 13:18:33 -07:00
|
|
|
if let Some(votable_leader) =
|
2019-07-23 19:19:20 -07:00
|
|
|
leader_schedule_cache.slot_leader_at(bank.slot(), Some(&bank))
|
|
|
|
{
|
|
|
|
Self::log_leader_change(
|
|
|
|
&my_pubkey,
|
|
|
|
bank.slot(),
|
|
|
|
&mut current_leader,
|
2019-07-30 13:18:33 -07:00
|
|
|
&votable_leader,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
let next_slot = bank.slot() + 1;
|
|
|
|
if let Some(new_leader) =
|
|
|
|
leader_schedule_cache.slot_leader_at(next_slot, Some(&bank))
|
|
|
|
{
|
|
|
|
datapoint_info!(
|
|
|
|
"replay_stage-new_leader",
|
|
|
|
("slot", next_slot, i64),
|
|
|
|
("leader", new_leader.to_string(), String),
|
2019-07-23 19:19:20 -07:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-03-21 11:53:18 -07:00
|
|
|
Self::handle_votable_bank(
|
|
|
|
&bank,
|
|
|
|
&bank_forks,
|
2019-06-24 13:41:23 -07:00
|
|
|
&mut tower,
|
2019-03-21 11:53:18 -07:00
|
|
|
&mut progress,
|
|
|
|
&vote_account,
|
2019-05-20 13:32:32 -07:00
|
|
|
&voting_keypair,
|
2019-03-21 11:53:18 -07:00
|
|
|
&cluster_info,
|
2019-04-06 19:41:22 -07:00
|
|
|
&blocktree,
|
2019-04-30 13:23:21 -07:00
|
|
|
&leader_schedule_cache,
|
2019-06-11 18:27:47 -07:00
|
|
|
&root_bank_sender,
|
2019-07-26 10:27:57 -07:00
|
|
|
lockouts,
|
2019-08-14 13:30:21 -07:00
|
|
|
total_staked,
|
2019-07-26 10:27:57 -07:00
|
|
|
&lockouts_sender,
|
2019-07-31 17:58:10 -07:00
|
|
|
&snapshot_package_sender,
|
2019-04-15 13:12:28 -07:00
|
|
|
)?;
|
2019-03-21 11:53:18 -07:00
|
|
|
|
2019-03-29 20:00:36 -07:00
|
|
|
Self::reset_poh_recorder(
|
2019-05-23 23:20:04 -07:00
|
|
|
&my_pubkey,
|
2019-03-29 20:00:36 -07:00
|
|
|
&blocktree,
|
|
|
|
&bank,
|
|
|
|
&poh_recorder,
|
2019-04-19 02:39:44 -07:00
|
|
|
&leader_schedule_cache,
|
2019-03-29 20:00:36 -07:00
|
|
|
);
|
2019-07-18 14:54:27 -07:00
|
|
|
tpu_has_bank = false;
|
2019-03-07 15:49:07 -08:00
|
|
|
}
|
|
|
|
|
2019-07-18 14:54:27 -07:00
|
|
|
if !tpu_has_bank {
|
|
|
|
Self::maybe_start_leader(
|
|
|
|
&my_pubkey,
|
|
|
|
&bank_forks,
|
|
|
|
&poh_recorder,
|
|
|
|
&leader_schedule_cache,
|
|
|
|
);
|
2019-07-23 19:19:20 -07:00
|
|
|
|
|
|
|
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
|
|
|
|
Self::log_leader_change(
|
|
|
|
&my_pubkey,
|
|
|
|
bank.slot(),
|
|
|
|
&mut current_leader,
|
|
|
|
&my_pubkey,
|
|
|
|
);
|
|
|
|
}
|
2019-07-18 14:54:27 -07:00
|
|
|
}
|
2019-03-05 17:56:51 -08:00
|
|
|
|
2019-02-26 21:57:45 -08:00
|
|
|
inc_new_counter_info!(
|
|
|
|
"replicate_stage-duration",
|
|
|
|
duration_as_ms(&now.elapsed()) as usize
|
|
|
|
);
|
2019-07-18 14:07:32 -07:00
|
|
|
if did_complete_bank {
|
2019-07-18 12:04:53 -07:00
|
|
|
//just processed a bank, skip the signal; maybe there's more slots available
|
|
|
|
continue;
|
|
|
|
}
|
2019-02-26 21:57:45 -08:00
|
|
|
let timer = Duration::from_millis(100);
|
|
|
|
let result = ledger_signal_receiver.recv_timeout(timer);
|
|
|
|
match result {
|
|
|
|
Err(RecvTimeoutError::Timeout) => continue,
|
|
|
|
Err(_) => break,
|
2019-03-03 16:44:06 -08:00
|
|
|
Ok(_) => trace!("blocktree signal"),
|
2019-02-26 21:57:45 -08:00
|
|
|
};
|
2018-05-30 13:38:15 -07:00
|
|
|
}
|
2019-02-26 21:57:45 -08:00
|
|
|
Ok(())
|
2018-12-07 19:01:28 -08:00
|
|
|
})
|
|
|
|
.unwrap();
|
2019-07-26 10:27:57 -07:00
|
|
|
(
|
|
|
|
Self {
|
|
|
|
t_replay,
|
|
|
|
t_lockouts,
|
|
|
|
},
|
|
|
|
root_bank_receiver,
|
|
|
|
)
|
2019-02-10 16:28:52 -08:00
|
|
|
}
|
2019-07-09 15:36:30 -07:00
|
|
|
|
2019-07-23 19:19:20 -07:00
|
|
|
fn log_leader_change(
|
|
|
|
my_pubkey: &Pubkey,
|
|
|
|
bank_slot: u64,
|
|
|
|
current_leader: &mut Option<Pubkey>,
|
|
|
|
new_leader: &Pubkey,
|
|
|
|
) {
|
|
|
|
if let Some(ref current_leader) = current_leader {
|
|
|
|
if current_leader != new_leader {
|
|
|
|
let msg = if current_leader == my_pubkey {
|
2019-08-02 10:08:42 -07:00
|
|
|
". I am no longer the leader"
|
2019-07-23 19:19:20 -07:00
|
|
|
} else if new_leader == my_pubkey {
|
2019-08-02 10:08:42 -07:00
|
|
|
". I am now the leader"
|
2019-07-23 19:19:20 -07:00
|
|
|
} else {
|
|
|
|
""
|
|
|
|
};
|
|
|
|
info!(
|
2019-08-02 10:08:42 -07:00
|
|
|
"LEADER CHANGE at slot: {} leader: {}{}",
|
2019-07-23 19:19:20 -07:00
|
|
|
bank_slot, new_leader, msg
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
current_leader.replace(new_leader.to_owned());
|
|
|
|
}
|
|
|
|
|
2019-07-09 15:36:30 -07:00
|
|
|
fn maybe_start_leader(
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey: &Pubkey,
|
2019-03-05 17:56:51 -08:00
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
|
|
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
2019-04-19 02:39:44 -07:00
|
|
|
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
2019-03-05 17:56:51 -08:00
|
|
|
) {
|
2019-07-18 14:54:27 -07:00
|
|
|
// all the individual calls to poh_recorder.lock() are designed to
|
|
|
|
// increase granularity, decrease contention
|
2019-07-09 22:06:47 -07:00
|
|
|
|
2019-07-18 14:54:27 -07:00
|
|
|
assert!(!poh_recorder.lock().unwrap().has_bank());
|
2019-07-09 22:06:47 -07:00
|
|
|
|
2019-07-30 13:18:33 -07:00
|
|
|
let (reached_leader_tick, _grace_ticks, poh_slot, parent_slot) =
|
2019-07-18 14:54:27 -07:00
|
|
|
poh_recorder.lock().unwrap().reached_leader_tick();
|
2019-07-09 15:36:30 -07:00
|
|
|
|
2019-07-18 14:54:27 -07:00
|
|
|
if !reached_leader_tick {
|
|
|
|
trace!("{} poh_recorder hasn't reached_leader_tick", my_pubkey);
|
2019-07-09 15:36:30 -07:00
|
|
|
return;
|
|
|
|
}
|
2019-07-18 14:54:27 -07:00
|
|
|
trace!("{} reached_leader_tick", my_pubkey,);
|
2019-07-09 15:36:30 -07:00
|
|
|
|
|
|
|
let parent = bank_forks
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.get(parent_slot)
|
|
|
|
.expect("parent_slot doesn't exist in bank forks")
|
|
|
|
.clone();
|
|
|
|
|
2019-07-18 14:54:27 -07:00
|
|
|
assert!(parent.is_frozen());
|
|
|
|
|
|
|
|
if bank_forks.read().unwrap().get(poh_slot).is_some() {
|
|
|
|
warn!("{} already have bank in forks at {}?", my_pubkey, poh_slot);
|
2019-07-09 15:36:30 -07:00
|
|
|
return;
|
|
|
|
}
|
2019-07-18 14:54:27 -07:00
|
|
|
trace!(
|
|
|
|
"{} poh_slot {} parent_slot {}",
|
|
|
|
my_pubkey,
|
|
|
|
poh_slot,
|
|
|
|
parent_slot
|
|
|
|
);
|
2019-07-09 15:36:30 -07:00
|
|
|
|
|
|
|
if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) {
|
|
|
|
trace!(
|
|
|
|
"{} leader {} at poh slot: {}",
|
|
|
|
my_pubkey,
|
|
|
|
next_leader,
|
|
|
|
poh_slot
|
|
|
|
);
|
|
|
|
|
2019-07-09 22:06:47 -07:00
|
|
|
// I guess I missed my slot
|
|
|
|
if next_leader != *my_pubkey {
|
|
|
|
return;
|
2019-07-09 15:36:30 -07:00
|
|
|
}
|
2019-07-09 22:06:47 -07:00
|
|
|
|
2019-07-30 13:18:33 -07:00
|
|
|
datapoint_info!(
|
2019-07-09 22:06:47 -07:00
|
|
|
"replay_stage-new_leader",
|
2019-07-30 13:18:33 -07:00
|
|
|
("slot", poh_slot, i64),
|
|
|
|
("leader", next_leader.to_string(), String),
|
2019-07-09 22:06:47 -07:00
|
|
|
);
|
|
|
|
|
|
|
|
let tpu_bank = bank_forks
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
|
|
|
.insert(Bank::new_from_parent(&parent, my_pubkey, poh_slot));
|
|
|
|
|
|
|
|
poh_recorder.lock().unwrap().set_bank(&tpu_bank);
|
2019-07-09 15:36:30 -07:00
|
|
|
} else {
|
|
|
|
error!("{} No next leader found", my_pubkey);
|
2019-03-05 17:56:51 -08:00
|
|
|
}
|
|
|
|
}
|
2019-06-20 15:50:41 -07:00
|
|
|
|
|
|
|
// Returns Some(result) if the `result` is a fatal error, which is an error that will cause a
|
|
|
|
// bank to be marked as dead/corrupted
|
|
|
|
fn is_replay_result_fatal(result: &Result<()>) -> bool {
|
|
|
|
match result {
|
|
|
|
Err(Error::TransactionError(e)) => {
|
|
|
|
// Transactions withand transaction errors mean this fork is bogus
|
|
|
|
let tx_error = Err(e.clone());
|
|
|
|
!Bank::can_commit(&tx_error)
|
|
|
|
}
|
|
|
|
Err(Error::BlobError(BlobError::VerificationFailed)) => true,
|
2019-07-07 14:37:12 -07:00
|
|
|
Err(Error::BlocktreeError(BlocktreeError::InvalidBlobData(_))) => true,
|
2019-06-20 15:50:41 -07:00
|
|
|
_ => false,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-30 13:18:33 -07:00
|
|
|
// Returns the replay result and the number of replayed transactions
|
2019-03-27 04:30:26 -07:00
|
|
|
fn replay_blocktree_into_bank(
|
2019-02-26 21:57:45 -08:00
|
|
|
bank: &Bank,
|
|
|
|
blocktree: &Blocktree,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-07-30 13:18:33 -07:00
|
|
|
) -> (Result<()>, usize) {
|
|
|
|
let mut tx_count = 0;
|
2019-07-07 14:37:12 -07:00
|
|
|
let result =
|
|
|
|
Self::load_blocktree_entries(bank, blocktree, progress).and_then(|(entries, num)| {
|
2019-08-20 17:16:06 -07:00
|
|
|
debug!("Replaying {:?} entries, num {:?}", entries.len(), num);
|
2019-07-30 13:18:33 -07:00
|
|
|
tx_count += entries.iter().map(|e| e.transactions.len()).sum::<usize>();
|
2019-07-07 14:37:12 -07:00
|
|
|
Self::replay_entries_into_bank(bank, entries, progress, num)
|
|
|
|
});
|
2019-06-20 15:50:41 -07:00
|
|
|
|
|
|
|
if Self::is_replay_result_fatal(&result) {
|
2019-07-07 14:37:12 -07:00
|
|
|
warn!(
|
|
|
|
"Fatal replay result in slot: {}, result: {:?}",
|
|
|
|
bank.slot(),
|
|
|
|
result
|
|
|
|
);
|
2019-06-20 15:50:41 -07:00
|
|
|
Self::mark_dead_slot(bank.slot(), blocktree, progress);
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
2019-06-20 15:50:41 -07:00
|
|
|
|
2019-07-30 13:18:33 -07:00
|
|
|
(result, tx_count)
|
2019-06-20 15:50:41 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn mark_dead_slot(slot: u64, blocktree: &Blocktree, progress: &mut HashMap<u64, ForkProgress>) {
|
|
|
|
// Remove from progress map so we no longer try to replay this bank
|
|
|
|
let mut progress_entry = progress
|
|
|
|
.get_mut(&slot)
|
|
|
|
.expect("Progress entry must exist after call to replay_entries_into_bank()");
|
|
|
|
progress_entry.is_dead = true;
|
|
|
|
blocktree
|
|
|
|
.set_dead_slot(slot)
|
|
|
|
.expect("Failed to mark slot as dead in blocktree");
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
|
|
|
|
2019-05-03 16:27:53 -07:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2019-03-21 11:53:18 -07:00
|
|
|
fn handle_votable_bank<T>(
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
2019-06-24 13:41:23 -07:00
|
|
|
tower: &mut Tower,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-05-20 13:32:32 -07:00
|
|
|
vote_account: &Pubkey,
|
2019-03-21 11:53:18 -07:00
|
|
|
voting_keypair: &Option<Arc<T>>,
|
|
|
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
2019-04-06 19:41:22 -07:00
|
|
|
blocktree: &Arc<Blocktree>,
|
2019-04-30 13:23:21 -07:00
|
|
|
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
2019-06-11 18:27:47 -07:00
|
|
|
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
|
2019-07-26 10:27:57 -07:00
|
|
|
lockouts: HashMap<u64, StakeLockout>,
|
2019-08-14 13:30:21 -07:00
|
|
|
total_staked: u64,
|
2019-07-26 10:27:57 -07:00
|
|
|
lockouts_sender: &Sender<LockoutAggregationData>,
|
2019-07-31 17:58:10 -07:00
|
|
|
snapshot_package_sender: &Option<SnapshotPackageSender>,
|
2019-04-15 13:12:28 -07:00
|
|
|
) -> Result<()>
|
|
|
|
where
|
2019-03-21 11:53:18 -07:00
|
|
|
T: 'static + KeypairUtil + Send + Sync,
|
|
|
|
{
|
2019-07-14 18:48:15 -07:00
|
|
|
trace!("handle votable bank {}", bank.slot());
|
2019-06-24 13:41:23 -07:00
|
|
|
if let Some(new_root) = tower.record_vote(bank.slot(), bank.hash()) {
|
2019-05-20 15:01:55 -07:00
|
|
|
// get the root bank before squash
|
|
|
|
let root_bank = bank_forks
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.get(new_root)
|
|
|
|
.expect("Root bank doesn't exist")
|
|
|
|
.clone();
|
2019-06-11 18:27:47 -07:00
|
|
|
let mut rooted_banks = root_bank.parents();
|
|
|
|
rooted_banks.push(root_bank);
|
|
|
|
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
|
2019-08-27 15:09:41 -07:00
|
|
|
// Call leader schedule_cache.set_root() before blocktree.set_root() because
|
|
|
|
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to
|
|
|
|
// get blobs for repair on gossip before we update leader schedule, otherwise they may
|
|
|
|
// get dropped.
|
|
|
|
leader_schedule_cache.set_root(rooted_banks.last().unwrap());
|
2019-05-24 19:20:09 -07:00
|
|
|
blocktree
|
2019-05-29 09:43:22 -07:00
|
|
|
.set_roots(&rooted_slots)
|
|
|
|
.expect("Ledger set roots failed");
|
2019-07-31 17:58:10 -07:00
|
|
|
bank_forks
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
|
|
|
.set_root(new_root, snapshot_package_sender);
|
2019-04-12 12:03:02 -07:00
|
|
|
Self::handle_new_root(&bank_forks, progress);
|
2019-07-14 18:48:15 -07:00
|
|
|
trace!("new root {}", new_root);
|
2019-07-09 15:36:30 -07:00
|
|
|
if let Err(e) = root_bank_sender.send(rooted_banks) {
|
|
|
|
trace!("root_bank_sender failed: {:?}", e);
|
|
|
|
Err(e)?;
|
|
|
|
}
|
2019-04-12 12:03:02 -07:00
|
|
|
}
|
2019-08-14 13:30:21 -07:00
|
|
|
Self::update_confidence_cache(bank_forks, tower, lockouts, total_staked, lockouts_sender);
|
|
|
|
|
2019-03-21 11:53:18 -07:00
|
|
|
if let Some(ref voting_keypair) = voting_keypair {
|
2019-05-20 13:32:32 -07:00
|
|
|
let node_keypair = cluster_info.read().unwrap().keypair.clone();
|
|
|
|
|
2019-04-11 14:48:36 -07:00
|
|
|
// Send our last few votes along with the new one
|
2019-05-20 13:32:32 -07:00
|
|
|
let vote_ix = vote_instruction::vote(
|
|
|
|
&vote_account,
|
|
|
|
&voting_keypair.pubkey(),
|
2019-06-24 13:41:23 -07:00
|
|
|
tower.recent_votes(),
|
2019-04-11 14:48:36 -07:00
|
|
|
);
|
2019-05-20 13:32:32 -07:00
|
|
|
|
2019-05-31 11:45:17 -07:00
|
|
|
let mut vote_tx =
|
2019-07-09 15:36:30 -07:00
|
|
|
Transaction::new_with_payer(vec![vote_ix], Some(&node_keypair.pubkey()));
|
2019-05-31 11:45:17 -07:00
|
|
|
|
2019-05-20 13:32:32 -07:00
|
|
|
let blockhash = bank.last_blockhash();
|
|
|
|
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
|
|
|
|
vote_tx.partial_sign(&[voting_keypair.as_ref()], blockhash);
|
2019-03-25 15:08:22 -07:00
|
|
|
cluster_info.write().unwrap().push_vote(vote_tx);
|
2019-03-21 11:53:18 -07:00
|
|
|
}
|
2019-04-15 13:12:28 -07:00
|
|
|
Ok(())
|
2019-03-21 11:53:18 -07:00
|
|
|
}
|
|
|
|
|
2019-07-26 10:27:57 -07:00
|
|
|
fn update_confidence_cache(
|
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
|
|
|
tower: &Tower,
|
|
|
|
lockouts: HashMap<u64, StakeLockout>,
|
2019-08-14 13:30:21 -07:00
|
|
|
total_staked: u64,
|
2019-07-26 10:27:57 -07:00
|
|
|
lockouts_sender: &Sender<LockoutAggregationData>,
|
|
|
|
) {
|
2019-08-14 13:30:21 -07:00
|
|
|
{
|
|
|
|
let mut bank_forks = bank_forks.write().unwrap();
|
|
|
|
for (fork, stake_lockout) in lockouts.iter() {
|
|
|
|
if tower.root().is_none() || *fork >= tower.root().unwrap() {
|
|
|
|
bank_forks.cache_fork_confidence(
|
|
|
|
*fork,
|
|
|
|
stake_lockout.stake(),
|
|
|
|
total_staked,
|
|
|
|
stake_lockout.lockout(),
|
|
|
|
);
|
|
|
|
}
|
2019-07-26 10:27:57 -07:00
|
|
|
}
|
|
|
|
}
|
2019-08-14 13:30:21 -07:00
|
|
|
|
2019-07-26 10:27:57 -07:00
|
|
|
let bank_forks_clone = bank_forks.clone();
|
|
|
|
let root = tower.root();
|
|
|
|
|
|
|
|
if let Err(e) = lockouts_sender.send((lockouts, root, bank_forks_clone)) {
|
|
|
|
trace!("lockouts_sender failed: {:?}", e);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-21 11:53:18 -07:00
|
|
|
fn reset_poh_recorder(
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey: &Pubkey,
|
2019-03-29 20:00:36 -07:00
|
|
|
blocktree: &Blocktree,
|
2019-03-21 11:53:18 -07:00
|
|
|
bank: &Arc<Bank>,
|
|
|
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
2019-04-19 02:39:44 -07:00
|
|
|
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
2019-03-21 11:53:18 -07:00
|
|
|
) {
|
2019-03-29 20:00:36 -07:00
|
|
|
let next_leader_slot =
|
2019-05-23 23:20:04 -07:00
|
|
|
leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree));
|
2019-07-17 14:10:15 -07:00
|
|
|
poh_recorder
|
|
|
|
.lock()
|
|
|
|
.unwrap()
|
|
|
|
.reset(bank.last_blockhash(), bank.slot(), next_leader_slot);
|
2019-07-23 19:19:20 -07:00
|
|
|
|
|
|
|
let next_leader_msg = if let Some(next_leader_slot) = next_leader_slot {
|
2019-08-02 10:08:42 -07:00
|
|
|
format!("My next leader slot is {}", next_leader_slot.0)
|
2019-07-23 19:19:20 -07:00
|
|
|
} else {
|
2019-08-02 10:08:42 -07:00
|
|
|
"I am not in the leader schedule yet".to_owned()
|
2019-07-23 19:19:20 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
info!(
|
2019-08-02 10:08:42 -07:00
|
|
|
"{} voted and reset PoH at tick height {}. {}",
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey,
|
2019-03-21 11:53:18 -07:00
|
|
|
bank.tick_height(),
|
2019-07-23 19:19:20 -07:00
|
|
|
next_leader_msg,
|
2019-03-21 11:53:18 -07:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
fn replay_active_banks(
|
|
|
|
blocktree: &Arc<Blocktree>,
|
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey: &Pubkey,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-07-20 13:13:55 -07:00
|
|
|
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
2019-07-18 12:04:53 -07:00
|
|
|
) -> bool {
|
2019-07-18 14:07:32 -07:00
|
|
|
let mut did_complete_bank = false;
|
2019-07-30 13:18:33 -07:00
|
|
|
let mut tx_count = 0;
|
2019-03-21 11:53:18 -07:00
|
|
|
let active_banks = bank_forks.read().unwrap().active_banks();
|
|
|
|
trace!("active banks {:?}", active_banks);
|
|
|
|
|
|
|
|
for bank_slot in &active_banks {
|
2019-06-20 15:50:41 -07:00
|
|
|
// If the fork was marked as dead, don't replay it
|
|
|
|
if progress.get(bank_slot).map(|p| p.is_dead).unwrap_or(false) {
|
2019-08-20 17:16:06 -07:00
|
|
|
debug!("bank_slot {:?} is marked dead", *bank_slot);
|
2019-06-20 15:50:41 -07:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-03-21 11:53:18 -07:00
|
|
|
let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();
|
2019-07-30 13:18:33 -07:00
|
|
|
if bank.collector_id() != my_pubkey {
|
|
|
|
let (replay_result, replay_tx_count) =
|
|
|
|
Self::replay_blocktree_into_bank(&bank, &blocktree, progress);
|
|
|
|
tx_count += replay_tx_count;
|
|
|
|
if Self::is_replay_result_fatal(&replay_result) {
|
|
|
|
trace!("replay_result_fatal slot {}", bank_slot);
|
|
|
|
// If the bank was corrupted, don't try to run the below logic to check if the
|
|
|
|
// bank is completed
|
|
|
|
continue;
|
|
|
|
}
|
2019-03-21 11:53:18 -07:00
|
|
|
}
|
2019-07-09 15:36:30 -07:00
|
|
|
assert_eq!(*bank_slot, bank.slot());
|
|
|
|
if bank.tick_height() == bank.max_tick_height() {
|
2019-07-18 14:07:32 -07:00
|
|
|
did_complete_bank = true;
|
2019-07-20 13:13:55 -07:00
|
|
|
Self::process_completed_bank(my_pubkey, bank, slot_full_senders);
|
2019-07-09 15:36:30 -07:00
|
|
|
} else {
|
|
|
|
trace!(
|
|
|
|
"bank {} not completed tick_height: {}, max_tick_height: {}",
|
|
|
|
bank.slot(),
|
|
|
|
bank.tick_height(),
|
|
|
|
bank.max_tick_height()
|
|
|
|
);
|
2019-03-21 11:53:18 -07:00
|
|
|
}
|
|
|
|
}
|
2019-08-12 15:15:34 -07:00
|
|
|
inc_new_counter_info!("replay_stage-replay_transactions", tx_count);
|
2019-07-18 14:07:32 -07:00
|
|
|
did_complete_bank
|
2019-03-21 11:53:18 -07:00
|
|
|
}
|
|
|
|
|
2019-08-14 13:30:21 -07:00
|
|
|
#[allow(clippy::type_complexity)]
|
2019-03-21 11:53:18 -07:00
|
|
|
fn generate_votable_banks(
|
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
2019-06-24 13:41:23 -07:00
|
|
|
tower: &Tower,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-08-14 13:30:21 -07:00
|
|
|
) -> Vec<(u128, Arc<Bank>, HashMap<u64, StakeLockout>, u64)> {
|
2019-06-24 13:41:23 -07:00
|
|
|
let tower_start = Instant::now();
|
|
|
|
// Tower voting
|
2019-03-21 11:53:18 -07:00
|
|
|
let descendants = bank_forks.read().unwrap().descendants();
|
|
|
|
let ancestors = bank_forks.read().unwrap().ancestors();
|
|
|
|
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
|
|
|
|
|
|
|
|
trace!("frozen_banks {}", frozen_banks.len());
|
2019-08-14 13:30:21 -07:00
|
|
|
let mut votable: Vec<(u128, Arc<Bank>, HashMap<u64, StakeLockout>, u64)> = frozen_banks
|
2019-03-21 11:53:18 -07:00
|
|
|
.values()
|
|
|
|
.filter(|b| {
|
|
|
|
let is_votable = b.is_votable();
|
|
|
|
trace!("bank is votable: {} {}", b.slot(), is_votable);
|
|
|
|
is_votable
|
|
|
|
})
|
|
|
|
.filter(|b| {
|
2019-06-24 13:41:23 -07:00
|
|
|
let has_voted = tower.has_voted(b.slot());
|
2019-08-15 18:58:46 -07:00
|
|
|
trace!("bank has_voted: {} {}", b.slot(), has_voted);
|
2019-03-21 11:53:18 -07:00
|
|
|
!has_voted
|
|
|
|
})
|
|
|
|
.filter(|b| {
|
2019-06-24 13:41:23 -07:00
|
|
|
let is_locked_out = tower.is_locked_out(b.slot(), &descendants);
|
2019-03-21 11:53:18 -07:00
|
|
|
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
|
|
|
|
!is_locked_out
|
|
|
|
})
|
|
|
|
.map(|bank| {
|
|
|
|
(
|
|
|
|
bank,
|
2019-06-24 13:41:23 -07:00
|
|
|
tower.collect_vote_lockouts(
|
2019-04-05 14:23:00 -07:00
|
|
|
bank.slot(),
|
|
|
|
bank.vote_accounts().into_iter(),
|
|
|
|
&ancestors,
|
|
|
|
),
|
2019-03-21 11:53:18 -07:00
|
|
|
)
|
|
|
|
})
|
2019-08-14 13:30:21 -07:00
|
|
|
.filter(|(b, (stake_lockouts, total_staked))| {
|
|
|
|
let vote_threshold =
|
|
|
|
tower.check_vote_stake_threshold(b.slot(), &stake_lockouts, *total_staked);
|
|
|
|
Self::confirm_forks(tower, &stake_lockouts, *total_staked, progress, bank_forks);
|
2019-03-21 11:53:18 -07:00
|
|
|
debug!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
|
|
|
|
vote_threshold
|
|
|
|
})
|
2019-08-14 13:30:21 -07:00
|
|
|
.map(|(b, (stake_lockouts, total_staked))| {
|
2019-07-26 10:27:57 -07:00
|
|
|
(
|
|
|
|
tower.calculate_weight(&stake_lockouts),
|
|
|
|
b.clone(),
|
|
|
|
stake_lockouts,
|
2019-08-14 13:30:21 -07:00
|
|
|
total_staked,
|
2019-07-26 10:27:57 -07:00
|
|
|
)
|
|
|
|
})
|
2019-03-21 11:53:18 -07:00
|
|
|
.collect();
|
|
|
|
|
|
|
|
votable.sort_by_key(|b| b.0);
|
2019-06-24 13:41:23 -07:00
|
|
|
let ms = timing::duration_as_ms(&tower_start.elapsed());
|
2019-03-21 11:53:18 -07:00
|
|
|
|
|
|
|
trace!("votable_banks {}", votable.len());
|
|
|
|
if !votable.is_empty() {
|
|
|
|
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
|
|
|
|
info!(
|
2019-06-24 13:41:23 -07:00
|
|
|
"@{:?} tower duration: {:?} len: {} weights: {:?}",
|
2019-03-21 11:53:18 -07:00
|
|
|
timing::timestamp(),
|
|
|
|
ms,
|
|
|
|
votable.len(),
|
|
|
|
weights
|
|
|
|
);
|
|
|
|
}
|
2019-06-24 13:41:23 -07:00
|
|
|
inc_new_counter_info!("replay_stage-tower_duration", ms as usize);
|
2019-03-21 11:53:18 -07:00
|
|
|
|
|
|
|
votable
|
|
|
|
}
|
|
|
|
|
2019-03-27 04:30:26 -07:00
|
|
|
fn confirm_forks(
|
2019-06-24 13:41:23 -07:00
|
|
|
tower: &Tower,
|
2019-03-27 04:30:26 -07:00
|
|
|
stake_lockouts: &HashMap<u64, StakeLockout>,
|
2019-08-14 13:30:21 -07:00
|
|
|
total_staked: u64,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-04-09 11:45:38 -07:00
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
2019-03-27 04:30:26 -07:00
|
|
|
) {
|
2019-03-27 05:21:19 -07:00
|
|
|
progress.retain(|slot, prog| {
|
|
|
|
let duration = timing::timestamp() - prog.started_ms;
|
2019-08-14 13:30:21 -07:00
|
|
|
if tower.is_slot_confirmed(*slot, stake_lockouts, total_staked)
|
2019-04-09 11:45:38 -07:00
|
|
|
&& bank_forks
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.get(*slot)
|
|
|
|
.map(|s| s.is_frozen())
|
|
|
|
.unwrap_or(true)
|
|
|
|
{
|
2019-07-22 08:22:21 -07:00
|
|
|
info!("validator fork confirmed {} {}ms", *slot, duration);
|
2019-05-17 17:34:05 -07:00
|
|
|
datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64));
|
2019-03-27 05:21:19 -07:00
|
|
|
false
|
|
|
|
} else {
|
|
|
|
debug!(
|
2019-07-22 08:22:21 -07:00
|
|
|
"validator fork not confirmed {} {}ms {:?}",
|
2019-03-27 05:21:19 -07:00
|
|
|
*slot,
|
|
|
|
duration,
|
|
|
|
stake_lockouts.get(slot)
|
|
|
|
);
|
|
|
|
true
|
2019-03-27 04:30:26 -07:00
|
|
|
}
|
2019-03-27 05:21:19 -07:00
|
|
|
});
|
2019-03-27 04:30:26 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn load_blocktree_entries(
|
2019-02-26 21:57:45 -08:00
|
|
|
bank: &Bank,
|
|
|
|
blocktree: &Blocktree,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-04-15 13:12:28 -07:00
|
|
|
) -> Result<(Vec<Entry>, usize)> {
|
2019-03-04 16:40:28 -08:00
|
|
|
let bank_slot = bank.slot();
|
2019-03-02 10:20:10 -08:00
|
|
|
let bank_progress = &mut progress
|
2019-03-04 16:40:28 -08:00
|
|
|
.entry(bank_slot)
|
2019-05-30 21:31:35 -07:00
|
|
|
.or_insert_with(|| ForkProgress::new(bank.last_blockhash()));
|
2019-08-20 17:16:06 -07:00
|
|
|
blocktree.get_slot_entries_with_shred_count(bank_slot, bank_progress.num_blobs as u64)
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
|
|
|
|
2019-03-27 04:30:26 -07:00
|
|
|
fn replay_entries_into_bank(
|
2019-02-26 21:57:45 -08:00
|
|
|
bank: &Bank,
|
|
|
|
entries: Vec<Entry>,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-02-26 21:57:45 -08:00
|
|
|
num: usize,
|
2019-04-15 13:12:28 -07:00
|
|
|
) -> Result<()> {
|
2019-03-02 10:20:10 -08:00
|
|
|
let bank_progress = &mut progress
|
|
|
|
.entry(bank.slot())
|
2019-05-30 21:31:35 -07:00
|
|
|
.or_insert_with(|| ForkProgress::new(bank.last_blockhash()));
|
2019-03-27 04:30:26 -07:00
|
|
|
let result = Self::verify_and_process_entries(&bank, &entries, &bank_progress.last_entry);
|
|
|
|
bank_progress.num_blobs += num;
|
2019-02-26 21:57:45 -08:00
|
|
|
if let Some(last_entry) = entries.last() {
|
2019-03-27 04:30:26 -07:00
|
|
|
bank_progress.last_entry = last_entry.hash;
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
2019-06-20 15:50:41 -07:00
|
|
|
|
2019-02-26 21:57:45 -08:00
|
|
|
result
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn verify_and_process_entries(
|
|
|
|
bank: &Bank,
|
|
|
|
entries: &[Entry],
|
|
|
|
last_entry: &Hash,
|
2019-04-15 13:12:28 -07:00
|
|
|
) -> Result<()> {
|
2019-02-26 21:57:45 -08:00
|
|
|
if !entries.verify(last_entry) {
|
|
|
|
trace!(
|
|
|
|
"entry verification failed {} {} {} {}",
|
|
|
|
entries.len(),
|
|
|
|
bank.tick_height(),
|
|
|
|
last_entry,
|
2019-03-02 10:25:16 -08:00
|
|
|
bank.last_blockhash()
|
2019-02-26 21:57:45 -08:00
|
|
|
);
|
2019-06-20 15:50:41 -07:00
|
|
|
|
|
|
|
datapoint_error!(
|
|
|
|
"replay-stage-entry_verification_failure",
|
|
|
|
("slot", bank.slot(), i64),
|
|
|
|
("last_entry", last_entry.to_string(), String),
|
|
|
|
);
|
2019-04-15 13:12:28 -07:00
|
|
|
return Err(Error::BlobError(BlobError::VerificationFailed));
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
|
|
|
blocktree_processor::process_entries(bank, entries)?;
|
2019-02-24 01:06:46 -08:00
|
|
|
|
2019-02-26 21:57:45 -08:00
|
|
|
Ok(())
|
2019-02-24 01:06:46 -08:00
|
|
|
}
|
|
|
|
|
2019-03-19 17:30:36 -07:00
|
|
|
fn handle_new_root(
|
|
|
|
bank_forks: &Arc<RwLock<BankForks>>,
|
2019-03-27 04:30:26 -07:00
|
|
|
progress: &mut HashMap<u64, ForkProgress>,
|
2019-03-19 17:30:36 -07:00
|
|
|
) {
|
|
|
|
let r_bank_forks = bank_forks.read().unwrap();
|
|
|
|
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
|
|
|
|
}
|
|
|
|
|
2019-03-13 14:06:12 -07:00
|
|
|
fn process_completed_bank(
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey: &Pubkey,
|
2019-03-13 14:06:12 -07:00
|
|
|
bank: Arc<Bank>,
|
2019-07-20 13:13:55 -07:00
|
|
|
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
2019-03-13 14:06:12 -07:00
|
|
|
) {
|
|
|
|
bank.freeze();
|
|
|
|
info!("bank frozen {}", bank.slot());
|
2019-07-20 13:13:55 -07:00
|
|
|
slot_full_senders.iter().for_each(|sender| {
|
|
|
|
if let Err(e) = sender.send((bank.slot(), *bank.collector_id())) {
|
|
|
|
trace!("{} slot_full alert failed: {:?}", my_pubkey, e);
|
|
|
|
}
|
|
|
|
});
|
2019-03-13 14:06:12 -07:00
|
|
|
}
|
|
|
|
|
2019-04-19 02:39:44 -07:00
|
|
|
fn generate_new_bank_forks(
|
|
|
|
blocktree: &Blocktree,
|
|
|
|
forks: &mut BankForks,
|
|
|
|
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
|
|
|
) {
|
2019-02-07 15:10:54 -08:00
|
|
|
// Find the next slot that chains to the old slot
|
2019-02-26 21:57:45 -08:00
|
|
|
let frozen_banks = forks.frozen_banks();
|
2019-03-04 16:40:28 -08:00
|
|
|
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
|
2019-02-26 21:57:45 -08:00
|
|
|
let next_slots = blocktree
|
2019-03-04 16:40:28 -08:00
|
|
|
.get_slots_since(&frozen_bank_slots)
|
2019-02-26 21:57:45 -08:00
|
|
|
.expect("Db error");
|
2019-03-18 16:04:36 -07:00
|
|
|
// Filter out what we've already seen
|
2019-07-17 14:10:15 -07:00
|
|
|
trace!("generate new forks {:?}", {
|
|
|
|
let mut next_slots = next_slots.iter().collect::<Vec<_>>();
|
|
|
|
next_slots.sort();
|
|
|
|
next_slots
|
|
|
|
});
|
2019-02-26 21:57:45 -08:00
|
|
|
for (parent_id, children) in next_slots {
|
|
|
|
let parent_bank = frozen_banks
|
|
|
|
.get(&parent_id)
|
|
|
|
.expect("missing parent in bank forks")
|
|
|
|
.clone();
|
|
|
|
for child_id in children {
|
2019-03-03 16:44:06 -08:00
|
|
|
if forks.get(child_id).is_some() {
|
2019-03-18 16:04:36 -07:00
|
|
|
trace!("child already active or frozen {}", child_id);
|
2019-03-03 16:44:06 -08:00
|
|
|
continue;
|
2019-02-28 19:49:22 -08:00
|
|
|
}
|
2019-04-19 02:39:44 -07:00
|
|
|
let leader = leader_schedule_cache
|
2019-04-30 13:23:21 -07:00
|
|
|
.slot_leader_at(child_id, Some(&parent_bank))
|
2019-04-19 02:39:44 -07:00
|
|
|
.unwrap();
|
2019-03-03 16:44:06 -08:00
|
|
|
info!("new fork:{} parent:{}", child_id, parent_id);
|
2019-03-18 20:23:34 -07:00
|
|
|
forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id));
|
2019-02-26 21:57:45 -08:00
|
|
|
}
|
|
|
|
}
|
2019-02-07 15:10:54 -08:00
|
|
|
}
|
2018-05-22 14:26:28 -07:00
|
|
|
}
|
2018-07-03 21:14:08 -07:00
|
|
|
|
2018-12-07 14:09:29 -08:00
|
|
|
impl Service for ReplayStage {
|
2019-01-26 00:28:08 -08:00
|
|
|
type JoinReturnType = ();
|
2018-10-10 16:49:41 -07:00
|
|
|
|
2019-01-26 00:28:08 -08:00
|
|
|
fn join(self) -> thread::Result<()> {
|
2019-07-26 10:27:57 -07:00
|
|
|
self.t_lockouts.join()?;
|
2019-02-26 21:57:45 -08:00
|
|
|
self.t_replay.join().map(|_| ())
|
2018-10-10 16:49:41 -07:00
|
|
|
}
|
|
|
|
}
|
2018-09-13 14:00:17 -07:00
|
|
|
|
2019-07-26 10:27:57 -07:00
|
|
|
/// Payload handed to the stake-lockout aggregation thread: the per-slot
/// lockouts to aggregate, the current root slot (if one has been set), and
/// the bank forks whose stake-weighted-lockout cache will be updated.
type LockoutAggregationData = (
    HashMap<u64, StakeLockout>, // lockouts
    Option<u64>,                // root
    Arc<RwLock<BankForks>>,     // bank_forks
);
|
|
|
|
|
|
|
|
fn aggregate_stake_lockouts(
|
|
|
|
exit: &Arc<AtomicBool>,
|
|
|
|
) -> (Sender<LockoutAggregationData>, JoinHandle<()>) {
|
|
|
|
let (lockouts_sender, lockouts_receiver): (
|
|
|
|
Sender<LockoutAggregationData>,
|
|
|
|
Receiver<LockoutAggregationData>,
|
|
|
|
) = channel();
|
|
|
|
let exit_ = exit.clone();
|
|
|
|
(
|
|
|
|
lockouts_sender,
|
|
|
|
Builder::new()
|
|
|
|
.name("solana-aggregate-stake-lockouts".to_string())
|
|
|
|
.spawn(move || loop {
|
|
|
|
if exit_.load(Ordering::Relaxed) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if let Ok((lockouts, root, bank_forks)) = lockouts_receiver.try_recv() {
|
|
|
|
let ancestors = bank_forks.read().unwrap().ancestors();
|
|
|
|
let stake_weighted_lockouts =
|
|
|
|
Tower::aggregate_stake_lockouts(root, &ancestors, lockouts);
|
|
|
|
let mut w_bank_forks = bank_forks.write().unwrap();
|
|
|
|
for (fork, stake_weighted_lockout) in stake_weighted_lockouts.iter() {
|
|
|
|
if root.is_none() || *fork >= root.unwrap() {
|
|
|
|
w_bank_forks
|
|
|
|
.cache_stake_weighted_lockouts(*fork, *stake_weighted_lockout)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
drop(w_bank_forks);
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.unwrap(),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2018-10-10 16:49:41 -07:00
|
|
|
#[cfg(test)]
|
|
|
|
mod test {
|
2019-01-29 00:21:27 -08:00
|
|
|
use super::*;
|
2019-07-26 10:27:57 -07:00
|
|
|
use crate::bank_forks::Confidence;
|
2019-05-03 16:27:53 -07:00
|
|
|
use crate::blocktree::get_tmp_ledger_path;
|
2019-08-20 17:16:06 -07:00
|
|
|
use crate::blocktree::tests::entries_to_test_shreds;
|
2019-06-20 15:50:41 -07:00
|
|
|
use crate::entry;
|
2019-07-11 13:58:33 -07:00
|
|
|
use crate::erasure::ErasureConfig;
|
2019-07-26 10:27:57 -07:00
|
|
|
use crate::genesis_utils::{create_genesis_block, create_genesis_block_with_leader};
|
2019-08-20 17:16:06 -07:00
|
|
|
use crate::packet::Blob;
|
2019-01-26 00:28:08 -08:00
|
|
|
use crate::replay_stage::ReplayStage;
|
2019-08-20 17:16:06 -07:00
|
|
|
use crate::shred::Shred;
|
2019-06-20 15:50:41 -07:00
|
|
|
use solana_runtime::genesis_utils::GenesisBlockInfo;
|
2019-07-07 14:37:12 -07:00
|
|
|
use solana_sdk::hash::{hash, Hash};
|
2019-06-20 15:50:41 -07:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
|
|
|
use solana_sdk::system_transaction;
|
|
|
|
use solana_sdk::transaction::TransactionError;
|
2019-07-26 10:27:57 -07:00
|
|
|
use solana_vote_api::vote_state::VoteState;
|
2018-10-17 13:42:54 -07:00
|
|
|
use std::fs::remove_dir_all;
|
2018-10-10 16:49:41 -07:00
|
|
|
use std::sync::{Arc, RwLock};
|
|
|
|
|
2019-03-18 16:04:36 -07:00
|
|
|
#[test]
fn test_child_slots_of_same_parent() {
    // Two blobs (slots 1 and 2) both chaining to frozen slot 0 must each get
    // their own bank from generate_new_bank_forks.
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree = Arc::new(
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );

        let genesis_block = create_genesis_block(10_000).genesis_block;
        let bank0 = Bank::new(&genesis_block);
        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
        let mut bank_forks = BankForks::new(0, bank0);
        // Only frozen banks are eligible parents for new forks.
        bank_forks.working_bank().freeze();

        // Insert blob for slot 1, generate new forks, check result
        let mut blob_slot_1 = Blob::default();
        blob_slot_1.set_slot(1);
        blob_slot_1.set_parent(0);
        blob_slot_1.set_erasure_config(&ErasureConfig::default());
        blocktree.insert_data_blobs(&vec![blob_slot_1]).unwrap();
        assert!(bank_forks.get(1).is_none());
        ReplayStage::generate_new_bank_forks(
            &blocktree,
            &mut bank_forks,
            &leader_schedule_cache,
        );
        assert!(bank_forks.get(1).is_some());

        // Insert blob for slot 2 (same parent as slot 1), generate new forks,
        // check result
        let mut blob_slot_2 = Blob::default();
        blob_slot_2.set_slot(2);
        blob_slot_2.set_parent(0);
        blob_slot_2.set_erasure_config(&ErasureConfig::default());
        blocktree.insert_data_blobs(&vec![blob_slot_2]).unwrap();
        assert!(bank_forks.get(2).is_none());
        ReplayStage::generate_new_bank_forks(
            &blocktree,
            &mut bank_forks,
            &leader_schedule_cache,
        );
        // Both children of slot 0 now have banks.
        assert!(bank_forks.get(1).is_some());
        assert!(bank_forks.get(2).is_some());
    }

    let _ignored = remove_dir_all(&ledger_path);
}
|
2019-03-19 17:30:36 -07:00
|
|
|
|
|
|
|
#[test]
fn test_handle_new_root() {
    let genesis_block = create_genesis_block(10_000).genesis_block;
    let bank0 = Bank::new(&genesis_block);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank0)));
    let mut progress = HashMap::new();
    // Slot 5 has no bank in bank_forks, so handle_new_root must prune it.
    progress.insert(5, ForkProgress::new(Hash::default()));
    ReplayStage::handle_new_root(&bank_forks, &mut progress);
    assert!(progress.is_empty());
}
|
2019-06-20 15:50:41 -07:00
|
|
|
|
|
|
|
#[test]
fn test_dead_fork_transaction_error() {
    // A fatal transaction error during replay (AccountNotFound) must mark the
    // fork as dead; check_dead_fork verifies the dead markers and returns the
    // replay error for inspection here.
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let missing_keypair = Keypair::new();
    let missing_keypair2 = Keypair::new();

    let res = check_dead_fork(|blockhash, slot| {
        let entry = entry::next_entry(
            blockhash,
            1,
            vec![
                system_transaction::create_user_account(
                    &keypair1,
                    &keypair2.pubkey(),
                    2,
                    *blockhash,
                ), // should be fine,
                system_transaction::transfer(
                    &missing_keypair,
                    &missing_keypair2.pubkey(),
                    2,
                    *blockhash,
                ), // should cause AccountNotFound error
            ],
        );
        entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false)
    });

    assert_matches!(
        res,
        Err(Error::TransactionError(TransactionError::AccountNotFound))
    );
}
|
2019-06-20 15:50:41 -07:00
|
|
|
|
2019-07-07 14:37:12 -07:00
|
|
|
#[test]
fn test_dead_fork_entry_verification_failure() {
    // An entry chained off the wrong hash must fail entry verification and
    // mark the fork dead with BlobError::VerificationFailed.
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let res = check_dead_fork(|blockhash, slot| {
        let bad_hash = hash(&[2; 30]);
        let entry = entry::next_entry(
            // Use wrong blockhash so that the entry causes an entry verification failure
            &bad_hash,
            1,
            vec![system_transaction::create_user_account(
                &keypair1,
                &keypair2.pubkey(),
                2,
                *blockhash,
            )],
        );
        entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false)
    });

    assert_matches!(res, Err(Error::BlobError(BlobError::VerificationFailed)));
}
|
|
|
|
|
|
|
|
#[test]
fn test_dead_fork_blob_deserialize_failure() {
    // NOTE(review): the test name and the comment below say "blob
    // deserialization failure", but the closure builds a well-formed entry and
    // the assertion expects TransactionError::AccountNotFound (keypair1 is not
    // funded in check_dead_fork's genesis). This looks like it stopped
    // exercising deserialization after the shred migration — confirm intent.
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    // Insert entry that causes blob deserialization failure

    let res = check_dead_fork(|blockhash, slot| {
        let entry = entry::next_entry(
            &blockhash,
            1,
            vec![system_transaction::create_user_account(
                &keypair1,
                &keypair2.pubkey(),
                2,
                *blockhash,
            )],
        );
        entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false)
    });

    assert_matches!(
        res,
        Err(Error::TransactionError(TransactionError::AccountNotFound))
    );
}
|
|
|
|
|
|
|
|
// Given a closure that produces shreds for a slot and a fatal expected error,
// check that replaying those shreds causes the fork to be marked as dead.
// Returns the replay error for the caller to verify.
fn check_dead_fork<F>(shred_to_insert: F) -> Result<()>
where
    // Closure receives (last_blockhash of bank0, bank0's slot) and returns the
    // shreds to insert into the blocktree for that slot.
    F: Fn(&Hash, u64) -> Vec<Shred>,
{
    let ledger_path = get_tmp_ledger_path!();
    let res = {
        let blocktree = Arc::new(
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(1000);
        let bank0 = Arc::new(Bank::new(&genesis_block));
        let mut progress = HashMap::new();
        let last_blockhash = bank0.last_blockhash();
        progress.insert(bank0.slot(), ForkProgress::new(last_blockhash));
        let shreds = shred_to_insert(&last_blockhash, bank0.slot());
        blocktree.insert_shreds(shreds).unwrap();
        let (res, _tx_count) =
            ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut progress);

        // Check that the erroring bank was marked as dead in the progress map
        assert!(progress
            .get(&bank0.slot())
            .map(|b| b.is_dead)
            .unwrap_or(false));

        // Check that the erroring bank was marked as dead in blocktree
        assert!(blocktree.is_dead(bank0.slot()));
        res
    };
    // Clean up the temporary ledger before returning the replay result.
    let _ignored = remove_dir_all(&ledger_path);
    res
}
|
2019-07-26 10:27:57 -07:00
|
|
|
|
|
|
|
#[test]
fn test_replay_confidence_cache() {
    // Simulates the leader voting on its own fork in its vote account, so
    // generate_votable_banks sees stake behind each bank.
    fn leader_vote(bank: &Arc<Bank>, pubkey: &Pubkey) {
        let mut leader_vote_account = bank.get_account(&pubkey).unwrap();
        let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
        vote_state.process_slot_vote_unchecked(bank.slot());
        vote_state.to(&mut leader_vote_account).unwrap();
        bank.store_account(&pubkey, &leader_vote_account);
    }

    // Real aggregation thread: update_confidence_cache feeds it and it caches
    // stake-weighted lockouts into bank_forks asynchronously.
    let (lockouts_sender, _) = aggregate_stake_lockouts(&Arc::new(AtomicBool::new(false)));

    let leader_pubkey = Pubkey::new_rand();
    let leader_lamports = 3;
    let genesis_block_info =
        create_genesis_block_with_leader(50, &leader_pubkey, leader_lamports);
    let mut genesis_block = genesis_block_info.genesis_block;
    let leader_voting_pubkey = genesis_block_info.voting_keypair.pubkey();
    genesis_block.epoch_warmup = false;
    genesis_block.ticks_per_slot = 4;
    let bank0 = Bank::new(&genesis_block);
    // Tick bank0 up to (but not through) a full slot, then freeze it.
    for _ in 1..genesis_block.ticks_per_slot {
        bank0.register_tick(&Hash::default());
    }
    bank0.freeze();
    let arc_bank0 = Arc::new(bank0);
    let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
        &[arc_bank0.clone()],
        vec![0],
    )));
    let pubkey = Pubkey::new_rand();
    let mut tower = Tower::new(&pubkey, &Pubkey::new_rand(), &bank_forks.read().unwrap());
    let mut progress = HashMap::new();

    // Slot 0: leader votes, then the votable banks' lockouts are pushed into
    // the confidence cache.
    leader_vote(&arc_bank0, &leader_voting_pubkey);
    let votable = ReplayStage::generate_votable_banks(&bank_forks, &tower, &mut progress);
    if let Some((_, _, lockouts, total_staked)) = votable.into_iter().last() {
        ReplayStage::update_confidence_cache(
            &bank_forks,
            &tower,
            lockouts,
            total_staked,
            &lockouts_sender,
        );
    }

    assert_eq!(
        bank_forks.read().unwrap().get_fork_confidence(0).unwrap(),
        &Confidence::new(0, 3, 2)
    );
    // Slot 1 does not exist yet, so it has no confidence entry.
    assert!(bank_forks.read().unwrap().get_fork_confidence(1).is_none());

    tower.record_vote(arc_bank0.slot(), arc_bank0.hash());

    // Build, fund, tick out, and freeze slot 1; repeat the vote/confidence
    // update cycle.
    let bank1 = Bank::new_from_parent(&arc_bank0, &Pubkey::default(), arc_bank0.slot() + 1);
    let _res = bank1.transfer(10, &genesis_block_info.mint_keypair, &Pubkey::new_rand());
    for _ in 0..genesis_block.ticks_per_slot {
        bank1.register_tick(&Hash::default());
    }
    bank1.freeze();
    bank_forks.write().unwrap().insert(bank1);
    let arc_bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
    leader_vote(&arc_bank1, &leader_voting_pubkey);
    let votable = ReplayStage::generate_votable_banks(&bank_forks, &tower, &mut progress);
    if let Some((_, _, lockouts, total_staked)) = votable.into_iter().last() {
        ReplayStage::update_confidence_cache(
            &bank_forks,
            &tower,
            lockouts,
            total_staked,
            &lockouts_sender,
        );
    }

    tower.record_vote(arc_bank1.slot(), arc_bank1.hash());

    // Same cycle for slot 2.
    let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
    let _res = bank2.transfer(10, &genesis_block_info.mint_keypair, &Pubkey::new_rand());
    for _ in 0..genesis_block.ticks_per_slot {
        bank2.register_tick(&Hash::default());
    }
    bank2.freeze();
    bank_forks.write().unwrap().insert(bank2);
    let arc_bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
    leader_vote(&arc_bank2, &leader_voting_pubkey);
    let votable = ReplayStage::generate_votable_banks(&bank_forks, &tower, &mut progress);
    if let Some((_, _, lockouts, total_staked)) = votable.into_iter().last() {
        ReplayStage::update_confidence_cache(
            &bank_forks,
            &tower,
            lockouts,
            total_staked,
            &lockouts_sender,
        );
    }
    // Give the aggregation thread time to drain the channel and cache the
    // stake-weighted lockouts before asserting on them.
    thread::sleep(Duration::from_millis(200));

    assert_eq!(
        bank_forks.read().unwrap().get_fork_confidence(0).unwrap(),
        &Confidence::new_with_stake_weighted(3, 3, 14, 60)
    );
    assert_eq!(
        bank_forks.read().unwrap().get_fork_confidence(1).unwrap(),
        &Confidence::new_with_stake_weighted(3, 3, 6, 18)
    );
    assert_eq!(
        bank_forks.read().unwrap().get_fork_confidence(2).unwrap(),
        &Confidence::new_with_stake_weighted(0, 3, 2, 0)
    );
}
|
2018-07-03 21:14:08 -07:00
|
|
|
}
|