2018-12-07 14:09:29 -08:00
//! The `replay_stage` replays transactions broadcast by the leader.
2018-05-22 14:26:28 -07:00
2019-11-20 15:43:10 -08:00
use crate ::{
2020-03-19 23:35:01 -07:00
broadcast_stage ::RetransmitSlotsSender ,
2020-09-09 08:33:14 -07:00
cache_block_time_service ::CacheBlockTimeSender ,
2019-11-20 15:43:10 -08:00
cluster_info ::ClusterInfo ,
2021-04-10 17:34:45 -07:00
cluster_info_vote_listener ::{
GossipDuplicateConfirmedSlotsReceiver , GossipVerifiedVoteHashReceiver , VoteTracker ,
} ,
2021-03-24 23:41:52 -07:00
cluster_slot_state_verifier ::* ,
2020-03-30 19:57:11 -07:00
cluster_slots ::ClusterSlots ,
2020-06-12 17:16:10 -07:00
commitment_service ::{ AggregateCommitmentService , CommitmentAggregationData } ,
2021-03-24 23:41:52 -07:00
consensus ::{
ComputedBankState , Stake , SwitchForkDecision , Tower , VotedStakes , SWITCH_FORK_THRESHOLD ,
} ,
2020-06-11 12:16:04 -07:00
fork_choice ::{ ForkChoice , SelectVoteAndResetForkResult } ,
heaviest_subtree_fork_choice ::HeaviestSubtreeForkChoice ,
2020-09-28 19:43:05 -07:00
optimistically_confirmed_bank_tracker ::{ BankNotification , BankNotificationSender } ,
2020-02-26 13:35:50 -08:00
poh_recorder ::{ PohRecorder , GRACE_TICKS_FACTOR , MAX_GRACE_SLOTS } ,
2021-03-24 23:41:52 -07:00
progress_map ::{ DuplicateStats , ForkProgress , ProgressMap , PropagatedStats } ,
2020-05-05 14:07:21 -07:00
repair_service ::DuplicateSlotsResetReceiver ,
2020-01-14 17:15:26 -08:00
result ::Result ,
2020-02-04 18:50:24 -08:00
rewards_recorder_service ::RewardsRecorderSender ,
2019-11-20 15:43:10 -08:00
rpc_subscriptions ::RpcSubscriptions ,
2021-03-24 23:41:52 -07:00
window_service ::DuplicateSlotReceiver ,
2019-09-17 19:43:40 -07:00
} ;
2021-03-12 05:44:06 -08:00
use solana_client ::rpc_response ::SlotUpdate ;
2019-11-02 00:38:30 -07:00
use solana_ledger ::{
2020-03-15 21:28:47 -07:00
block_error ::BlockError ,
2020-01-14 17:15:26 -08:00
blockstore ::Blockstore ,
2020-08-07 11:21:35 -07:00
blockstore_processor ::{ self , BlockstoreProcessorError , TransactionStatusSender } ,
2020-01-14 17:15:26 -08:00
entry ::VerifyRecyclers ,
2019-11-02 00:38:30 -07:00
leader_schedule_cache ::LeaderScheduleCache ,
} ;
2021-04-09 17:21:01 -07:00
use solana_measure ::measure ::Measure ;
2019-11-19 02:36:00 -08:00
use solana_metrics ::inc_new_counter_info ;
2020-06-25 21:06:58 -07:00
use solana_runtime ::{
2021-02-18 23:42:09 -08:00
accounts_background_service ::AbsRequestSender , bank ::Bank , bank_forks ::BankForks ,
2020-09-28 16:04:46 -07:00
commitment ::BlockCommitmentCache , vote_sender_types ::ReplayVoteSender ,
2020-06-25 21:06:58 -07:00
} ;
2019-11-02 00:38:30 -07:00
use solana_sdk ::{
2020-03-26 19:57:27 -07:00
clock ::{ Slot , NUM_CONSECUTIVE_LEADER_SLOTS } ,
2020-09-08 07:55:09 -07:00
genesis_config ::ClusterType ,
2019-11-02 00:38:30 -07:00
hash ::Hash ,
pubkey ::Pubkey ,
2021-03-25 18:54:51 -07:00
signature ::Signature ,
2020-02-20 13:28:55 -08:00
signature ::{ Keypair , Signer } ,
2020-06-28 10:04:15 -07:00
timing ::timestamp ,
2019-11-02 00:38:30 -07:00
transaction ::Transaction ,
} ;
2020-11-30 09:18:33 -08:00
use solana_vote_program ::{ vote_instruction , vote_state ::Vote } ;
2019-11-02 00:38:30 -07:00
use std ::{
2021-03-24 23:41:52 -07:00
collections ::{ BTreeMap , HashMap , HashSet } ,
2020-01-14 17:15:26 -08:00
result ,
2019-12-06 13:38:49 -08:00
sync ::{
atomic ::{ AtomicBool , Ordering } ,
2020-05-14 18:22:47 -07:00
mpsc ::{ Receiver , RecvTimeoutError , Sender } ,
2019-12-06 13:38:49 -08:00
Arc , Mutex , RwLock ,
} ,
2019-11-02 00:38:30 -07:00
thread ::{ self , Builder , JoinHandle } ,
2020-06-28 10:04:15 -07:00
time ::Duration ,
2019-11-02 00:38:30 -07:00
} ;
2018-05-22 14:26:28 -07:00
2018-12-13 18:43:10 -08:00
// Maximum number of entries pulled from the ledger channel per loop iteration.
pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
// Stake fraction (1/3) below which a fork is considered a super-minority.
pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64;
pub const MAX_UNCONFIRMED_SLOTS: usize = 5;
// Stake fraction reserved for liveness when computing DUPLICATE_THRESHOLD.
pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1;
// Stake required to confirm a duplicate slot:
// everything not needed for switching proofs or liveness.
pub const DUPLICATE_THRESHOLD: f64 = 1.0 - SWITCH_FORK_THRESHOLD - DUPLICATE_LIVENESS_THRESHOLD;
// Cap on retained vote transaction signatures (see voted_signatures in `new`).
const MAX_VOTE_SIGNATURES: usize = 200;

// Slot -> (vote hash -> pubkeys that voted for that hash), filled from gossip
// by `process_gossip_verified_vote_hashes`.
pub type GossipVerifiedVoteHashes = BTreeMap<Slot, HashMap<Hash, Vec<Pubkey>>>;
2020-03-02 12:43:43 -08:00
/// Reasons the replay loop could not vote on the heaviest fork this
/// iteration. Each variant carries the offending slot.
#[derive(PartialEq, Debug)]
pub(crate) enum HeaviestForkFailures {
    // Voting on this slot would violate lockouts.
    LockedOut(u64),
    // Vote stake threshold check failed for this slot.
    FailedThreshold(u64),
    // Switching forks to this slot failed the switch-proof threshold.
    FailedSwitchThreshold(u64),
    // Our leader blocks on this fork were not propagated/confirmed yet.
    NoPropagatedConfirmation(u64),
}
2018-12-13 18:43:10 -08:00
2018-12-07 14:09:29 -08:00
// Implement a destructor for the ReplayStage thread to signal it exited
// even on panics
struct Finalizer {
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    /// Wraps `exit_sender`; the flag is raised when the guard is dropped,
    /// including during an unwinding panic.
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // Store through the shared Arc directly; the original cloned the Arc
        // first, which only bumped the refcount for no benefit.
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
2020-03-26 19:57:27 -07:00
#[ derive(Default) ]
struct SkippedSlotsInfo {
last_retransmit_slot : u64 ,
last_skipped_slot : u64 ,
}
2019-12-04 10:17:17 -08:00
/// Startup configuration for [`ReplayStage`]; destructured exactly once in
/// `ReplayStage::new`.
pub struct ReplayStageConfig {
    // This validator's identity pubkey.
    pub my_pubkey: Pubkey,
    // Vote account this node votes with.
    pub vote_account: Pubkey,
    // Keypairs authorized to sign votes; read-locked when voting.
    pub authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
    // Shared shutdown flag; the replay loop exits when set.
    pub exit: Arc<AtomicBool>,
    pub subscriptions: Arc<RpcSubscriptions>,
    pub leader_schedule_cache: Arc<LeaderScheduleCache>,
    // Channels notified whenever a new root is set.
    pub latest_root_senders: Vec<Sender<Slot>>,
    pub accounts_background_request_sender: AbsRequestSender,
    pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    // Optional sinks; `None` disables the corresponding service.
    pub transaction_status_sender: Option<TransactionStatusSender>,
    pub rewards_recorder_sender: Option<RewardsRecorderSender>,
    pub cache_block_time_sender: Option<CacheBlockTimeSender>,
    pub bank_notification_sender: Option<BankNotificationSender>,
    // When true, leader slots are not produced until one of our votes roots
    // (inverts into `has_new_vote_been_rooted` in the replay loop).
    pub wait_for_vote_to_start_leader: bool,
}
2020-05-08 03:46:29 -07:00
/// Cumulative timings (in microseconds, fed from `Measure::as_us()` at the
/// end of each replay-loop iteration) flushed to metrics roughly once per
/// second by [`ReplayTiming::update`].
#[derive(Default)]
pub struct ReplayTiming {
    // Millisecond timestamp of the last metrics flush.
    last_print: u64,
    collect_frozen_banks_elapsed: u64,
    compute_bank_stats_elapsed: u64,
    select_vote_and_reset_forks_elapsed: u64,
    start_leader_elapsed: u64,
    reset_bank_elapsed: u64,
    voting_elapsed: u64,
    select_forks_elapsed: u64,
    compute_slot_stats_elapsed: u64,
    generate_new_bank_forks_elapsed: u64,
    replay_active_banks_elapsed: u64,
    wait_receive_elapsed: u64,
    heaviest_fork_failures_elapsed: u64,
    // Number of banks completed (0 or 1 per iteration at the call site).
    bank_count: u64,
    process_gossip_duplicate_confirmed_slots_elapsed: u64,
    process_duplicate_slots_elapsed: u64,
    process_gossip_verified_vote_hashes_elapsed: u64,
}
impl ReplayTiming {
    /// Accumulates one loop iteration's timings and, at most once per second,
    /// emits them as a `replay-loop-timing-stats` datapoint and resets the
    /// accumulators.
    #[allow(clippy::too_many_arguments)]
    fn update(
        &mut self,
        collect_frozen_banks_elapsed: u64,
        compute_bank_stats_elapsed: u64,
        select_vote_and_reset_forks_elapsed: u64,
        start_leader_elapsed: u64,
        reset_bank_elapsed: u64,
        voting_elapsed: u64,
        select_forks_elapsed: u64,
        compute_slot_stats_elapsed: u64,
        generate_new_bank_forks_elapsed: u64,
        replay_active_banks_elapsed: u64,
        wait_receive_elapsed: u64,
        heaviest_fork_failures_elapsed: u64,
        bank_count: u64,
        process_gossip_duplicate_confirmed_slots_elapsed: u64,
        process_gossip_verified_vote_hashes_elapsed: u64,
        process_duplicate_slots_elapsed: u64,
    ) {
        // Accumulate this iteration into the running totals.
        self.collect_frozen_banks_elapsed += collect_frozen_banks_elapsed;
        self.compute_bank_stats_elapsed += compute_bank_stats_elapsed;
        self.select_vote_and_reset_forks_elapsed += select_vote_and_reset_forks_elapsed;
        self.start_leader_elapsed += start_leader_elapsed;
        self.reset_bank_elapsed += reset_bank_elapsed;
        self.voting_elapsed += voting_elapsed;
        self.select_forks_elapsed += select_forks_elapsed;
        self.compute_slot_stats_elapsed += compute_slot_stats_elapsed;
        self.generate_new_bank_forks_elapsed += generate_new_bank_forks_elapsed;
        self.replay_active_banks_elapsed += replay_active_banks_elapsed;
        self.wait_receive_elapsed += wait_receive_elapsed;
        self.heaviest_fork_failures_elapsed += heaviest_fork_failures_elapsed;
        self.bank_count += bank_count;
        self.process_gossip_duplicate_confirmed_slots_elapsed +=
            process_gossip_duplicate_confirmed_slots_elapsed;
        self.process_gossip_verified_vote_hashes_elapsed +=
            process_gossip_verified_vote_hashes_elapsed;
        self.process_duplicate_slots_elapsed += process_duplicate_slots_elapsed;

        // Flush to metrics at most ~once per second.
        let now = timestamp();
        let elapsed_ms = now - self.last_print;
        if elapsed_ms > 1000 {
            datapoint_info!(
                "replay-loop-timing-stats",
                ("total_elapsed_us", elapsed_ms * 1000, i64),
                (
                    "collect_frozen_banks_elapsed",
                    self.collect_frozen_banks_elapsed as i64,
                    i64
                ),
                (
                    "compute_bank_stats_elapsed",
                    self.compute_bank_stats_elapsed as i64,
                    i64
                ),
                (
                    "select_vote_and_reset_forks_elapsed",
                    self.select_vote_and_reset_forks_elapsed as i64,
                    i64
                ),
                (
                    "start_leader_elapsed",
                    self.start_leader_elapsed as i64,
                    i64
                ),
                ("reset_bank_elapsed", self.reset_bank_elapsed as i64, i64),
                ("voting_elapsed", self.voting_elapsed as i64, i64),
                (
                    "select_forks_elapsed",
                    self.select_forks_elapsed as i64,
                    i64
                ),
                (
                    "compute_slot_stats_elapsed",
                    self.compute_slot_stats_elapsed as i64,
                    i64
                ),
                (
                    "generate_new_bank_forks_elapsed",
                    self.generate_new_bank_forks_elapsed as i64,
                    i64
                ),
                (
                    "replay_active_banks_elapsed",
                    self.replay_active_banks_elapsed as i64,
                    i64
                ),
                (
                    "process_gossip_duplicate_confirmed_slots_elapsed",
                    self.process_gossip_duplicate_confirmed_slots_elapsed as i64,
                    i64
                ),
                (
                    "process_gossip_verified_vote_hashes_elapsed",
                    self.process_gossip_verified_vote_hashes_elapsed as i64,
                    i64
                ),
                (
                    "wait_receive_elapsed",
                    self.wait_receive_elapsed as i64,
                    i64
                ),
                (
                    "heaviest_fork_failures_elapsed",
                    self.heaviest_fork_failures_elapsed as i64,
                    i64
                ),
                ("bank_count", self.bank_count as i64, i64),
                (
                    "process_duplicate_slots_elapsed",
                    self.process_duplicate_slots_elapsed as i64,
                    i64
                )
            );
            // Reset the accumulators, then record when this flush happened
            // (default() zeroed last_print, so it must be re-set after).
            *self = ReplayTiming::default();
            self.last_print = now;
        }
    }
}
2018-12-07 14:09:29 -08:00
/// Handle to the running replay stage: the replay thread itself plus the
/// commitment aggregation service it feeds via `lockouts_sender`.
pub struct ReplayStage {
    t_replay: JoinHandle<Result<()>>,
    commitment_service: AggregateCommitmentService,
}
2018-12-07 14:09:29 -08:00
impl ReplayStage {
2020-05-05 14:07:21 -07:00
    /// Spawns the replay-stage thread and the commitment service.
    ///
    /// The spawned loop repeatedly: creates banks for new fork tips, replays
    /// entries for active banks, ingests duplicate/confirmed-slot and
    /// verified-vote gossip signals, computes fork weights, selects the
    /// heaviest fork, votes and/or resets PoH onto it, optionally starts a
    /// leader slot, and reports per-phase timings. Exits when `exit` is set
    /// (the `Finalizer` also re-raises `exit` if the loop panics).
    #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
    pub fn new(
        config: ReplayStageConfig,
        blockstore: Arc<Blockstore>,
        bank_forks: Arc<RwLock<BankForks>>,
        cluster_info: Arc<ClusterInfo>,
        ledger_signal_receiver: Receiver<bool>,
        duplicate_slots_receiver: DuplicateSlotReceiver,
        poh_recorder: Arc<Mutex<PohRecorder>>,
        mut tower: Tower,
        vote_tracker: Arc<VoteTracker>,
        cluster_slots: Arc<ClusterSlots>,
        retransmit_slots_sender: RetransmitSlotsSender,
        // Currently unused: the reset path below is commented out.
        _duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
        replay_vote_sender: ReplayVoteSender,
        gossip_duplicate_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver,
        gossip_verified_vote_hash_receiver: GossipVerifiedVoteHashReceiver,
    ) -> Self {
        let ReplayStageConfig {
            my_pubkey,
            vote_account,
            authorized_voter_keypairs,
            exit,
            subscriptions,
            leader_schedule_cache,
            latest_root_senders,
            accounts_background_request_sender,
            block_commitment_cache,
            transaction_status_sender,
            rewards_recorder_sender,
            cache_block_time_sender,
            bank_notification_sender,
            wait_for_vote_to_start_leader,
        } = config;

        trace!("replay stage");
        // Start the replay stage loop
        let (lockouts_sender, commitment_service) = AggregateCommitmentService::new(
            &exit,
            block_commitment_cache.clone(),
            subscriptions.clone(),
        );

        #[allow(clippy::cognitive_complexity)]
        let t_replay = Builder::new()
            .name("solana-replay-stage".to_string())
            .spawn(move || {
                let verify_recyclers = VerifyRecyclers::default();
                // Raises `exit` when dropped, even on panic.
                let _exit = Finalizer::new(exit.clone());
                let (
                    mut progress,
                    mut heaviest_subtree_fork_choice,
                ) = Self::initialize_progress_and_fork_choice_with_locked_bank_forks(
                    &bank_forks,
                    &my_pubkey,
                    &vote_account,
                );
                let mut current_leader = None;
                let mut last_reset = Hash::default();
                let mut partition_exists = false;
                let mut skipped_slots_info = SkippedSlotsInfo::default();
                let mut replay_timing = ReplayTiming::default();
                let mut gossip_duplicate_confirmed_slots: GossipDuplicateConfirmedSlots =
                    BTreeMap::new();
                let mut gossip_verified_vote_hashes: GossipVerifiedVoteHashes = BTreeMap::new();
                let mut voted_signatures = Vec::new();
                let mut has_new_vote_been_rooted = !wait_for_vote_to_start_leader;
                loop {
                    // Stop getting entries if we get exit signal
                    if exit.load(Ordering::Relaxed) {
                        break;
                    }

                    // Create banks for any newly-discovered children of frozen banks.
                    let mut generate_new_bank_forks_time =
                        Measure::start("generate_new_bank_forks_time");
                    Self::generate_new_bank_forks(
                        &blockstore,
                        &bank_forks,
                        &leader_schedule_cache,
                        &subscriptions,
                        &mut progress,
                    );
                    generate_new_bank_forks_time.stop();

                    let mut tpu_has_bank = poh_recorder.lock().unwrap().has_bank();

                    // Replay entries for all active (unfrozen) banks.
                    let mut replay_active_banks_time = Measure::start("replay_active_banks_time");
                    let ancestors = bank_forks.read().unwrap().ancestors();
                    let descendants = bank_forks.read().unwrap().descendants().clone();
                    let did_complete_bank = Self::replay_active_banks(
                        &blockstore,
                        &bank_forks,
                        &my_pubkey,
                        &vote_account,
                        &mut progress,
                        transaction_status_sender.clone(),
                        &verify_recyclers,
                        &mut heaviest_subtree_fork_choice,
                        &replay_vote_sender,
                        &bank_notification_sender,
                        &rewards_recorder_sender,
                        &subscriptions,
                        &gossip_duplicate_confirmed_slots,
                        &ancestors,
                        &descendants,
                    );
                    replay_active_banks_time.stop();

                    let forks_root = bank_forks.read().unwrap().root();
                    // Reset any duplicate slots that have been confirmed
                    // by the network in anticipation of the confirmed version of
                    // the slot
                    /* let mut reset_duplicate_slots_time = Measure::start("reset_duplicate_slots");
                    Self::reset_duplicate_slots(
                        &duplicate_slots_reset_receiver,
                        &mut ancestors,
                        &mut descendants,
                        &mut progress,
                        &bank_forks,
                    );
                    reset_duplicate_slots_time.stop(); */

                    // Check for any newly confirmed slots detected from gossip.
                    let mut process_gossip_duplicate_confirmed_slots_time =
                        Measure::start("process_gossip_duplicate_confirmed_slots");
                    Self::process_gossip_duplicate_confirmed_slots(
                        &gossip_duplicate_confirmed_slots_receiver,
                        &mut gossip_duplicate_confirmed_slots,
                        &bank_forks,
                        &mut progress,
                        &mut heaviest_subtree_fork_choice,
                        &ancestors,
                        &descendants,
                    );
                    process_gossip_duplicate_confirmed_slots_time.stop();

                    // Ingest any new verified votes from gossip. Important for fork choice
                    // and switching proofs because these may be votes that haven't yet been
                    // included in a block, so we may not have yet observed these votes just
                    // by replaying blocks.
                    // NOTE(review): this Measure label looks copy-pasted from the
                    // previous timer; likely meant "process_gossip_verified_vote_hashes".
                    let mut process_gossip_verified_vote_hashes_time =
                        Measure::start("process_gossip_duplicate_confirmed_slots");
                    Self::process_gossip_verified_vote_hashes(
                        &gossip_verified_vote_hash_receiver,
                        &mut gossip_verified_vote_hashes,
                    );
                    process_gossip_verified_vote_hashes_time.stop();

                    // Check to remove any duplicated slots from fork choice
                    let mut process_duplicate_slots_time = Measure::start("process_duplicate_slots");
                    if !tpu_has_bank {
                        Self::process_duplicate_slots(
                            &duplicate_slots_receiver,
                            &gossip_duplicate_confirmed_slots,
                            &bank_forks,
                            &ancestors,
                            &descendants,
                            &mut progress,
                            &mut heaviest_subtree_fork_choice,
                        );
                    }
                    process_duplicate_slots_time.stop();

                    // Collect frozen banks at or above the current root.
                    let mut collect_frozen_banks_time = Measure::start("frozen_banks");
                    let mut frozen_banks: Vec<_> = bank_forks
                        .read()
                        .unwrap()
                        .frozen_banks()
                        .into_iter()
                        .filter(|(slot, _)| *slot >= forks_root)
                        .map(|(_, bank)| bank)
                        .collect();
                    collect_frozen_banks_time.stop();

                    let mut compute_bank_stats_time = Measure::start("compute_bank_stats");
                    let newly_computed_slot_stats = Self::compute_bank_stats(
                        &my_pubkey,
                        &ancestors,
                        &mut frozen_banks,
                        &tower,
                        &mut progress,
                        &vote_tracker,
                        &cluster_slots,
                        &bank_forks,
                        &mut heaviest_subtree_fork_choice,
                    );
                    compute_bank_stats_time.stop();

                    // For slots with freshly-computed stats, check and mark
                    // supermajority-confirmed forks.
                    let mut compute_slot_stats_time = Measure::start("compute_slot_stats_time");
                    for slot in newly_computed_slot_stats {
                        let fork_stats = progress.get_fork_stats(slot).unwrap();
                        let confirmed_forks = Self::confirm_forks(
                            &tower,
                            &fork_stats.voted_stakes,
                            fork_stats.total_stake,
                            &progress,
                            &bank_forks,
                        );

                        Self::mark_slots_confirmed(
                            &confirmed_forks,
                            &bank_forks,
                            &mut progress,
                            &ancestors,
                            &descendants,
                            &mut heaviest_subtree_fork_choice,
                        );
                    }
                    compute_slot_stats_time.stop();

                    let mut select_forks_time = Measure::start("select_forks_time");
                    let (heaviest_bank, heaviest_bank_on_same_voted_fork) =
                        heaviest_subtree_fork_choice
                            .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks);
                    select_forks_time.stop();

                    let mut select_vote_and_reset_forks_time =
                        Measure::start("select_vote_and_reset_forks");
                    let SelectVoteAndResetForkResult {
                        vote_bank,
                        reset_bank,
                        heaviest_fork_failures,
                    } = Self::select_vote_and_reset_forks(
                        &heaviest_bank,
                        heaviest_bank_on_same_voted_fork.as_ref(),
                        &ancestors,
                        &descendants,
                        &progress,
                        &mut tower,
                    );
                    select_vote_and_reset_forks_time.stop();

                    // Log why we could not vote on the heaviest fork, if recent.
                    let mut heaviest_fork_failures_time =
                        Measure::start("heaviest_fork_failures_time");
                    if tower.is_recent(heaviest_bank.slot()) && !heaviest_fork_failures.is_empty() {
                        info!(
                            "Couldn't vote on heaviest fork: {:?}, heaviest_fork_failures: {:?}",
                            heaviest_bank.slot(),
                            heaviest_fork_failures
                        );

                        for r in heaviest_fork_failures {
                            if let HeaviestForkFailures::NoPropagatedConfirmation(slot) = r {
                                if let Some(latest_leader_slot) =
                                    progress.get_latest_leader_slot(slot)
                                {
                                    progress.log_propagated_stats(latest_leader_slot, &bank_forks);
                                }
                            }
                        }
                    }
                    heaviest_fork_failures_time.stop();

                    let mut voting_time = Measure::start("voting_time");
                    // Vote on a fork
                    if let Some((ref vote_bank, ref switch_fork_decision)) = vote_bank {
                        if let Some(votable_leader) =
                            leader_schedule_cache.slot_leader_at(vote_bank.slot(), Some(vote_bank))
                        {
                            Self::log_leader_change(
                                &my_pubkey,
                                vote_bank.slot(),
                                &mut current_leader,
                                &votable_leader,
                            );
                        }

                        Self::handle_votable_bank(
                            &vote_bank,
                            &poh_recorder,
                            switch_fork_decision,
                            &bank_forks,
                            &mut tower,
                            &mut progress,
                            &vote_account,
                            &authorized_voter_keypairs.read().unwrap(),
                            &cluster_info,
                            &blockstore,
                            &leader_schedule_cache,
                            &lockouts_sender,
                            &accounts_background_request_sender,
                            &latest_root_senders,
                            &subscriptions,
                            &block_commitment_cache,
                            &mut heaviest_subtree_fork_choice,
                            &cache_block_time_sender,
                            &bank_notification_sender,
                            &mut gossip_duplicate_confirmed_slots,
                            &mut gossip_verified_vote_hashes,
                            &mut voted_signatures,
                            &mut has_new_vote_been_rooted,
                        );
                    };
                    voting_time.stop();

                    let mut reset_bank_time = Measure::start("reset_bank");
                    // Reset onto a fork
                    if let Some(reset_bank) = reset_bank {
                        // Only reset if we have not already reset onto this blockhash.
                        if last_reset != reset_bank.last_blockhash() {
                            info!(
                                "vote bank: {:?} reset bank: {:?}",
                                vote_bank.as_ref().map(|(b, switch_fork_decision)| (
                                    b.slot(),
                                    switch_fork_decision
                                )),
                                reset_bank.slot(),
                            );
                            let fork_progress = progress
                                .get(&reset_bank.slot())
                                .expect("bank to reset to must exist in progress map");
                            datapoint_info!(
                                "blocks_produced",
                                ("num_blocks_on_fork", fork_progress.num_blocks_on_fork, i64),
                                (
                                    "num_dropped_blocks_on_fork",
                                    fork_progress.num_dropped_blocks_on_fork,
                                    i64
                                ),
                            );
                            Self::reset_poh_recorder(
                                &my_pubkey,
                                &blockstore,
                                &reset_bank,
                                &poh_recorder,
                                &leader_schedule_cache,
                            );
                            last_reset = reset_bank.last_blockhash();
                            tpu_has_bank = false;

                            if let Some(last_voted_slot) = tower.last_voted_slot() {
                                // If the current heaviest bank is not a descendant of the last voted slot,
                                // there must be a partition
                                let partition_detected = Self::is_partition_detected(
                                    &ancestors,
                                    last_voted_slot,
                                    heaviest_bank.slot(),
                                );
                                if !partition_exists && partition_detected {
                                    warn!(
                                        "PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset slot: {}",
                                        heaviest_bank.slot(),
                                        last_voted_slot,
                                        reset_bank.slot(),
                                    );
                                    inc_new_counter_info!("replay_stage-partition_detected", 1);
                                    datapoint_info!(
                                        "replay_stage-partition",
                                        ("slot", reset_bank.slot() as i64, i64)
                                    );
                                    partition_exists = true;
                                } else if partition_exists && !partition_detected {
                                    warn!(
                                        "PARTITION resolved heaviest fork: {} last vote: {:?}, reset slot: {}",
                                        heaviest_bank.slot(),
                                        last_voted_slot,
                                        reset_bank.slot()
                                    );
                                    partition_exists = false;
                                    inc_new_counter_info!("replay_stage-partition_resolved", 1);
                                }
                            }
                        }
                    }
                    reset_bank_time.stop();

                    // If PoH has no working bank, consider starting our leader slot.
                    let mut start_leader_time = Measure::start("start_leader_time");
                    if !tpu_has_bank {
                        Self::maybe_start_leader(
                            &my_pubkey,
                            &bank_forks,
                            &poh_recorder,
                            &leader_schedule_cache,
                            &subscriptions,
                            &progress,
                            &retransmit_slots_sender,
                            &mut skipped_slots_info,
                            has_new_vote_been_rooted,
                        );

                        let poh_bank = poh_recorder.lock().unwrap().bank();
                        if let Some(bank) = poh_bank {
                            Self::log_leader_change(
                                &my_pubkey,
                                bank.slot(),
                                &mut current_leader,
                                &my_pubkey,
                            );
                        }
                    }
                    start_leader_time.stop();

                    let mut wait_receive_time = Measure::start("wait_receive_time");
                    if !did_complete_bank {
                        // only wait for the signal if we did not just process a bank; maybe there are more slots available
                        let timer = Duration::from_millis(100);
                        let result = ledger_signal_receiver.recv_timeout(timer);
                        match result {
                            Err(RecvTimeoutError::Timeout) => (),
                            // Sender hung up: shut the loop down.
                            Err(_) => break,
                            Ok(_) => trace!("blockstore signal"),
                        };
                    }
                    wait_receive_time.stop();

                    replay_timing.update(
                        collect_frozen_banks_time.as_us(),
                        compute_bank_stats_time.as_us(),
                        select_vote_and_reset_forks_time.as_us(),
                        start_leader_time.as_us(),
                        reset_bank_time.as_us(),
                        voting_time.as_us(),
                        select_forks_time.as_us(),
                        compute_slot_stats_time.as_us(),
                        generate_new_bank_forks_time.as_us(),
                        replay_active_banks_time.as_us(),
                        wait_receive_time.as_us(),
                        heaviest_fork_failures_time.as_us(),
                        if did_complete_bank { 1 } else { 0 },
                        process_gossip_duplicate_confirmed_slots_time.as_us(),
                        process_gossip_verified_vote_hashes_time.as_us(),
                        process_duplicate_slots_time.as_us(),
                    );
                }
                Ok(())
            })
            .unwrap();

        Self {
            t_replay,
            commitment_service,
        }
    }
2019-07-09 15:36:30 -07:00
2020-06-29 18:49:57 -07:00
fn is_partition_detected (
ancestors : & HashMap < Slot , HashSet < Slot > > ,
last_voted_slot : Slot ,
heaviest_slot : Slot ,
) -> bool {
last_voted_slot ! = heaviest_slot
& & ! ancestors
. get ( & heaviest_slot )
. map ( | ancestors | ancestors . contains ( & last_voted_slot ) )
. unwrap_or ( true )
}
2020-09-18 22:03:54 -07:00
fn initialize_progress_and_fork_choice_with_locked_bank_forks (
bank_forks : & RwLock < BankForks > ,
my_pubkey : & Pubkey ,
vote_account : & Pubkey ,
2020-12-07 13:47:14 -08:00
) -> ( ProgressMap , HeaviestSubtreeForkChoice ) {
2020-09-18 22:03:54 -07:00
let ( root_bank , frozen_banks ) = {
let bank_forks = bank_forks . read ( ) . unwrap ( ) ;
(
2020-12-27 05:28:05 -08:00
bank_forks . root_bank ( ) ,
2020-09-18 22:03:54 -07:00
bank_forks . frozen_banks ( ) . values ( ) . cloned ( ) . collect ( ) ,
)
} ;
Self ::initialize_progress_and_fork_choice (
& root_bank ,
frozen_banks ,
& my_pubkey ,
& vote_account ,
)
}
    /// Builds the initial `ProgressMap` (one entry per frozen bank, in slot
    /// order) and a `HeaviestSubtreeForkChoice` rooted at `root_bank`.
    pub(crate) fn initialize_progress_and_fork_choice(
        root_bank: &Bank,
        mut frozen_banks: Vec<Arc<Bank>>,
        my_pubkey: &Pubkey,
        vote_account: &Pubkey,
    ) -> (ProgressMap, HeaviestSubtreeForkChoice) {
        let mut progress = ProgressMap::default();

        // Parents must be inserted before children: the lookups below consult
        // entries already added to `progress`.
        frozen_banks.sort_by_key(|bank| bank.slot());

        // Initialize progress map with any root banks
        for bank in &frozen_banks {
            let prev_leader_slot = progress.get_bank_prev_leader_slot(bank);
            // Inherit any unconfirmed-duplicate ancestor state from the parent.
            let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor(
                progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()),
            );
            progress.insert(
                bank.slot(),
                ForkProgress::new_from_bank(
                    bank,
                    &my_pubkey,
                    &vote_account,
                    prev_leader_slot,
                    duplicate_stats,
                    0,
                    0,
                ),
            );
        }
        let root = root_bank.slot();
        let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_frozen_banks(
            (root, root_bank.hash()),
            &frozen_banks,
        );

        (progress, heaviest_subtree_fork_choice)
    }
2021-03-24 23:41:52 -07:00
#[ allow(dead_code) ]
2020-05-05 14:07:21 -07:00
fn reset_duplicate_slots (
duplicate_slots_reset_receiver : & DuplicateSlotsResetReceiver ,
2020-05-07 23:39:57 -07:00
ancestors : & mut HashMap < Slot , HashSet < Slot > > ,
descendants : & mut HashMap < Slot , HashSet < Slot > > ,
2020-05-05 14:07:21 -07:00
progress : & mut ProgressMap ,
bank_forks : & RwLock < BankForks > ,
) {
for duplicate_slot in duplicate_slots_reset_receiver . try_iter ( ) {
Self ::purge_unconfirmed_duplicate_slot (
duplicate_slot ,
2020-05-07 23:39:57 -07:00
ancestors ,
2020-05-05 14:07:21 -07:00
descendants ,
progress ,
bank_forks ,
) ;
}
}
2021-03-24 23:41:52 -07:00
#[ allow(dead_code) ]
2020-05-05 14:07:21 -07:00
fn purge_unconfirmed_duplicate_slot (
duplicate_slot : Slot ,
2020-05-07 23:39:57 -07:00
ancestors : & mut HashMap < Slot , HashSet < Slot > > ,
descendants : & mut HashMap < Slot , HashSet < Slot > > ,
2020-05-05 14:07:21 -07:00
progress : & mut ProgressMap ,
bank_forks : & RwLock < BankForks > ,
) {
2020-05-07 23:39:57 -07:00
warn! ( " purging slot {} " , duplicate_slot ) ;
let slot_descendants = descendants . get ( & duplicate_slot ) . cloned ( ) ;
2020-05-06 11:44:49 -07:00
if slot_descendants . is_none ( ) {
// Root has already moved past this slot, no need to purge it
return ;
}
2020-05-05 14:07:21 -07:00
2020-05-07 23:39:57 -07:00
// Clear the ancestors/descendants map to keep them
// consistent
let slot_descendants = slot_descendants . unwrap ( ) ;
Self ::purge_ancestors_descendants (
duplicate_slot ,
& slot_descendants ,
ancestors ,
descendants ,
) ;
2020-05-05 14:07:21 -07:00
for d in slot_descendants
. iter ( )
. chain ( std ::iter ::once ( & duplicate_slot ) )
{
// Clear the progress map of these forks
let _ = progress . remove ( d ) ;
// Clear the duplicate banks from BankForks
{
let mut w_bank_forks = bank_forks . write ( ) . unwrap ( ) ;
// Purging should have already been taken care of by logic
// in repair_service, so make sure drop implementation doesn't
// run
2020-05-07 23:39:57 -07:00
if let Some ( b ) = w_bank_forks . get ( * d ) {
b . skip_drop . store ( true , Ordering ::Relaxed )
}
w_bank_forks . remove ( * d ) ;
2020-05-05 14:07:21 -07:00
}
}
}
2020-05-07 23:39:57 -07:00
/// Purges the given slot and all of its descendants from the `ancestors` and
/// `descendants` maps so that they stay consistent with `BankForks` and the
/// `progress` map.
fn purge_ancestors_descendants(
    slot: Slot,
    slot_descendants: &HashSet<Slot>,
    ancestors: &mut HashMap<Slot, HashSet<Slot>>,
    descendants: &mut HashMap<Slot, HashSet<Slot>>,
) {
    // Removing the entry up front also serves as the "already purged" check.
    let slot_ancestors = match ancestors.remove(&slot) {
        Some(slot_ancestors) => slot_ancestors,
        // Slot has already been purged.
        None => return,
    };

    // Scrub `slot` and everything below it from each ancestor's
    // `descendants` entry.
    for ancestor in &slot_ancestors {
        descendants
            .get_mut(ancestor)
            .expect("If exists in ancestor map must exist in descendants map")
            .retain(|d| *d != slot && !slot_descendants.contains(d));
    }

    // Purge all of `slot`'s descendants from both maps.
    for descendant in slot_descendants {
        ancestors.remove(descendant).expect("must exist");
        descendants
            .remove(descendant)
            .expect("must exist based on earlier check");
    }
    descendants
        .remove(&slot)
        .expect("must exist based on earlier check");
}
2021-03-24 23:41:52 -07:00
// Check for any newly confirmed slots by the cluster. This is only detects
// optimistic and in the future, duplicate slot confirmations on the exact
// single slots and does not account for votes on their descendants. Used solely
// for duplicate slot recovery.
fn process_gossip_duplicate_confirmed_slots (
gossip_duplicate_confirmed_slots_receiver : & GossipDuplicateConfirmedSlotsReceiver ,
gossip_duplicate_confirmed_slots : & mut GossipDuplicateConfirmedSlots ,
bank_forks : & RwLock < BankForks > ,
progress : & mut ProgressMap ,
fork_choice : & mut HeaviestSubtreeForkChoice ,
ancestors : & HashMap < Slot , HashSet < Slot > > ,
descendants : & HashMap < Slot , HashSet < Slot > > ,
) {
let root = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
for new_confirmed_slots in gossip_duplicate_confirmed_slots_receiver . try_iter ( ) {
for ( confirmed_slot , confirmed_hash ) in new_confirmed_slots {
if confirmed_slot < = root {
continue ;
} else if let Some ( prev_hash ) =
gossip_duplicate_confirmed_slots . insert ( confirmed_slot , confirmed_hash )
{
assert_eq! ( prev_hash , confirmed_hash ) ;
// Already processed this signal
return ;
}
check_slot_agrees_with_cluster (
confirmed_slot ,
root ,
bank_forks
. read ( )
. unwrap ( )
. get ( confirmed_slot )
. map ( | b | b . hash ( ) ) ,
gossip_duplicate_confirmed_slots ,
ancestors ,
descendants ,
progress ,
fork_choice ,
SlotStateUpdate ::DuplicateConfirmed ,
) ;
}
}
}
2021-04-10 17:34:45 -07:00
/// Drains verified vote hashes received over gossip into the
/// `gossip_verified_vote_hashes` accumulator, keyed by slot and then hash.
fn process_gossip_verified_vote_hashes(
    gossip_verified_vote_hash_receiver: &GossipVerifiedVoteHashReceiver,
    gossip_verified_vote_hashes: &mut GossipVerifiedVoteHashes,
) {
    while let Ok((pubkey, slot, hash)) = gossip_verified_vote_hash_receiver.try_recv() {
        // cluster_info_vote_listener will ensure it doesn't push duplicates.
        let voters = gossip_verified_vote_hashes
            .entry(slot)
            .or_default()
            .entry(hash)
            .or_default();
        voters.push(pubkey);
    }
}
2021-03-24 23:41:52 -07:00
/// Checks for and handles forks containing newly-detected duplicate slots.
fn process_duplicate_slots(
    duplicate_slots_receiver: &DuplicateSlotReceiver,
    gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
    bank_forks: &RwLock<BankForks>,
    ancestors: &HashMap<Slot, HashSet<Slot>>,
    descendants: &HashMap<Slot, HashSet<Slot>>,
    progress: &mut ProgressMap,
    fork_choice: &mut HeaviestSubtreeForkChoice,
) {
    // Drain the channel first, then take the BankForks read lock exactly once
    // to snapshot the root and each duplicate slot's current bank hash.
    let new_duplicate_slots: Vec<Slot> = duplicate_slots_receiver.try_iter().collect();
    let (root_slot, duplicate_slot_hashes): (Slot, Vec<Option<Hash>>) = {
        let r_bank_forks = bank_forks.read().unwrap();
        let hashes = new_duplicate_slots
            .iter()
            .map(|slot| r_bank_forks.get(*slot).map(|bank| bank.hash()))
            .collect();
        (r_bank_forks.root(), hashes)
    };
    for (duplicate_slot, bank_hash) in new_duplicate_slots.into_iter().zip(duplicate_slot_hashes) {
        // WindowService should only send the signal once per slot.
        check_slot_agrees_with_cluster(
            duplicate_slot,
            root_slot,
            bank_hash,
            gossip_duplicate_confirmed_slots,
            ancestors,
            descendants,
            progress,
            fork_choice,
            SlotStateUpdate::Duplicate,
        );
    }
}
2019-07-23 19:19:20 -07:00
fn log_leader_change (
my_pubkey : & Pubkey ,
2019-11-02 00:38:30 -07:00
bank_slot : Slot ,
2019-07-23 19:19:20 -07:00
current_leader : & mut Option < Pubkey > ,
new_leader : & Pubkey ,
) {
if let Some ( ref current_leader ) = current_leader {
if current_leader ! = new_leader {
let msg = if current_leader = = my_pubkey {
2019-08-02 10:08:42 -07:00
" . I am no longer the leader "
2019-07-23 19:19:20 -07:00
} else if new_leader = = my_pubkey {
2019-08-02 10:08:42 -07:00
" . I am now the leader "
2019-07-23 19:19:20 -07:00
} else {
" "
} ;
info! (
2019-08-02 10:08:42 -07:00
" LEADER CHANGE at slot: {} leader: {}{} " ,
2019-07-23 19:19:20 -07:00
bank_slot , new_leader , msg
) ;
}
}
current_leader . replace ( new_leader . to_owned ( ) ) ;
}
2020-03-26 19:57:27 -07:00
fn check_propagation_for_start_leader (
poh_slot : Slot ,
parent_slot : Slot ,
progress_map : & ProgressMap ,
) -> bool {
2020-12-03 12:31:38 -08:00
// Assume `NUM_CONSECUTIVE_LEADER_SLOTS` = 4. Then `skip_propagated_check`
// below is true if `poh_slot` is within the same `NUM_CONSECUTIVE_LEADER_SLOTS`
// set of blocks as `latest_leader_slot`.
//
// Example 1 (`poh_slot` directly descended from `latest_leader_slot`):
//
// [B B B B] [B B B latest_leader_slot] poh_slot
//
// Example 2:
//
// [B latest_leader_slot B poh_slot]
//
// In this example, even if there's a block `B` on another fork between
// `poh_slot` and `parent_slot`, because they're in the same
// `NUM_CONSECUTIVE_LEADER_SLOTS` block, we still skip the propagated
// check because it's still within the propagation grace period.
if let Some ( latest_leader_slot ) = progress_map . get_latest_leader_slot ( parent_slot ) {
let skip_propagated_check =
poh_slot - latest_leader_slot < NUM_CONSECUTIVE_LEADER_SLOTS ;
if skip_propagated_check {
return true ;
}
2020-03-26 19:57:27 -07:00
}
2020-12-03 12:31:38 -08:00
// Note that `is_propagated(parent_slot)` doesn't necessarily check
// propagation of `parent_slot`, it checks propagation of the latest ancestor
// of `parent_slot` (hence the call to `get_latest_leader_slot()` in the
// check above)
2020-03-26 19:57:27 -07:00
progress_map . is_propagated ( parent_slot )
}
2020-03-26 23:33:28 -07:00
fn should_retransmit ( poh_slot : Slot , last_retransmit_slot : & mut Slot ) -> bool {
if poh_slot < * last_retransmit_slot
| | poh_slot > = * last_retransmit_slot + NUM_CONSECUTIVE_LEADER_SLOTS
{
* last_retransmit_slot = poh_slot ;
true
} else {
false
}
}
2019-07-09 15:36:30 -07:00
fn maybe_start_leader (
2019-05-23 23:20:04 -07:00
my_pubkey : & Pubkey ,
2019-03-05 17:56:51 -08:00
bank_forks : & Arc < RwLock < BankForks > > ,
poh_recorder : & Arc < Mutex < PohRecorder > > ,
2019-04-19 02:39:44 -07:00
leader_schedule_cache : & Arc < LeaderScheduleCache > ,
2019-11-26 00:42:54 -08:00
subscriptions : & Arc < RpcSubscriptions > ,
2020-03-26 19:57:27 -07:00
progress_map : & ProgressMap ,
retransmit_slots_sender : & RetransmitSlotsSender ,
skipped_slots_info : & mut SkippedSlotsInfo ,
2021-03-25 18:54:51 -07:00
has_new_vote_been_rooted : bool ,
2019-03-05 17:56:51 -08:00
) {
2019-07-18 14:54:27 -07:00
// all the individual calls to poh_recorder.lock() are designed to
// increase granularity, decrease contention
2019-07-09 22:06:47 -07:00
2019-07-18 14:54:27 -07:00
assert! ( ! poh_recorder . lock ( ) . unwrap ( ) . has_bank ( ) ) ;
2019-07-09 22:06:47 -07:00
2019-10-16 12:53:11 -07:00
let ( reached_leader_slot , _grace_ticks , poh_slot , parent_slot ) =
poh_recorder . lock ( ) . unwrap ( ) . reached_leader_slot ( ) ;
2019-07-09 15:36:30 -07:00
2019-10-16 12:53:11 -07:00
if ! reached_leader_slot {
trace! ( " {} poh_recorder hasn't reached_leader_slot " , my_pubkey ) ;
2019-07-09 15:36:30 -07:00
return ;
}
2019-10-16 12:53:11 -07:00
trace! ( " {} reached_leader_slot " , my_pubkey ) ;
2019-07-09 15:36:30 -07:00
let parent = bank_forks
. read ( )
. unwrap ( )
. get ( parent_slot )
. expect ( " parent_slot doesn't exist in bank forks " )
. clone ( ) ;
2019-07-18 14:54:27 -07:00
assert! ( parent . is_frozen ( ) ) ;
if bank_forks . read ( ) . unwrap ( ) . get ( poh_slot ) . is_some ( ) {
warn! ( " {} already have bank in forks at {}? " , my_pubkey , poh_slot ) ;
2019-07-09 15:36:30 -07:00
return ;
}
2019-07-18 14:54:27 -07:00
trace! (
" {} poh_slot {} parent_slot {} " ,
my_pubkey ,
poh_slot ,
parent_slot
) ;
2019-07-09 15:36:30 -07:00
if let Some ( next_leader ) = leader_schedule_cache . slot_leader_at ( poh_slot , Some ( & parent ) ) {
2021-03-26 17:45:53 -07:00
if ! has_new_vote_been_rooted {
info! ( " Haven't landed a vote, so skipping my leader slot " ) ;
return ;
}
2019-07-09 15:36:30 -07:00
trace! (
" {} leader {} at poh slot: {} " ,
my_pubkey ,
next_leader ,
poh_slot
) ;
2019-07-09 22:06:47 -07:00
// I guess I missed my slot
if next_leader ! = * my_pubkey {
return ;
2019-07-09 15:36:30 -07:00
}
2019-07-09 22:06:47 -07:00
2020-02-21 13:41:49 -08:00
datapoint_info! (
2019-07-09 22:06:47 -07:00
" replay_stage-new_leader " ,
2019-07-30 13:18:33 -07:00
( " slot " , poh_slot , i64 ) ,
( " leader " , next_leader . to_string ( ) , String ) ,
2019-07-09 22:06:47 -07:00
) ;
2020-03-26 19:57:27 -07:00
if ! Self ::check_propagation_for_start_leader ( poh_slot , parent_slot , progress_map ) {
2020-04-08 14:35:24 -07:00
let latest_unconfirmed_leader_slot = progress_map . get_latest_leader_slot ( parent_slot ) . expect ( " In order for propagated check to fail, latest leader must exist in progress map " ) ;
2020-03-26 19:57:27 -07:00
if poh_slot ! = skipped_slots_info . last_skipped_slot {
datapoint_info! (
" replay_stage-skip_leader_slot " ,
( " slot " , poh_slot , i64 ) ,
( " parent_slot " , parent_slot , i64 ) ,
2020-04-08 14:35:24 -07:00
(
" latest_unconfirmed_leader_slot " ,
latest_unconfirmed_leader_slot ,
i64
)
2020-03-26 19:57:27 -07:00
) ;
2020-04-08 14:35:24 -07:00
progress_map . log_propagated_stats ( latest_unconfirmed_leader_slot , bank_forks ) ;
2020-03-26 19:57:27 -07:00
skipped_slots_info . last_skipped_slot = poh_slot ;
}
2020-04-08 14:35:24 -07:00
let bank = bank_forks . read ( ) . unwrap ( ) . get ( latest_unconfirmed_leader_slot )
2020-03-26 19:57:27 -07:00
. expect ( " In order for propagated check to fail, latest leader must exist in progress map, and thus also in BankForks " ) . clone ( ) ;
// Signal retransmit
2020-03-26 23:33:28 -07:00
if Self ::should_retransmit ( poh_slot , & mut skipped_slots_info . last_retransmit_slot ) {
2020-03-26 19:57:27 -07:00
datapoint_info! ( " replay_stage-retransmit " , ( " slot " , bank . slot ( ) , i64 ) , ) ;
retransmit_slots_sender
. send ( vec! [ ( bank . slot ( ) , bank . clone ( ) ) ] . into_iter ( ) . collect ( ) )
. unwrap ( ) ;
}
return ;
}
2019-11-26 00:42:54 -08:00
let root_slot = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
2020-04-08 14:35:24 -07:00
datapoint_info! ( " replay_stage-my_leader_slot " , ( " slot " , poh_slot , i64 ) , ) ;
2019-11-26 00:42:54 -08:00
info! (
" new fork:{} parent:{} (leader) root:{} " ,
poh_slot , parent_slot , root_slot
) ;
2019-07-09 22:06:47 -07:00
2020-02-25 15:49:59 -08:00
let tpu_bank = Self ::new_bank_from_parent_with_notify (
& parent ,
poh_slot ,
root_slot ,
my_pubkey ,
subscriptions ,
) ;
let tpu_bank = bank_forks . write ( ) . unwrap ( ) . insert ( tpu_bank ) ;
2019-07-09 22:06:47 -07:00
poh_recorder . lock ( ) . unwrap ( ) . set_bank ( & tpu_bank ) ;
2019-07-09 15:36:30 -07:00
} else {
error! ( " {} No next leader found " , my_pubkey ) ;
2019-03-05 17:56:51 -08:00
}
}
2019-06-20 15:50:41 -07:00
2020-01-13 13:13:52 -08:00
fn replay_blockstore_into_bank (
2019-10-08 14:58:49 -07:00
bank : & Arc < Bank > ,
2020-01-13 13:13:52 -08:00
blockstore : & Blockstore ,
2019-11-19 02:36:00 -08:00
bank_progress : & mut ForkProgress ,
2019-11-20 15:43:10 -08:00
transaction_status_sender : Option < TransactionStatusSender > ,
2020-08-07 11:21:35 -07:00
replay_vote_sender : & ReplayVoteSender ,
2019-12-12 10:36:27 -08:00
verify_recyclers : & VerifyRecyclers ,
2020-01-14 17:15:26 -08:00
) -> result ::Result < usize , BlockstoreProcessorError > {
let tx_count_before = bank_progress . replay_progress . num_txs ;
let confirm_result = blockstore_processor ::confirm_slot (
blockstore ,
bank ,
& mut bank_progress . replay_stats ,
& mut bank_progress . replay_progress ,
false ,
transaction_status_sender ,
2020-08-07 11:21:35 -07:00
Some ( replay_vote_sender ) ,
2020-01-14 17:15:26 -08:00
None ,
verify_recyclers ,
2021-02-06 17:26:42 -08:00
false ,
2020-01-14 17:15:26 -08:00
) ;
let tx_count_after = bank_progress . replay_progress . num_txs ;
let tx_count = tx_count_after - tx_count_before ;
confirm_result . map_err ( | err | {
2021-03-24 23:41:52 -07:00
// All errors must lead to marking the slot as dead, otherwise,
// the `check_slot_agrees_with_cluster()` called by `replay_active_banks()`
// will break!
2020-01-14 17:15:26 -08:00
err
} ) ? ;
2019-06-20 15:50:41 -07:00
2020-01-14 17:15:26 -08:00
Ok ( tx_count )
2019-02-26 21:57:45 -08:00
}
2021-03-24 23:41:52 -07:00
#[ allow(clippy::too_many_arguments) ]
2020-12-09 23:14:31 -08:00
fn mark_dead_slot (
blockstore : & Blockstore ,
2021-03-24 23:41:52 -07:00
bank : & Bank ,
root : Slot ,
2020-12-09 23:14:31 -08:00
err : & BlockstoreProcessorError ,
2021-03-12 05:44:06 -08:00
subscriptions : & Arc < RpcSubscriptions > ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & GossipDuplicateConfirmedSlots ,
ancestors : & HashMap < Slot , HashSet < Slot > > ,
descendants : & HashMap < Slot , HashSet < Slot > > ,
progress : & mut ProgressMap ,
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2020-12-09 23:14:31 -08:00
) {
2021-03-24 23:41:52 -07:00
// Do not remove from progress map when marking dead! Needed by
// `process_gossip_duplicate_confirmed_slots()`
// Block producer can abandon the block if it detects a better one
// while producing. Somewhat common and expected in a
// network with variable network/machine configuration.
let is_serious = ! matches! (
err ,
BlockstoreProcessorError ::InvalidBlock ( BlockError ::TooFewTicks )
) ;
let slot = bank . slot ( ) ;
2020-12-09 23:14:31 -08:00
if is_serious {
datapoint_error! (
" replay-stage-mark_dead_slot " ,
( " error " , format! ( " error: {:?} " , err ) , String ) ,
( " slot " , slot , i64 )
) ;
} else {
datapoint_info! (
" replay-stage-mark_dead_slot " ,
( " error " , format! ( " error: {:?} " , err ) , String ) ,
( " slot " , slot , i64 )
) ;
}
2021-03-24 23:41:52 -07:00
progress . get_mut ( & slot ) . unwrap ( ) . is_dead = true ;
2020-12-09 23:14:31 -08:00
blockstore
. set_dead_slot ( slot )
. expect ( " Failed to mark slot as dead in blockstore " ) ;
2021-03-12 05:44:06 -08:00
subscriptions . notify_slot_update ( SlotUpdate ::Dead {
slot ,
err : format ! ( " error: {:?} " , err ) ,
timestamp : timestamp ( ) ,
} ) ;
2021-03-24 23:41:52 -07:00
check_slot_agrees_with_cluster (
slot ,
root ,
Some ( bank . hash ( ) ) ,
gossip_duplicate_confirmed_slots ,
ancestors ,
descendants ,
progress ,
heaviest_subtree_fork_choice ,
SlotStateUpdate ::Dead ,
) ;
2020-12-09 23:14:31 -08:00
}
2019-05-03 16:27:53 -07:00
#[ allow(clippy::too_many_arguments) ]
2019-12-04 10:17:17 -08:00
fn handle_votable_bank (
2019-03-21 11:53:18 -07:00
bank : & Arc < Bank > ,
2021-04-17 10:22:00 -07:00
poh_recorder : & Arc < Mutex < PohRecorder > > ,
2020-05-29 14:40:36 -07:00
switch_fork_decision : & SwitchForkDecision ,
2019-03-21 11:53:18 -07:00
bank_forks : & Arc < RwLock < BankForks > > ,
2019-06-24 13:41:23 -07:00
tower : & mut Tower ,
2020-03-02 12:43:43 -08:00
progress : & mut ProgressMap ,
2020-03-31 08:23:42 -07:00
vote_account_pubkey : & Pubkey ,
authorized_voter_keypairs : & [ Arc < Keypair > ] ,
2020-04-21 12:54:45 -07:00
cluster_info : & Arc < ClusterInfo > ,
2020-01-13 13:13:52 -08:00
blockstore : & Arc < Blockstore > ,
2019-04-30 13:23:21 -07:00
leader_schedule_cache : & Arc < LeaderScheduleCache > ,
2019-11-04 15:44:27 -08:00
lockouts_sender : & Sender < CommitmentAggregationData > ,
2021-02-18 23:42:09 -08:00
accounts_background_request_sender : & AbsRequestSender ,
2019-12-18 11:50:09 -08:00
latest_root_senders : & [ Sender < Slot > ] ,
2020-03-27 09:33:40 -07:00
subscriptions : & Arc < RpcSubscriptions > ,
2020-04-24 15:49:57 -07:00
block_commitment_cache : & Arc < RwLock < BlockCommitmentCache > > ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2020-09-09 08:33:14 -07:00
cache_block_time_sender : & Option < CacheBlockTimeSender > ,
2020-09-28 19:43:05 -07:00
bank_notification_sender : & Option < BankNotificationSender > ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & mut GossipDuplicateConfirmedSlots ,
2021-04-10 17:34:45 -07:00
gossip_verified_vote_hashes : & mut GossipVerifiedVoteHashes ,
2021-03-25 18:54:51 -07:00
vote_signatures : & mut Vec < Signature > ,
has_new_vote_been_rooted : & mut bool ,
2020-12-13 17:26:34 -08:00
) {
2019-11-06 01:02:26 -08:00
if bank . is_empty ( ) {
inc_new_counter_info! ( " replay_stage-voted_empty_bank " , 1 ) ;
}
2019-07-14 18:48:15 -07:00
trace! ( " handle votable bank {} " , bank . slot ( ) ) ;
2021-03-24 23:41:52 -07:00
let ( new_root , tower_slots ) = tower . record_bank_vote ( bank , vote_account_pubkey ) ;
2020-09-18 22:03:54 -07:00
let last_vote = tower . last_vote_and_timestamp ( ) ;
if let Err ( err ) = tower . save ( & cluster_info . keypair ) {
error! ( " Unable to save tower: {:?} " , err ) ;
std ::process ::exit ( 1 ) ;
}
if let Some ( new_root ) = new_root {
2019-05-20 15:01:55 -07:00
// get the root bank before squash
let root_bank = bank_forks
. read ( )
. unwrap ( )
. get ( new_root )
. expect ( " Root bank doesn't exist " )
. clone ( ) ;
2019-06-11 18:27:47 -07:00
let mut rooted_banks = root_bank . parents ( ) ;
2020-09-28 19:43:05 -07:00
rooted_banks . push ( root_bank . clone ( ) ) ;
2019-06-11 18:27:47 -07:00
let rooted_slots : Vec < _ > = rooted_banks . iter ( ) . map ( | bank | bank . slot ( ) ) . collect ( ) ;
2020-01-13 13:13:52 -08:00
// Call leader schedule_cache.set_root() before blockstore.set_root() because
2019-08-27 15:09:41 -07:00
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to
2019-11-14 11:49:31 -08:00
// get shreds for repair on gossip before we update leader schedule, otherwise they may
2019-08-27 15:09:41 -07:00
// get dropped.
leader_schedule_cache . set_root ( rooted_banks . last ( ) . unwrap ( ) ) ;
2020-01-13 13:13:52 -08:00
blockstore
2019-05-29 09:43:22 -07:00
. set_roots ( & rooted_slots )
. expect ( " Ledger set roots failed " ) ;
2020-09-09 08:33:14 -07:00
Self ::cache_block_times (
blockstore ,
bank_forks ,
& rooted_slots ,
cache_block_time_sender ,
) ;
2020-07-07 16:59:46 -07:00
let highest_confirmed_root = Some (
2020-04-24 15:49:57 -07:00
block_commitment_cache
. read ( )
. unwrap ( )
2020-07-07 16:59:46 -07:00
. highest_confirmed_root ( ) ,
2020-04-24 15:49:57 -07:00
) ;
2020-03-02 12:43:43 -08:00
Self ::handle_new_root (
new_root ,
& bank_forks ,
progress ,
2020-12-12 17:22:34 -08:00
accounts_background_request_sender ,
2020-07-07 16:59:46 -07:00
highest_confirmed_root ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots ,
2021-04-10 17:34:45 -07:00
gossip_verified_vote_hashes ,
2021-03-25 18:54:51 -07:00
has_new_vote_been_rooted ,
vote_signatures ,
2020-03-02 12:43:43 -08:00
) ;
2020-03-27 09:33:40 -07:00
subscriptions . notify_roots ( rooted_slots ) ;
2020-09-28 19:43:05 -07:00
if let Some ( sender ) = bank_notification_sender {
sender
. send ( BankNotification ::Root ( root_bank ) )
. unwrap_or_else ( | err | warn! ( " bank_notification_sender failed: {:?} " , err ) ) ;
}
2019-12-18 11:50:09 -08:00
latest_root_senders . iter ( ) . for_each ( | s | {
if let Err ( e ) = s . send ( new_root ) {
trace! ( " latest root send failed: {:?} " , e ) ;
}
} ) ;
2020-05-05 14:07:21 -07:00
info! ( " new root {} " , new_root ) ;
2019-04-12 12:03:02 -07:00
}
2020-03-02 12:43:43 -08:00
Self ::update_commitment_cache (
bank . clone ( ) ,
2020-03-30 10:29:30 -07:00
bank_forks . read ( ) . unwrap ( ) . root ( ) ,
2020-06-22 18:30:09 -07:00
progress . get_fork_stats ( bank . slot ( ) ) . unwrap ( ) . total_stake ,
2020-03-02 12:43:43 -08:00
lockouts_sender ,
) ;
2020-03-31 08:23:42 -07:00
Self ::push_vote (
cluster_info ,
bank ,
2021-04-17 10:22:00 -07:00
poh_recorder ,
2020-03-31 08:23:42 -07:00
vote_account_pubkey ,
authorized_voter_keypairs ,
2020-09-18 22:03:54 -07:00
last_vote ,
2021-01-21 05:08:07 -08:00
& tower_slots ,
2020-05-29 14:40:36 -07:00
switch_fork_decision ,
2021-03-25 18:54:51 -07:00
vote_signatures ,
* has_new_vote_been_rooted ,
2020-03-31 08:23:42 -07:00
) ;
}
2019-05-20 13:32:32 -07:00
2021-04-17 10:22:00 -07:00
#[ allow(clippy::too_many_arguments) ]
2020-03-31 08:23:42 -07:00
fn push_vote (
2020-04-21 12:54:45 -07:00
cluster_info : & ClusterInfo ,
2020-03-31 08:23:42 -07:00
bank : & Arc < Bank > ,
2021-04-17 10:22:00 -07:00
poh_recorder : & Arc < Mutex < PohRecorder > > ,
2020-03-31 08:23:42 -07:00
vote_account_pubkey : & Pubkey ,
authorized_voter_keypairs : & [ Arc < Keypair > ] ,
vote : Vote ,
2021-01-21 05:08:07 -08:00
tower : & [ Slot ] ,
2020-05-29 14:40:36 -07:00
switch_fork_decision : & SwitchForkDecision ,
2021-03-25 18:54:51 -07:00
vote_signatures : & mut Vec < Signature > ,
has_new_vote_been_rooted : bool ,
2020-03-31 08:23:42 -07:00
) {
if authorized_voter_keypairs . is_empty ( ) {
return ;
}
2020-11-30 09:18:33 -08:00
let vote_account = match bank . get_vote_account ( vote_account_pubkey ) {
None = > {
2020-03-31 08:23:42 -07:00
warn! (
" Vote account {} does not exist. Unable to vote " ,
vote_account_pubkey ,
) ;
return ;
2020-11-30 09:18:33 -08:00
}
Some ( ( _stake , vote_account ) ) = > vote_account ,
} ;
let vote_state = vote_account . vote_state ( ) ;
let vote_state = match vote_state . as_ref ( ) {
Err ( _ ) = > {
warn! (
" Vote account {} is unreadable. Unable to vote " ,
vote_account_pubkey ,
) ;
return ;
}
Ok ( vote_state ) = > vote_state ,
} ;
2020-03-31 08:23:42 -07:00
let authorized_voter_pubkey =
if let Some ( authorized_voter_pubkey ) = vote_state . get_authorized_voter ( bank . epoch ( ) ) {
authorized_voter_pubkey
} else {
warn! (
" Vote account {} has no authorized voter for epoch {}. Unable to vote " ,
vote_account_pubkey ,
bank . epoch ( )
) ;
return ;
} ;
2019-05-31 11:45:17 -07:00
2020-03-31 08:23:42 -07:00
let authorized_voter_keypair = match authorized_voter_keypairs
. iter ( )
. find ( | keypair | keypair . pubkey ( ) = = authorized_voter_pubkey )
{
None = > {
warn! ( " The authorized keypair {} for vote account {} is not available. Unable to vote " ,
authorized_voter_pubkey , vote_account_pubkey ) ;
return ;
}
Some ( authorized_voter_keypair ) = > authorized_voter_keypair ,
} ;
2021-04-16 21:14:29 -07:00
let node_keypair = & cluster_info . keypair ;
2020-03-31 08:23:42 -07:00
// Send our last few votes along with the new one
2020-09-08 07:55:09 -07:00
let vote_ix = if bank . slot ( ) > Self ::get_unlock_switch_vote_slot ( bank . cluster_type ( ) ) {
2020-05-29 14:40:36 -07:00
switch_fork_decision
. to_vote_instruction (
vote ,
& vote_account_pubkey ,
& authorized_voter_keypair . pubkey ( ) ,
)
. expect ( " Switch threshold failure should not lead to voting " )
} else {
vote_instruction ::vote (
& vote_account_pubkey ,
& authorized_voter_keypair . pubkey ( ) ,
vote ,
)
} ;
2020-03-31 08:23:42 -07:00
2020-04-24 12:03:46 -07:00
let mut vote_tx = Transaction ::new_with_payer ( & [ vote_ix ] , Some ( & node_keypair . pubkey ( ) ) ) ;
2020-03-31 08:23:42 -07:00
2021-03-26 17:45:53 -07:00
let blockhash = bank . last_blockhash ( ) ;
vote_tx . partial_sign ( & [ node_keypair . as_ref ( ) ] , blockhash ) ;
vote_tx . partial_sign ( & [ authorized_voter_keypair . as_ref ( ) ] , blockhash ) ;
2021-03-25 18:54:51 -07:00
if ! has_new_vote_been_rooted {
vote_signatures . push ( vote_tx . signatures [ 0 ] ) ;
if vote_signatures . len ( ) > MAX_VOTE_SIGNATURES {
vote_signatures . remove ( 0 ) ;
}
} else {
vote_signatures . clear ( ) ;
}
2021-03-26 17:45:53 -07:00
2021-04-17 10:22:00 -07:00
let _ = cluster_info . send_vote (
& vote_tx ,
crate ::banking_stage ::next_leader_tpu ( cluster_info , poh_recorder ) ,
) ;
2021-01-21 05:08:07 -08:00
cluster_info . push_vote ( tower , vote_tx ) ;
2019-03-21 11:53:18 -07:00
}
2019-11-04 15:44:27 -08:00
fn update_commitment_cache (
2019-09-20 19:38:56 -07:00
bank : Arc < Bank > ,
2020-03-30 10:29:30 -07:00
root : Slot ,
2020-06-22 18:30:09 -07:00
total_stake : Stake ,
2019-11-04 15:44:27 -08:00
lockouts_sender : & Sender < CommitmentAggregationData > ,
2019-07-26 10:27:57 -07:00
) {
2020-03-30 10:29:30 -07:00
if let Err ( e ) =
2020-06-22 18:30:09 -07:00
lockouts_sender . send ( CommitmentAggregationData ::new ( bank , root , total_stake ) )
2020-03-30 10:29:30 -07:00
{
2019-07-26 10:27:57 -07:00
trace! ( " lockouts_sender failed: {:?} " , e ) ;
}
}
2019-03-21 11:53:18 -07:00
fn reset_poh_recorder (
2019-05-23 23:20:04 -07:00
my_pubkey : & Pubkey ,
2020-01-13 13:13:52 -08:00
blockstore : & Blockstore ,
2019-03-21 11:53:18 -07:00
bank : & Arc < Bank > ,
2021-03-24 23:41:52 -07:00
poh_recorder : & Mutex < PohRecorder > ,
leader_schedule_cache : & LeaderScheduleCache ,
2019-03-21 11:53:18 -07:00
) {
2020-01-13 13:13:52 -08:00
let next_leader_slot = leader_schedule_cache . next_leader_slot (
& my_pubkey ,
bank . slot ( ) ,
& bank ,
Some ( blockstore ) ,
2020-02-26 13:35:50 -08:00
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS ,
2020-01-13 13:13:52 -08:00
) ;
2019-07-17 14:10:15 -07:00
poh_recorder
. lock ( )
. unwrap ( )
. reset ( bank . last_blockhash ( ) , bank . slot ( ) , next_leader_slot ) ;
2019-07-23 19:19:20 -07:00
let next_leader_msg = if let Some ( next_leader_slot ) = next_leader_slot {
2019-08-02 10:08:42 -07:00
format! ( " My next leader slot is {} " , next_leader_slot . 0 )
2019-07-23 19:19:20 -07:00
} else {
2019-08-02 10:08:42 -07:00
" I am not in the leader schedule yet " . to_owned ( )
2019-07-23 19:19:20 -07:00
} ;
info! (
2019-11-15 08:36:33 -08:00
" {} reset PoH to tick {} (within slot {}). {} " ,
2019-05-23 23:20:04 -07:00
my_pubkey ,
2019-03-21 11:53:18 -07:00
bank . tick_height ( ) ,
2019-11-05 18:40:00 -08:00
bank . slot ( ) ,
2019-07-23 19:19:20 -07:00
next_leader_msg ,
2019-03-21 11:53:18 -07:00
) ;
}
2020-07-29 23:17:40 -07:00
#[ allow(clippy::too_many_arguments) ]
2019-03-21 11:53:18 -07:00
fn replay_active_banks (
2021-03-24 23:41:52 -07:00
blockstore : & Blockstore ,
bank_forks : & RwLock < BankForks > ,
2019-05-23 23:20:04 -07:00
my_pubkey : & Pubkey ,
2020-03-26 19:57:27 -07:00
vote_account : & Pubkey ,
2020-03-02 12:43:43 -08:00
progress : & mut ProgressMap ,
2019-11-20 15:43:10 -08:00
transaction_status_sender : Option < TransactionStatusSender > ,
2019-12-12 10:36:27 -08:00
verify_recyclers : & VerifyRecyclers ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2020-08-07 11:21:35 -07:00
replay_vote_sender : & ReplayVoteSender ,
2020-09-28 19:43:05 -07:00
bank_notification_sender : & Option < BankNotificationSender > ,
2020-10-09 12:55:35 -07:00
rewards_recorder_sender : & Option < RewardsRecorderSender > ,
2021-03-12 05:44:06 -08:00
subscriptions : & Arc < RpcSubscriptions > ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & GossipDuplicateConfirmedSlots ,
ancestors : & HashMap < Slot , HashSet < Slot > > ,
descendants : & HashMap < Slot , HashSet < Slot > > ,
2019-07-18 12:04:53 -07:00
) -> bool {
2019-07-18 14:07:32 -07:00
let mut did_complete_bank = false ;
2019-07-30 13:18:33 -07:00
let mut tx_count = 0 ;
2019-03-21 11:53:18 -07:00
let active_banks = bank_forks . read ( ) . unwrap ( ) . active_banks ( ) ;
trace! ( " active banks {:?} " , active_banks ) ;
for bank_slot in & active_banks {
2019-06-20 15:50:41 -07:00
// If the fork was marked as dead, don't replay it
if progress . get ( bank_slot ) . map ( | p | p . is_dead ) . unwrap_or ( false ) {
2019-08-20 17:16:06 -07:00
debug! ( " bank_slot {:?} is marked dead " , * bank_slot ) ;
2019-06-20 15:50:41 -07:00
continue ;
}
2019-03-21 11:53:18 -07:00
let bank = bank_forks . read ( ) . unwrap ( ) . get ( * bank_slot ) . unwrap ( ) . clone ( ) ;
2020-04-08 14:35:24 -07:00
let parent_slot = bank . parent_slot ( ) ;
2020-03-26 19:57:27 -07:00
let prev_leader_slot = progress . get_bank_prev_leader_slot ( & bank ) ;
2020-04-08 14:35:24 -07:00
let ( num_blocks_on_fork , num_dropped_blocks_on_fork ) = {
let stats = progress
. get ( & parent_slot )
. expect ( " parent of active bank must exist in progress map " ) ;
let num_blocks_on_fork = stats . num_blocks_on_fork + 1 ;
let new_dropped_blocks = bank . slot ( ) - parent_slot - 1 ;
let num_dropped_blocks_on_fork =
stats . num_dropped_blocks_on_fork + new_dropped_blocks ;
( num_blocks_on_fork , num_dropped_blocks_on_fork )
} ;
2021-03-24 23:41:52 -07:00
// New children adopt the same latest duplicate ancestor as their parent.
let duplicate_stats = DuplicateStats ::new_with_unconfirmed_duplicate_ancestor (
progress . latest_unconfirmed_duplicate_ancestor ( bank . parent_slot ( ) ) ,
) ;
2019-11-19 02:36:00 -08:00
// Insert a progress entry even for slots this node is the leader for, so that
// 1) confirm_forks can report confirmation, 2) we can cache computations about
2020-03-02 12:43:43 -08:00
// this bank in `select_forks()`
2020-03-26 19:57:27 -07:00
let bank_progress = & mut progress . entry ( bank . slot ( ) ) . or_insert_with ( | | {
2020-04-08 14:35:24 -07:00
ForkProgress ::new_from_bank (
& bank ,
& my_pubkey ,
vote_account ,
prev_leader_slot ,
2021-03-24 23:41:52 -07:00
duplicate_stats ,
2020-04-08 14:35:24 -07:00
num_blocks_on_fork ,
num_dropped_blocks_on_fork ,
)
2020-03-26 19:57:27 -07:00
} ) ;
2019-07-30 13:18:33 -07:00
if bank . collector_id ( ) ! = my_pubkey {
2021-03-24 23:41:52 -07:00
let root_slot = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
2020-01-14 17:15:26 -08:00
let replay_result = Self ::replay_blockstore_into_bank (
2019-11-20 15:43:10 -08:00
& bank ,
2020-01-13 13:13:52 -08:00
& blockstore ,
2019-11-20 15:43:10 -08:00
bank_progress ,
transaction_status_sender . clone ( ) ,
2020-08-07 11:21:35 -07:00
replay_vote_sender ,
2019-12-12 10:36:27 -08:00
verify_recyclers ,
2019-11-20 15:43:10 -08:00
) ;
2020-01-14 17:15:26 -08:00
match replay_result {
Ok ( replay_tx_count ) = > tx_count + = replay_tx_count ,
Err ( err ) = > {
2021-03-24 23:41:52 -07:00
// Error means the slot needs to be marked as dead
Self ::mark_dead_slot (
blockstore ,
& bank ,
root_slot ,
& err ,
subscriptions ,
gossip_duplicate_confirmed_slots ,
ancestors ,
descendants ,
progress ,
heaviest_subtree_fork_choice ,
) ;
2020-01-14 17:15:26 -08:00
// If the bank was corrupted, don't try to run the below logic to check if the
// bank is completed
continue ;
}
2019-07-30 13:18:33 -07:00
}
2019-03-21 11:53:18 -07:00
}
2019-07-09 15:36:30 -07:00
assert_eq! ( * bank_slot , bank . slot ( ) ) ;
2020-01-14 17:15:26 -08:00
if bank . is_complete ( ) {
2021-03-24 23:41:52 -07:00
bank_progress . replay_stats . report_stats (
bank . slot ( ) ,
bank_progress . replay_progress . num_entries ,
bank_progress . replay_progress . num_shreds ,
) ;
did_complete_bank = true ;
info! ( " bank frozen: {} " , bank . slot ( ) ) ;
2021-03-26 15:47:35 -07:00
if let Some ( transaction_status_sender ) = transaction_status_sender . clone ( ) {
transaction_status_sender . send_transaction_status_freeze_message ( & bank ) ;
}
2021-03-24 23:41:52 -07:00
bank . freeze ( ) ;
2021-04-12 01:00:59 -07:00
let bank_hash = bank . hash ( ) ;
assert_ne! ( bank_hash , Hash ::default ( ) ) ;
heaviest_subtree_fork_choice . add_new_leaf_slot (
( bank . slot ( ) , bank . hash ( ) ) ,
Some ( ( bank . parent_slot ( ) , bank . parent_hash ( ) ) ) ,
) ;
2021-03-24 23:41:52 -07:00
check_slot_agrees_with_cluster (
bank . slot ( ) ,
bank_forks . read ( ) . unwrap ( ) . root ( ) ,
Some ( bank . hash ( ) ) ,
gossip_duplicate_confirmed_slots ,
ancestors ,
descendants ,
progress ,
heaviest_subtree_fork_choice ,
SlotStateUpdate ::Frozen ,
) ;
if let Some ( sender ) = bank_notification_sender {
sender
. send ( BankNotification ::Frozen ( bank . clone ( ) ) )
. unwrap_or_else ( | err | warn! ( " bank_notification_sender failed: {:?} " , err ) ) ;
2020-12-09 23:14:31 -08:00
}
2021-03-24 23:41:52 -07:00
Self ::record_rewards ( & bank , & rewards_recorder_sender ) ;
2019-07-09 15:36:30 -07:00
} else {
trace! (
" bank {} not completed tick_height: {}, max_tick_height: {} " ,
bank . slot ( ) ,
bank . tick_height ( ) ,
bank . max_tick_height ( )
) ;
2019-03-21 11:53:18 -07:00
}
}
2019-08-12 15:15:34 -07:00
inc_new_counter_info! ( " replay_stage-replay_transactions " , tx_count ) ;
2019-07-18 14:07:32 -07:00
did_complete_bank
2019-03-21 11:53:18 -07:00
}
2020-06-11 12:16:04 -07:00
/// For each frozen bank whose stats have not yet been computed, collects the
/// vote lockouts, feeds the result into `heaviest_subtree_fork_choice`, and
/// caches the fork stats in `progress`. Propagation status and tower-derived
/// checks (lockout, vote threshold, recency) are refreshed on every call for
/// every frozen bank, since they depend on state that changes between passes.
///
/// Returns the slots whose vote/weight stats were newly computed on this pass.
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute_bank_stats(
    my_pubkey: &Pubkey,
    ancestors: &HashMap<u64, HashSet<u64>>,
    frozen_banks: &mut Vec<Arc<Bank>>,
    tower: &Tower,
    progress: &mut ProgressMap,
    vote_tracker: &VoteTracker,
    cluster_slots: &ClusterSlots,
    bank_forks: &RwLock<BankForks>,
    heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
) -> Vec<Slot> {
    // Process banks in ascending slot order so ancestors are handled before
    // descendants.
    frozen_banks.sort_by_key(|bank| bank.slot());
    let mut new_stats = vec![];
    for bank in frozen_banks {
        let bank_slot = bank.slot();
        // Only time progress map should be missing a bank slot
        // is if this node was the leader for this slot as those banks
        // are not replayed in replay_active_banks()
        {
            let is_computed = progress
                .get_fork_stats_mut(bank_slot)
                .expect("All frozen banks must exist in the Progress map")
                .computed;
            if !is_computed {
                let computed_bank_state = Tower::collect_vote_lockouts(
                    my_pubkey,
                    bank_slot,
                    bank.vote_accounts().into_iter(),
                    &ancestors,
                );
                // Notify any listeners of the votes found in this newly computed
                // bank
                heaviest_subtree_fork_choice.compute_bank_stats(
                    &bank,
                    tower,
                    progress,
                    &computed_bank_state,
                );
                let ComputedBankState {
                    voted_stakes,
                    total_stake,
                    lockout_intervals,
                    ..
                } = computed_bank_state;
                let stats = progress
                    .get_fork_stats_mut(bank_slot)
                    .expect("All frozen banks must exist in the Progress map");
                stats.total_stake = total_stake;
                stats.voted_stakes = voted_stakes;
                stats.lockout_intervals = lockout_intervals;
                stats.block_height = bank.block_height();
                stats.bank_hash = Some(bank.hash());
                stats.computed = true;
                new_stats.push(bank_slot);
                datapoint_info!(
                    "bank_weight",
                    ("slot", bank_slot, i64),
                    // u128 too large for influx, convert to hex
                    ("weight", format!("{:X}", stats.weight), String),
                );
                info!(
                    "{} slot_weight: {} {} {} {}",
                    my_pubkey,
                    bank_slot,
                    stats.weight,
                    stats.fork_weight,
                    bank.parent().map(|b| b.slot()).unwrap_or(0)
                );
            }
        }
        // Re-checked every pass (not just when `computed` flips): new votes or
        // gossip updates may have arrived since the previous call.
        Self::update_propagation_status(
            progress,
            bank_slot,
            bank_forks,
            vote_tracker,
            cluster_slots,
        );
        // Tower-derived checks depend on the local tower, which can change
        // between passes, so recompute them unconditionally as well.
        let stats = progress
            .get_fork_stats_mut(bank_slot)
            .expect("All frozen banks must exist in the Progress map");
        stats.vote_threshold =
            tower.check_vote_stake_threshold(bank_slot, &stats.voted_stakes, stats.total_stake);
        stats.is_locked_out = tower.is_locked_out(bank_slot, &ancestors);
        stats.has_voted = tower.has_voted(bank_slot);
        stats.is_recent = tower.is_recent(bank_slot);
    }
    new_stats
}
2020-03-26 19:57:27 -07:00
fn update_propagation_status (
progress : & mut ProgressMap ,
slot : Slot ,
bank_forks : & RwLock < BankForks > ,
vote_tracker : & VoteTracker ,
2020-03-30 19:57:11 -07:00
cluster_slots : & ClusterSlots ,
2020-03-26 19:57:27 -07:00
) {
// If propagation has already been confirmed, return
if progress . is_propagated ( slot ) {
return ;
}
// Otherwise we have to check the votes for confirmation
let mut slot_vote_tracker = progress
. get_propagated_stats ( slot )
. expect ( " All frozen banks must exist in the Progress map " )
. slot_vote_tracker
. clone ( ) ;
if slot_vote_tracker . is_none ( ) {
slot_vote_tracker = vote_tracker . get_slot_vote_tracker ( slot ) ;
progress
. get_propagated_stats_mut ( slot )
. expect ( " All frozen banks must exist in the Progress map " )
. slot_vote_tracker = slot_vote_tracker . clone ( ) ;
}
2020-03-30 19:57:11 -07:00
let mut cluster_slot_pubkeys = progress
. get_propagated_stats ( slot )
. expect ( " All frozen banks must exist in the Progress map " )
. cluster_slot_pubkeys
. clone ( ) ;
if cluster_slot_pubkeys . is_none ( ) {
cluster_slot_pubkeys = cluster_slots . lookup ( slot ) ;
progress
. get_propagated_stats_mut ( slot )
. expect ( " All frozen banks must exist in the Progress map " )
. cluster_slot_pubkeys = cluster_slot_pubkeys . clone ( ) ;
}
2020-03-26 19:57:27 -07:00
let newly_voted_pubkeys = slot_vote_tracker
. as_ref ( )
2021-04-10 17:34:45 -07:00
. and_then ( | slot_vote_tracker | {
slot_vote_tracker . write ( ) . unwrap ( ) . get_voted_slot_updates ( )
} )
2020-06-11 12:16:04 -07:00
. unwrap_or_default ( ) ;
2020-03-26 19:57:27 -07:00
2020-03-30 19:57:11 -07:00
let cluster_slot_pubkeys = cluster_slot_pubkeys
. map ( | v | v . read ( ) . unwrap ( ) . keys ( ) . cloned ( ) . collect ( ) )
2020-06-11 12:16:04 -07:00
. unwrap_or_default ( ) ;
2020-03-30 19:57:11 -07:00
2020-03-26 19:57:27 -07:00
Self ::update_fork_propagated_threshold_from_votes (
progress ,
newly_voted_pubkeys ,
2020-03-30 19:57:11 -07:00
cluster_slot_pubkeys ,
2020-03-26 19:57:27 -07:00
slot ,
bank_forks ,
) ;
}
2020-03-02 12:43:43 -08:00
// Given the heaviest bank, `heaviest_bank`, and the heaviest bank on the same
// fork as the validator's last vote, `heaviest_bank_on_same_voted_fork`,
// return a bank to vote on and a bank to reset PoH to (either may be absent),
// along with the reasons voting on the heaviest bank failed, if any.
pub(crate) fn select_vote_and_reset_forks(
    heaviest_bank: &Arc<Bank>,
    // Should only be None if there was no previous vote
    heaviest_bank_on_same_voted_fork: Option<&Arc<Bank>>,
    ancestors: &HashMap<u64, HashSet<u64>>,
    descendants: &HashMap<u64, HashSet<u64>>,
    progress: &ProgressMap,
    tower: &mut Tower,
) -> SelectVoteAndResetForkResult {
    // Try to vote on the actual heaviest fork. If the heaviest bank is
    // locked out or fails the threshold check, the validator will:
    // 1) Not continue to vote on current fork, waiting for lockouts to expire/
    //    threshold check to pass
    // 2) Will reset PoH to heaviest fork in order to make sure the heaviest
    //    fork is propagated
    // This above behavior should ensure correct voting and resetting PoH
    // behavior under all cases:
    // 1) The best "selected" bank is on same fork
    // 2) The best "selected" bank is on a different fork,
    //    switch_threshold fails
    // 3) The best "selected" bank is on a different fork,
    //    switch_threshold succeeds
    let mut failure_reasons = vec![];
    let selected_fork = {
        let switch_fork_decision = tower.check_switch_threshold(
            heaviest_bank.slot(),
            &ancestors,
            &descendants,
            &progress,
            heaviest_bank.total_epoch_stake(),
            heaviest_bank
                .epoch_vote_accounts(heaviest_bank.epoch())
                .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"),
        );
        match switch_fork_decision {
            SwitchForkDecision::FailedSwitchThreshold(_, _) => {
                let reset_bank = heaviest_bank_on_same_voted_fork;
                // If we can't switch and our last vote was on a non-duplicate/confirmed slot, then
                // reset to the next votable bank on the same fork as our last vote,
                // but don't vote.
                // We don't just reset to the heaviest fork when switch threshold fails because
                // a situation like this can occur:

                /* Figure 1:
                              slot 0
                                |
                              slot 1
                            /        \
                slot 2 (last vote)     |
                            |      slot 8 (10%)
                    slot 4 (9%)
                */

                // Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails
                // the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier),
                // then there will be no blocks to include the votes for slot 4, and the network halts
                // because 90% of validators can't vote
                info!(
                    "Waiting to switch vote to {}, resetting to slot {:?} for now",
                    heaviest_bank.slot(),
                    reset_bank.as_ref().map(|b| b.slot()),
                );
                failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold(
                    heaviest_bank.slot(),
                ));
                reset_bank.map(|b| (b, switch_fork_decision))
            }
            SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => {
                // If we can't switch and our last vote was on an unconfirmed, duplicate slot,
                // then we need to reset to the heaviest bank, even if the heaviest bank is not
                // a descendant of the last vote (usually for switch threshold failures we reset
                // to the heaviest descendant of the last vote, but in this case, the last vote
                // was on a duplicate branch). This is because in the case of *unconfirmed* duplicate
                // slots, somebody needs to generate an alternative branch to escape a situation
                // like a 50-50 split where both partitions have voted on different versions of the
                // same duplicate slot.
                // Unlike the situation described in `Figure 1` above, this is safe. To see why,
                // imagine the same situation described in Figure 1 above occurs, but slot 2 is
                // a duplicate block. There are now a few cases:
                //
                // Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + DUPLICATE_LIVENESS_THRESHOLD = 1;
                //
                // 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. Because duplicate slots can be confirmed
                // by gossip, unlike the situation described in `Figure 1`, we don't need those
                // votes to land in a descendant to confirm slot 2. Once slot 2 is confirmed by
                // gossip votes, that fork is added back to the fork choice set and falls back into
                // normal fork choice, which is covered by the `FailedSwitchThreshold` case above
                // (everyone will resume building on their last voted fork, slot 4, since slot 8
                // doesn't have enough stake for the switch threshold)
                //
                // 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, > SWITCH_FORK_THRESHOLD of the network voted
                // on slot 8. Then everybody abandons the duplicate fork from fork choice and both builds
                // on slot 8's fork. They can also vote on slot 8's fork because it has sufficient weight
                // to pass the switching threshold
                //
                // 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, <= SWITCH_FORK_THRESHOLD of the network voted
                // on slot 8. This means more than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot
                // guarantee progress anyways

                // Note the heaviest fork is never descended from a known unconfirmed duplicate slot
                // because the fork choice rule ensures that (marks it as an invalid candidate),
                // thus it's safe to use as the reset bank.
                let reset_bank = Some(heaviest_bank);
                info!(
                    "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}",
                    heaviest_bank.slot(),
                    reset_bank.as_ref().map(|b| b.slot()),
                    latest_duplicate_ancestor,
                );
                failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold(
                    heaviest_bank.slot(),
                ));
                reset_bank.map(|b| (b, switch_fork_decision))
            }
            // Switch succeeded (or no switch was necessary): the heaviest bank
            // is a candidate for voting.
            _ => Some((heaviest_bank, switch_fork_decision)),
        }
    };
    if let Some((bank, switch_fork_decision)) = selected_fork {
        let (is_locked_out, vote_threshold, is_leader_slot, fork_weight) = {
            let fork_stats = progress.get_fork_stats(bank.slot()).unwrap();
            let propagated_stats = &progress.get_propagated_stats(bank.slot()).unwrap();
            (
                fork_stats.is_locked_out,
                fork_stats.vote_threshold,
                propagated_stats.is_leader_slot,
                fork_stats.weight,
            )
        };
        // Our own leader slots are considered propagated by definition.
        let propagation_confirmed = is_leader_slot || progress.is_propagated(bank.slot());
        if is_locked_out {
            failure_reasons.push(HeaviestForkFailures::LockedOut(bank.slot()));
        }
        if !vote_threshold {
            failure_reasons.push(HeaviestForkFailures::FailedThreshold(bank.slot()));
        }
        if !propagation_confirmed {
            failure_reasons.push(HeaviestForkFailures::NoPropagatedConfirmation(bank.slot()));
        }
        // Vote only when every check passes; otherwise reset PoH to the
        // selected bank without voting.
        if !is_locked_out
            && vote_threshold
            && propagation_confirmed
            && switch_fork_decision.can_vote()
        {
            info!("voting: {} {}", bank.slot(), fork_weight);
            SelectVoteAndResetForkResult {
                vote_bank: Some((bank.clone(), switch_fork_decision)),
                reset_bank: Some(bank.clone()),
                heaviest_fork_failures: failure_reasons,
            }
        } else {
            SelectVoteAndResetForkResult {
                vote_bank: None,
                reset_bank: Some(bank.clone()),
                heaviest_fork_failures: failure_reasons,
            }
        }
    } else {
        // No bank was selected at all (e.g. switch failed and there was no
        // previous vote): neither vote nor reset.
        SelectVoteAndResetForkResult {
            vote_bank: None,
            reset_bank: None,
            heaviest_fork_failures: failure_reasons,
        }
    }
}
2020-03-26 19:57:27 -07:00
fn update_fork_propagated_threshold_from_votes (
progress : & mut ProgressMap ,
2021-02-07 18:07:00 -08:00
mut newly_voted_pubkeys : Vec < Pubkey > ,
mut cluster_slot_pubkeys : Vec < Pubkey > ,
2020-03-26 19:57:27 -07:00
fork_tip : Slot ,
bank_forks : & RwLock < BankForks > ,
) {
let mut current_leader_slot = progress . get_latest_leader_slot ( fork_tip ) ;
let mut did_newly_reach_threshold = false ;
let root = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
loop {
// These cases mean confirmation of propagation on any earlier
// leader blocks must have been reached
2020-09-11 02:03:11 -07:00
if current_leader_slot = = None | | current_leader_slot . unwrap ( ) < root {
2020-03-26 19:57:27 -07:00
break ;
}
let leader_propagated_stats = progress
. get_propagated_stats_mut ( current_leader_slot . unwrap ( ) )
2020-09-11 02:03:11 -07:00
. expect ( " current_leader_slot >= root, so must exist in the progress map " ) ;
2020-03-26 19:57:27 -07:00
// If a descendant has reached propagation threshold, then
2020-03-30 19:57:11 -07:00
// all its ancestor banks have also reached propagation
2020-03-26 19:57:27 -07:00
// threshold as well (Validators can't have voted for a
// descendant without also getting the ancestor block)
if leader_propagated_stats . is_propagated | |
// If there's no new validators to record, and there's no
// newly achieved threshold, then there's no further
// information to propagate backwards to past leader blocks
2020-03-30 19:57:11 -07:00
( newly_voted_pubkeys . is_empty ( ) & & cluster_slot_pubkeys . is_empty ( ) & &
! did_newly_reach_threshold )
2020-03-26 19:57:27 -07:00
{
break ;
}
// We only iterate through the list of leader slots by traversing
// the linked list of 'prev_leader_slot`'s outlined in the
// `progress` map
assert! ( leader_propagated_stats . is_leader_slot ) ;
let leader_bank = bank_forks
. read ( )
. unwrap ( )
. get ( current_leader_slot . unwrap ( ) )
. expect ( " Entry in progress map must exist in BankForks " )
. clone ( ) ;
did_newly_reach_threshold = Self ::update_slot_propagated_threshold_from_votes (
& mut newly_voted_pubkeys ,
2020-03-30 19:57:11 -07:00
& mut cluster_slot_pubkeys ,
2020-03-26 19:57:27 -07:00
& leader_bank ,
leader_propagated_stats ,
did_newly_reach_threshold ,
) | | did_newly_reach_threshold ;
// Now jump to process the previous leader slot
current_leader_slot = leader_propagated_stats . prev_leader_slot ;
}
}
/// Records newly observed voter and node pubkeys against a single leader
/// slot's propagation stats, and returns whether this slot newly crossed the
/// superminority propagation threshold on this call.
///
/// The pubkey vectors are filtered in place: entries already known to this
/// slot are dropped so ancestor slots (processed next by the caller) don't
/// re-examine them.
fn update_slot_propagated_threshold_from_votes(
    newly_voted_pubkeys: &mut Vec<Pubkey>,
    cluster_slot_pubkeys: &mut Vec<Pubkey>,
    leader_bank: &Bank,
    leader_propagated_stats: &mut PropagatedStats,
    did_child_reach_threshold: bool,
) -> bool {
    // Track whether this slot newly confirm propagation
    // throughout the network (switched from is_propagated == false
    // to is_propagated == true)
    let mut did_newly_reach_threshold = false;
    // If a child of this slot confirmed propagation, then
    // we can return early as this implies this slot must also
    // be propagated
    if did_child_reach_threshold {
        if !leader_propagated_stats.is_propagated {
            leader_propagated_stats.is_propagated = true;
            return true;
        } else {
            return false;
        }
    }
    if leader_propagated_stats.is_propagated {
        return false;
    }
    // Remove the vote/node pubkeys that we already know voted for this
    // slot. These vote accounts/validator identities are safe to drop
    // because they don't need to be ported back any further because earlier
    // parents must have:
    // 1) Also recorded these pubkeys already, or
    // 2) Already reached the propagation threshold, in which case
    //    they no longer need to track the set of propagated validators
    // NOTE: the closures below intentionally have side effects — every pubkey
    // is recorded into the stats, but only previously unseen ones survive the
    // retain.
    newly_voted_pubkeys.retain(|vote_pubkey| {
        let exists = leader_propagated_stats
            .propagated_validators
            .contains(vote_pubkey);
        leader_propagated_stats.add_vote_pubkey(
            *vote_pubkey,
            leader_bank.epoch_vote_account_stake(&vote_pubkey),
        );
        !exists
    });
    cluster_slot_pubkeys.retain(|node_pubkey| {
        let exists = leader_propagated_stats
            .propagated_node_ids
            .contains(node_pubkey);
        leader_propagated_stats.add_node_pubkey(&*node_pubkey, leader_bank);
        !exists
    });
    // Propagated once the observed validator stake exceeds the superminority
    // threshold (a zero total epoch stake trivially counts as propagated).
    if leader_propagated_stats.total_epoch_stake == 0
        || leader_propagated_stats.propagated_validators_stake as f64
            / leader_propagated_stats.total_epoch_stake as f64
            > SUPERMINORITY_THRESHOLD
    {
        leader_propagated_stats.is_propagated = true;
        did_newly_reach_threshold = true
    }
    did_newly_reach_threshold
}
2021-03-24 23:41:52 -07:00
/// Marks each slot in `confirmed_forks` as supermajority-confirmed in the
/// progress map and propagates that confirmation through
/// `check_slot_agrees_with_cluster` (which updates duplicate-slot state and
/// fork choice for the slot's ancestors/descendants).
fn mark_slots_confirmed(
    confirmed_forks: &[Slot],
    bank_forks: &RwLock<BankForks>,
    progress: &mut ProgressMap,
    ancestors: &HashMap<Slot, HashSet<Slot>>,
    descendants: &HashMap<Slot, HashSet<Slot>>,
    fork_choice: &mut HeaviestSubtreeForkChoice,
) {
    // Snapshot the root and each confirmed slot's bank hash under a single
    // read lock so the loop below doesn't touch bank_forks.
    let (root_slot, bank_hashes) = {
        let r_bank_forks = bank_forks.read().unwrap();
        let bank_hashes: Vec<Option<Hash>> = confirmed_forks
            .iter()
            .map(|slot| r_bank_forks.get(*slot).map(|bank| bank.hash()))
            .collect();
        (r_bank_forks.root(), bank_hashes)
    };
    for (slot, bank_hash) in confirmed_forks.iter().zip(bank_hashes.into_iter()) {
        // This case should be guaranteed as false by confirm_forks()
        if let Some(false) = progress.is_supermajority_confirmed(*slot) {
            // Because supermajority confirmation will iterate through all ancestors/descendants
            // in `check_slot_agrees_with_cluster`, only incur this cost if the slot wasn't already
            // confirmed
            progress.set_supermajority_confirmed_slot(*slot);
            check_slot_agrees_with_cluster(
                *slot,
                root_slot,
                bank_hash,
                // Don't need to pass the gossip confirmed slots since `slot`
                // is already marked as confirmed in progress
                &BTreeMap::new(),
                ancestors,
                descendants,
                progress,
                fork_choice,
                SlotStateUpdate::DuplicateConfirmed,
            );
        }
    }
}
2019-03-27 04:30:26 -07:00
fn confirm_forks (
2019-06-24 13:41:23 -07:00
tower : & Tower ,
2020-06-22 18:30:09 -07:00
voted_stakes : & VotedStakes ,
total_stake : Stake ,
2020-03-02 12:43:43 -08:00
progress : & ProgressMap ,
2020-01-28 16:02:28 -08:00
bank_forks : & RwLock < BankForks > ,
2020-02-03 16:48:24 -08:00
) -> Vec < Slot > {
let mut confirmed_forks = vec! [ ] ;
for ( slot , prog ) in progress . iter ( ) {
2021-03-24 23:41:52 -07:00
if ! prog . fork_stats . is_supermajority_confirmed {
2020-02-03 16:48:24 -08:00
let bank = bank_forks
. read ( )
. unwrap ( )
. get ( * slot )
. expect ( " bank in progress must exist in BankForks " )
. clone ( ) ;
2020-01-14 17:15:26 -08:00
let duration = prog . replay_stats . started . elapsed ( ) . as_millis ( ) ;
2020-06-22 18:30:09 -07:00
if bank . is_frozen ( ) & & tower . is_slot_confirmed ( * slot , voted_stakes , total_stake ) {
2019-11-19 02:36:00 -08:00
info! ( " validator fork confirmed {} {}ms " , * slot , duration ) ;
2020-02-19 14:25:49 -08:00
datapoint_info! ( " validator-confirmation " , ( " duration_ms " , duration , i64 ) ) ;
2020-02-03 16:48:24 -08:00
confirmed_forks . push ( * slot ) ;
2019-11-19 02:36:00 -08:00
} else {
debug! (
" validator fork not confirmed {} {}ms {:?} " ,
* slot ,
duration ,
2020-06-22 18:30:09 -07:00
voted_stakes . get ( slot )
2019-11-19 02:36:00 -08:00
) ;
}
2019-03-27 04:30:26 -07:00
}
2019-11-19 02:36:00 -08:00
}
2020-02-03 16:48:24 -08:00
confirmed_forks
2019-03-27 04:30:26 -07:00
}
2021-04-10 17:34:45 -07:00
#[ allow(clippy::too_many_arguments) ]
2020-01-28 16:02:28 -08:00
pub ( crate ) fn handle_new_root (
2020-04-24 15:49:57 -07:00
new_root : Slot ,
2020-01-28 16:02:28 -08:00
bank_forks : & RwLock < BankForks > ,
2020-03-02 12:43:43 -08:00
progress : & mut ProgressMap ,
2021-02-18 23:42:09 -08:00
accounts_background_request_sender : & AbsRequestSender ,
2020-07-07 16:59:46 -07:00
highest_confirmed_root : Option < Slot > ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & mut GossipDuplicateConfirmedSlots ,
2021-04-10 17:34:45 -07:00
gossip_verified_vote_hashes : & mut GossipVerifiedVoteHashes ,
2021-03-25 18:54:51 -07:00
has_new_vote_been_rooted : & mut bool ,
voted_signatures : & mut Vec < Signature > ,
2019-03-19 17:30:36 -07:00
) {
2020-04-24 15:49:57 -07:00
bank_forks . write ( ) . unwrap ( ) . set_root (
new_root ,
2020-12-12 17:22:34 -08:00
accounts_background_request_sender ,
2020-07-07 16:59:46 -07:00
highest_confirmed_root ,
2020-04-24 15:49:57 -07:00
) ;
2019-03-19 17:30:36 -07:00
let r_bank_forks = bank_forks . read ( ) . unwrap ( ) ;
2021-03-25 18:54:51 -07:00
let new_root_bank = & r_bank_forks [ new_root ] ;
if ! * has_new_vote_been_rooted {
for signature in voted_signatures . iter ( ) {
if new_root_bank . get_signature_status ( signature ) . is_some ( ) {
* has_new_vote_been_rooted = true ;
break ;
}
}
if * has_new_vote_been_rooted {
std ::mem ::take ( voted_signatures ) ;
}
}
2020-03-26 19:57:27 -07:00
progress . handle_new_root ( & r_bank_forks ) ;
2021-04-12 01:00:59 -07:00
heaviest_subtree_fork_choice . set_root ( ( new_root , r_bank_forks . root_bank ( ) . hash ( ) ) ) ;
2021-03-24 23:41:52 -07:00
let mut slots_ge_root = gossip_duplicate_confirmed_slots . split_off ( & new_root ) ;
2021-04-12 01:00:59 -07:00
// gossip_confirmed_slots now only contains entries >= `new_root`
2021-03-24 23:41:52 -07:00
std ::mem ::swap ( gossip_duplicate_confirmed_slots , & mut slots_ge_root ) ;
2021-04-10 17:34:45 -07:00
let mut slots_ge_root = gossip_verified_vote_hashes . split_off ( & new_root ) ;
// gossip_verified_vote_hashes now only contains entries >= `new_root`
std ::mem ::swap ( gossip_verified_vote_hashes , & mut slots_ge_root ) ;
2019-03-19 17:30:36 -07:00
}
2019-04-19 02:39:44 -07:00
fn generate_new_bank_forks (
2020-01-13 13:13:52 -08:00
blockstore : & Blockstore ,
2020-03-26 19:57:27 -07:00
bank_forks : & RwLock < BankForks > ,
2019-04-19 02:39:44 -07:00
leader_schedule_cache : & Arc < LeaderScheduleCache > ,
2019-11-26 00:42:54 -08:00
subscriptions : & Arc < RpcSubscriptions > ,
2020-03-26 19:57:27 -07:00
progress : & mut ProgressMap ,
2019-04-19 02:39:44 -07:00
) {
2019-02-07 15:10:54 -08:00
// Find the next slot that chains to the old slot
2020-03-26 19:57:27 -07:00
let forks = bank_forks . read ( ) . unwrap ( ) ;
2019-02-26 21:57:45 -08:00
let frozen_banks = forks . frozen_banks ( ) ;
2020-04-24 15:49:57 -07:00
let frozen_bank_slots : Vec < u64 > = frozen_banks
. keys ( )
. cloned ( )
. filter ( | s | * s > = forks . root ( ) )
. collect ( ) ;
2020-01-13 13:13:52 -08:00
let next_slots = blockstore
2019-03-04 16:40:28 -08:00
. get_slots_since ( & frozen_bank_slots )
2019-02-26 21:57:45 -08:00
. expect ( " Db error " ) ;
2019-03-18 16:04:36 -07:00
// Filter out what we've already seen
2019-07-17 14:10:15 -07:00
trace! ( " generate new forks {:?} " , {
let mut next_slots = next_slots . iter ( ) . collect ::< Vec < _ > > ( ) ;
next_slots . sort ( ) ;
next_slots
} ) ;
2019-12-13 17:20:31 -08:00
let mut new_banks = HashMap ::new ( ) ;
2019-11-08 11:30:25 -08:00
for ( parent_slot , children ) in next_slots {
2019-02-26 21:57:45 -08:00
let parent_bank = frozen_banks
2019-11-08 11:30:25 -08:00
. get ( & parent_slot )
2019-02-26 21:57:45 -08:00
. expect ( " missing parent in bank forks " )
. clone ( ) ;
2019-11-08 11:30:25 -08:00
for child_slot in children {
2019-12-13 17:20:31 -08:00
if forks . get ( child_slot ) . is_some ( ) | | new_banks . get ( & child_slot ) . is_some ( ) {
2019-11-08 11:30:25 -08:00
trace! ( " child already active or frozen {} " , child_slot ) ;
2019-03-03 16:44:06 -08:00
continue ;
2019-02-28 19:49:22 -08:00
}
2019-04-19 02:39:44 -07:00
let leader = leader_schedule_cache
2019-11-08 11:30:25 -08:00
. slot_leader_at ( child_slot , Some ( & parent_bank ) )
2019-04-19 02:39:44 -07:00
. unwrap ( ) ;
2019-11-26 00:42:54 -08:00
info! (
" new fork:{} parent:{} root:{} " ,
child_slot ,
parent_slot ,
forks . root ( )
) ;
2020-02-25 15:49:59 -08:00
let child_bank = Self ::new_bank_from_parent_with_notify (
& parent_bank ,
child_slot ,
forks . root ( ) ,
& leader ,
subscriptions ,
) ;
2021-02-07 18:07:00 -08:00
let empty : Vec < Pubkey > = vec! [ ] ;
2020-03-30 19:57:11 -07:00
Self ::update_fork_propagated_threshold_from_votes (
progress ,
empty ,
2021-02-07 18:07:00 -08:00
vec! [ leader ] ,
2020-03-30 19:57:11 -07:00
parent_bank . slot ( ) ,
bank_forks ,
) ;
2020-02-04 18:50:24 -08:00
new_banks . insert ( child_slot , child_bank ) ;
2019-02-26 21:57:45 -08:00
}
}
2019-12-13 17:20:31 -08:00
drop ( forks ) ;
2020-03-26 19:57:27 -07:00
let mut forks = bank_forks . write ( ) . unwrap ( ) ;
2019-12-13 17:20:31 -08:00
for ( _ , bank ) in new_banks {
forks . insert ( bank ) ;
}
2019-02-07 15:10:54 -08:00
}
2018-10-10 16:49:41 -07:00
2020-02-25 15:49:59 -08:00
fn new_bank_from_parent_with_notify (
parent : & Arc < Bank > ,
slot : u64 ,
root_slot : u64 ,
leader : & Pubkey ,
subscriptions : & Arc < RpcSubscriptions > ,
) -> Bank {
subscriptions . notify_slot ( slot , parent . slot ( ) , root_slot ) ;
2020-10-09 12:55:35 -07:00
Bank ::new_from_parent ( parent , leader , slot )
2020-02-25 15:49:59 -08:00
}
2020-02-11 17:01:49 -08:00
fn record_rewards ( bank : & Bank , rewards_recorder_sender : & Option < RewardsRecorderSender > ) {
if let Some ( rewards_recorder_sender ) = rewards_recorder_sender {
2020-10-09 12:55:35 -07:00
let rewards = bank . rewards . read ( ) . unwrap ( ) ;
if ! rewards . is_empty ( ) {
2020-02-11 17:01:49 -08:00
rewards_recorder_sender
2020-10-09 12:55:35 -07:00
. send ( ( bank . slot ( ) , rewards . clone ( ) ) )
2020-02-11 17:01:49 -08:00
. unwrap_or_else ( | err | warn! ( " rewards_recorder_sender failed: {:?} " , err ) ) ;
2020-02-04 18:50:24 -08:00
}
}
}
2020-09-09 08:33:14 -07:00
fn cache_block_times (
blockstore : & Arc < Blockstore > ,
bank_forks : & Arc < RwLock < BankForks > > ,
rooted_slots : & [ Slot ] ,
cache_block_time_sender : & Option < CacheBlockTimeSender > ,
) {
if let Some ( cache_block_time_sender ) = cache_block_time_sender {
for slot in rooted_slots {
if blockstore
. get_block_time ( * slot )
. unwrap_or_default ( )
. is_none ( )
{
if let Some ( rooted_bank ) = bank_forks . read ( ) . unwrap ( ) . get ( * slot ) {
cache_block_time_sender
. send ( rooted_bank . clone ( ) )
. unwrap_or_else ( | err | {
warn! ( " cache_block_time_sender failed: {:?} " , err )
} ) ;
} else {
error! (
" rooted_bank {:?} not available in BankForks; block time not cached " ,
slot
) ;
}
}
}
}
}
2020-09-08 07:55:09 -07:00
pub fn get_unlock_switch_vote_slot ( cluster_type : ClusterType ) -> Slot {
match cluster_type {
ClusterType ::Development = > 0 ,
ClusterType ::Devnet = > 0 ,
2020-06-16 02:55:36 -07:00
// Epoch 63
2020-09-08 07:55:09 -07:00
ClusterType ::Testnet = > 21_692_256 ,
// 400_000 slots into epoch 61
ClusterType ::MainnetBeta = > 26_752_000 ,
2020-05-30 00:03:19 -07:00
}
}
2019-11-13 10:12:09 -08:00
/// Blocks until the commitment aggregation service and the replay thread have
/// both exited, propagating any panic from either thread to the caller.
pub fn join(self) -> thread::Result<()> {
    self.commitment_service.join()?;
    self.t_replay.join().map(|_| ())
}
}
2018-09-13 14:00:17 -07:00
2018-10-10 16:49:41 -07:00
#[ cfg(test) ]
2019-11-25 11:08:03 -08:00
pub ( crate ) mod tests {
2019-01-29 00:21:27 -08:00
use super ::* ;
2019-11-20 15:43:10 -08:00
use crate ::{
2020-02-26 14:09:07 -08:00
consensus ::test ::{ initialize_state , VoteSimulator } ,
2019-12-10 08:36:16 -08:00
consensus ::Tower ,
2020-09-28 19:43:05 -07:00
optimistically_confirmed_bank_tracker ::OptimisticallyConfirmedBank ,
2020-03-26 19:57:27 -07:00
progress_map ::ValidatorStakeInfo ,
2019-11-20 15:43:10 -08:00
replay_stage ::ReplayStage ,
transaction_status_service ::TransactionStatusService ,
2019-10-18 22:55:59 -07:00
} ;
2019-11-21 13:23:40 -08:00
use crossbeam_channel ::unbounded ;
2019-11-13 07:14:09 -08:00
use solana_ledger ::{
2020-01-13 13:13:52 -08:00
blockstore ::make_slot_entries ,
blockstore ::{ entries_to_test_shreds , BlockstoreError } ,
2021-03-24 23:41:52 -07:00
blockstore_processor , create_new_tmp_ledger ,
2020-01-14 17:15:26 -08:00
entry ::{ self , next_entry , Entry } ,
2020-03-21 10:54:40 -07:00
genesis_utils ::{ create_genesis_config , create_genesis_config_with_leader } ,
2019-11-13 07:14:09 -08:00
get_tmp_ledger_path ,
2019-11-20 15:43:10 -08:00
shred ::{
CodingShredHeader , DataShredHeader , Shred , ShredCommonHeader , DATA_COMPLETE_SHRED ,
SIZE_OF_COMMON_SHRED_HEADER , SIZE_OF_DATA_SHRED_HEADER , SIZE_OF_DATA_SHRED_PAYLOAD ,
} ,
2019-11-13 07:14:09 -08:00
} ;
2020-06-25 21:06:58 -07:00
use solana_runtime ::{
2021-02-18 23:42:09 -08:00
accounts_background_service ::AbsRequestSender ,
2020-06-25 21:06:58 -07:00
commitment ::BlockCommitment ,
genesis_utils ::{ self , GenesisConfigInfo , ValidatorVoteKeypairs } ,
} ;
2019-11-20 15:43:10 -08:00
use solana_sdk ::{
2020-03-26 19:57:27 -07:00
clock ::NUM_CONSECUTIVE_LEADER_SLOTS ,
genesis_config ,
2019-11-20 15:43:10 -08:00
hash ::{ hash , Hash } ,
instruction ::InstructionError ,
packet ::PACKET_DATA_SIZE ,
2020-02-20 13:28:55 -08:00
signature ::{ Keypair , Signature , Signer } ,
2019-11-20 15:43:10 -08:00
system_transaction ,
transaction ::TransactionError ,
} ;
2020-09-23 22:10:29 -07:00
use solana_transaction_status ::TransactionWithStatusMeta ;
2020-03-04 11:49:56 -08:00
use solana_vote_program ::{
2020-06-11 12:16:04 -07:00
vote_state ::{ VoteState , VoteStateVersions } ,
2020-03-04 11:49:56 -08:00
vote_transaction ,
} ;
2019-11-20 15:43:10 -08:00
use std ::{
fs ::remove_dir_all ,
2020-01-14 23:25:45 -08:00
iter ,
2021-03-26 15:47:35 -07:00
sync ::{ atomic ::AtomicU64 , Arc , RwLock } ,
2019-11-20 15:43:10 -08:00
} ;
2020-02-03 16:48:24 -08:00
use trees ::tr ;
2018-10-10 16:49:41 -07:00
2020-06-29 18:49:57 -07:00
#[ test ]
fn test_is_partition_detected ( ) {
let ( bank_forks , _ ) = setup_forks ( ) ;
let ancestors = bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ;
// Last vote 1 is an ancestor of the heaviest slot 3, no partition
assert! ( ! ReplayStage ::is_partition_detected ( & ancestors , 1 , 3 ) ) ;
// Last vote 1 is an ancestor of the from heaviest slot 1, no partition
assert! ( ! ReplayStage ::is_partition_detected ( & ancestors , 3 , 3 ) ) ;
// Last vote 2 is not an ancestor of the heaviest slot 3,
// partition detected!
assert! ( ReplayStage ::is_partition_detected ( & ancestors , 2 , 3 ) ) ;
// Last vote 4 is not an ancestor of the heaviest slot 3,
// partition detected!
assert! ( ReplayStage ::is_partition_detected ( & ancestors , 4 , 3 ) ) ;
}
2020-07-24 02:55:25 -07:00
/// Bundle of state produced by `replay_blockstore_components()` and shared by
/// the blockstore-replay tests below.
struct ReplayBlockstoreComponents {
    blockstore: Arc<Blockstore>,
    // Maps each validator's node pubkey to its vote-account pubkey.
    validator_voting_keys: HashMap<Pubkey, Pubkey>,
    progress: ProgressMap,
    bank_forks: Arc<RwLock<BankForks>>,
    leader_schedule_cache: Arc<LeaderScheduleCache>,
    rpc_subscriptions: Arc<RpcSubscriptions>,
}
/// Builds a fresh test fixture: a temp-dir blockstore, a genesis with 20
/// equally staked (100 each) vote accounts, a root bank at slot 0 with a
/// matching `ProgressMap` entry, and the leader-schedule / RPC plumbing the
/// replay tests need.
fn replay_blockstore_components() -> ReplayBlockstoreComponents {
    // Setup blockstore
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Arc::new(
        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
    );
    let validator_authorized_voter_keypairs: Vec<_> =
        (0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
    // node pubkey -> vote pubkey, for later lookups by collector id.
    let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs
        .iter()
        .map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
        .collect();
    let GenesisConfigInfo { genesis_config, .. } =
        genesis_utils::create_genesis_config_with_vote_accounts(
            10_000,
            &validator_authorized_voter_keypairs,
            vec![100; validator_authorized_voter_keypairs.len()],
        );
    let bank0 = Bank::new(&genesis_config);

    // ProgressMap: seed an entry for the root slot 0.
    let mut progress = ProgressMap::default();
    progress.insert(
        0,
        ForkProgress::new_from_bank(
            &bank0,
            bank0.collector_id(),
            &Pubkey::default(),
            None,
            DuplicateStats::default(),
            0,
            0,
        ),
    );

    // Leader schedule cache
    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));

    // BankForks
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));

    // RpcSubscriptions
    let optimistically_confirmed_bank =
        OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
    let exit = Arc::new(AtomicBool::new(false));
    let rpc_subscriptions = Arc::new(RpcSubscriptions::new(
        &exit,
        bank_forks.clone(),
        Arc::new(RwLock::new(BlockCommitmentCache::default())),
        optimistically_confirmed_bank,
    ));

    ReplayBlockstoreComponents {
        blockstore,
        validator_voting_keys,
        progress,
        bank_forks,
        leader_schedule_cache,
        rpc_subscriptions,
    }
}
/// Verifies that `generate_new_bank_forks` creates banks for blockstore slots
/// chaining to an existing fork, and that the leaders of those new slots are
/// recorded in the parent's propagated-validator stats.
#[test]
fn test_child_slots_of_same_parent() {
    let ReplayBlockstoreComponents {
        blockstore,
        validator_voting_keys,
        mut progress,
        bank_forks,
        leader_schedule_cache,
        rpc_subscriptions,
    } = replay_blockstore_components();

    // Insert a non-root bank so that the propagation logic will update this
    // bank
    let bank1 = Bank::new_from_parent(
        bank_forks.read().unwrap().get(0).unwrap(),
        &leader_schedule_cache.slot_leader_at(1, None).unwrap(),
        1,
    );
    progress.insert(
        1,
        ForkProgress::new_from_bank(
            &bank1,
            bank1.collector_id(),
            validator_voting_keys.get(&bank1.collector_id()).unwrap(),
            Some(0),
            DuplicateStats::default(),
            0,
            0,
        ),
    );
    assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
    bank1.freeze();
    bank_forks.write().unwrap().insert(bank1);

    // Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
    // chaining to slot 1
    let (shreds, _) = make_slot_entries(NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(bank_forks
        .read()
        .unwrap()
        .get(NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_none());
    ReplayStage::generate_new_bank_forks(
        &blockstore,
        &bank_forks,
        &leader_schedule_cache,
        &rpc_subscriptions,
        &mut progress,
    );
    assert!(bank_forks
        .read()
        .unwrap()
        .get(NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_some());

    // Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
    // chaining to slot 1
    let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(bank_forks
        .read()
        .unwrap()
        .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_none());
    ReplayStage::generate_new_bank_forks(
        &blockstore,
        &bank_forks,
        &leader_schedule_cache,
        &rpc_subscriptions,
        &mut progress,
    );
    assert!(bank_forks
        .read()
        .unwrap()
        .get(NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_some());
    assert!(bank_forks
        .read()
        .unwrap()
        .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_some());

    // There are 20 equally staked accounts, of which 3 have built
    // banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
    // we should see 3 validators in bank 1's propagated_validator set.
    let expected_leader_slots = vec![
        1,
        NUM_CONSECUTIVE_LEADER_SLOTS,
        2 * NUM_CONSECUTIVE_LEADER_SLOTS,
    ];
    for slot in expected_leader_slots {
        let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
        let vote_key = validator_voting_keys.get(&leader).unwrap();
        assert!(progress
            .get_propagated_stats(1)
            .unwrap()
            .propagated_validators
            .contains(vote_key));
    }
}
2019-03-19 17:30:36 -07:00
/// Verifies that `handle_new_root` sets the new root in `BankForks`, prunes
/// the progress map down to the root, and drops entries older than the root
/// from the gossip duplicate-confirmed and verified-vote-hash maps.
#[test]
fn test_handle_new_root() {
    let genesis_config = create_genesis_config(10_000).genesis_config;
    let bank0 = Bank::new(&genesis_config);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));

    let root = 3;
    let root_bank = Bank::new_from_parent(
        bank_forks.read().unwrap().get(0).unwrap(),
        &Pubkey::default(),
        root,
    );
    // Freeze so the bank has a hash for fork choice.
    root_bank.freeze();
    let root_hash = root_bank.hash();
    bank_forks.write().unwrap().insert(root_bank);

    let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash));

    let mut progress = ProgressMap::default();
    for i in 0..=root {
        progress.insert(
            i,
            ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0),
        );
    }

    // Populate slots straddling the root to check pruning below.
    let mut gossip_duplicate_confirmed_slots: GossipDuplicateConfirmedSlots =
        vec![root - 1, root, root + 1]
            .into_iter()
            .map(|s| (s, Hash::default()))
            .collect();
    let mut gossip_verified_vote_hashes: GossipVerifiedVoteHashes =
        vec![root - 1, root, root + 1]
            .into_iter()
            .map(|s| (s, HashMap::new()))
            .collect();

    ReplayStage::handle_new_root(
        root,
        &bank_forks,
        &mut progress,
        &AbsRequestSender::default(),
        None,
        &mut heaviest_subtree_fork_choice,
        &mut gossip_duplicate_confirmed_slots,
        &mut gossip_verified_vote_hashes,
        &mut true,
        &mut Vec::new(),
    );
    assert_eq!(bank_forks.read().unwrap().root(), root);
    // Only the root survives in the progress map.
    assert_eq!(progress.len(), 1);
    assert!(progress.get(&root).is_some());
    // root - 1 is filtered out
    assert_eq!(
        gossip_duplicate_confirmed_slots
            .keys()
            .cloned()
            .collect::<Vec<Slot>>(),
        vec![root, root + 1]
    );
    assert_eq!(
        gossip_verified_vote_hashes
            .keys()
            .cloned()
            .collect::<Vec<Slot>>(),
        vec![root, root + 1]
    );
}
2019-06-20 15:50:41 -07:00
2020-04-24 15:49:57 -07:00
/// Verifies that when the new root is ahead of the highest confirmed root,
/// `handle_new_root` keeps the confirmed root's bank/progress alive while
/// pruning the competing fork that does not descend to the new root.
#[test]
fn test_handle_new_root_ahead_of_highest_confirmed_root() {
    let genesis_config = create_genesis_config(10_000).genesis_config;
    let bank0 = Bank::new(&genesis_config);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
    let confirmed_root = 1;
    let fork = 2;
    // Fork structure: 0 -> 1 (confirmed) -> {2 (fork), 3 (new root)}
    let bank1 = Bank::new_from_parent(
        bank_forks.read().unwrap().get(0).unwrap(),
        &Pubkey::default(),
        confirmed_root,
    );
    bank_forks.write().unwrap().insert(bank1);
    let bank2 = Bank::new_from_parent(
        bank_forks.read().unwrap().get(confirmed_root).unwrap(),
        &Pubkey::default(),
        fork,
    );
    bank_forks.write().unwrap().insert(bank2);
    let root = 3;
    let root_bank = Bank::new_from_parent(
        bank_forks.read().unwrap().get(confirmed_root).unwrap(),
        &Pubkey::default(),
        root,
    );
    // Freeze so the bank has a hash for fork choice.
    root_bank.freeze();
    let root_hash = root_bank.hash();
    bank_forks.write().unwrap().insert(root_bank);
    let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash));
    let mut progress = ProgressMap::default();
    for i in 0..=root {
        progress.insert(
            i,
            ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0),
        );
    }
    ReplayStage::handle_new_root(
        root,
        &bank_forks,
        &mut progress,
        &AbsRequestSender::default(),
        Some(confirmed_root),
        &mut heaviest_subtree_fork_choice,
        &mut BTreeMap::new(),
        &mut BTreeMap::new(),
        &mut true,
        &mut Vec::new(),
    );
    assert_eq!(bank_forks.read().unwrap().root(), root);
    // The confirmed root is retained; the dead-end fork is pruned.
    assert!(bank_forks.read().unwrap().get(confirmed_root).is_some());
    assert!(bank_forks.read().unwrap().get(fork).is_none());
    assert_eq!(progress.len(), 2);
    assert!(progress.get(&root).is_some());
    assert!(progress.get(&confirmed_root).is_some());
    assert!(progress.get(&fork).is_none());
}
2019-06-20 15:50:41 -07:00
/// A block containing a transaction from a non-existent account must mark the
/// fork dead with an `AccountNotFound` transaction error.
#[test]
fn test_dead_fork_transaction_error() {
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let missing_keypair = Keypair::new();
    let missing_keypair2 = Keypair::new();
    let res = check_dead_fork(|_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        let entry = entry::next_entry(
            &blockhash,
            hashes_per_tick.saturating_sub(1),
            vec![
                system_transaction::transfer(&keypair1, &keypair2.pubkey(), 2, blockhash), // should be fine,
                system_transaction::transfer(
                    &missing_keypair,
                    &missing_keypair2.pubkey(),
                    2,
                    blockhash,
                ), // should cause AccountNotFound error
            ],
        );
        entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false, 0)
    });

    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidTransaction(
            TransactionError::AccountNotFound
        ))
    );
}
2019-06-20 15:50:41 -07:00
2019-07-07 14:37:12 -07:00
/// An entry built on a wrong blockhash must mark the fork dead with
/// `BlockError::InvalidEntryHash`.
#[test]
fn test_dead_fork_entry_verification_failure() {
    let keypair2 = Keypair::new();
    let res = check_dead_fork(|genesis_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let bad_hash = hash(&[2; 30]);
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        let entry = entry::next_entry(
            // Use wrong blockhash so that the entry causes an entry verification failure
            &bad_hash,
            hashes_per_tick.saturating_sub(1),
            vec![system_transaction::transfer(
                &genesis_keypair,
                &keypair2.pubkey(),
                2,
                blockhash,
            )],
        );
        entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false, 0)
    });

    // assert_matches! (instead of if-let + bare panic!()) prints the actual
    // value on failure and matches the style of the other dead-fork tests.
    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::InvalidEntryHash
        ))
    );
}
/// A tick with too few hashes must mark the fork dead with
/// `BlockError::InvalidTickHashCount`.
#[test]
fn test_dead_fork_invalid_tick_hash_count() {
    let res = check_dead_fork(|_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        // The tick below deliberately performs one hash too few.
        assert!(hashes_per_tick > 0);
        let too_few_hashes_tick = Entry::new(&blockhash, hashes_per_tick - 1, vec![]);
        entries_to_test_shreds(
            vec![too_few_hashes_tick],
            slot,
            slot.saturating_sub(1),
            false,
            0,
        )
    });

    // assert_matches! (instead of if-let + bare panic!()) prints the actual
    // value on failure and matches the style of the other dead-fork tests.
    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::InvalidTickHashCount
        ))
    );
}
/// A slot with the wrong number of ticks must mark the fork dead: too many
/// ticks yields `TooManyTicks`, too few yields `TooFewTicks`.
#[test]
fn test_dead_fork_invalid_slot_tick_count() {
    solana_logger::setup();

    // Too many ticks per slot
    let res = check_dead_fork(|_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        entries_to_test_shreds(
            entry::create_ticks(bank.ticks_per_slot() + 1, hashes_per_tick, blockhash),
            slot,
            slot.saturating_sub(1),
            false,
            0,
        )
    });

    // assert_matches! (instead of if-let + bare panic!()) prints the actual
    // value on failure and matches the style of the other dead-fork tests.
    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::TooManyTicks
        ))
    );

    // Too few ticks per slot
    let res = check_dead_fork(|_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        entries_to_test_shreds(
            entry::create_ticks(bank.ticks_per_slot() - 1, hashes_per_tick, blockhash),
            slot,
            slot.saturating_sub(1),
            true,
            0,
        )
    });

    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::TooFewTicks
        ))
    );
}
/// A full complement of ticks that is not flagged as the last tick of the slot
/// must mark the fork dead with `BlockError::InvalidLastTick`.
#[test]
fn test_dead_fork_invalid_last_tick() {
    let res = check_dead_fork(|_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        entries_to_test_shreds(
            entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash),
            slot,
            slot.saturating_sub(1),
            // is_full_slot = false: the final shred is not marked LAST_IN_SLOT
            false,
            0,
        )
    });

    // assert_matches! (instead of if-let + bare panic!()) prints the actual
    // value on failure and matches the style of the other dead-fork tests.
    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::InvalidLastTick
        ))
    );
}
/// A transaction entry appended after the slot's final tick must mark the
/// fork dead with `BlockError::TrailingEntry`.
#[test]
fn test_dead_fork_trailing_entry() {
    let keypair = Keypair::new();
    let res = check_dead_fork(|genesis_keypair, bank| {
        let blockhash = bank.last_blockhash();
        let slot = bank.slot();
        let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
        let mut entries =
            entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash);
        // Append one extra transaction entry after the final tick.
        let last_entry_hash = entries.last().unwrap().hash;
        let tx =
            system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash);
        let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]);
        entries.push(trailing_entry);
        entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0)
    });

    // assert_matches! (instead of if-let + bare panic!()) prints the actual
    // value on failure and matches the style of the other dead-fork tests.
    assert_matches!(
        res,
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::TrailingEntry
        ))
    );
}
/// A shred whose payload is unparseable garbage must mark the fork dead with
/// a `FailedToLoadEntries(InvalidShredData)` error.
#[test]
fn test_dead_fork_entry_deserialize_failure() {
    // Insert entry that causes deserialization failure
    let res = check_dead_fork(|_, _| {
        let payload_len = SIZE_OF_DATA_SHRED_PAYLOAD;
        let gibberish = [0xa5u8; PACKET_DATA_SIZE];
        let mut data_header = DataShredHeader::default();
        data_header.flags |= DATA_COMPLETE_SHRED;
        let mut shred = Shred::new_empty_from_header(
            ShredCommonHeader::default(),
            data_header,
            CodingShredHeader::default(),
        );
        // Overwrite the shred's data payload (past the headers) with bytes
        // that cannot deserialize into entries.
        bincode::serialize_into(
            &mut shred.payload[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER..],
            &gibberish[..payload_len],
        )
        .unwrap();
        vec![shred]
    });

    assert_matches!(
        res,
        Err(BlockstoreProcessorError::FailedToLoadEntries(
            BlockstoreError::InvalidShredData(_)
        ),)
    );
}
2019-11-14 11:49:31 -08:00
// Given a shred and a fatal expected error, check that replaying that shred causes the fork to be
// marked as dead. Returns the error for caller to verify.
fn check_dead_fork<F>(shred_to_insert: F) -> result::Result<(), BlockstoreProcessorError>
where
    F: Fn(&Keypair, Arc<Bank>) -> Vec<Shred>,
{
    let ledger_path = get_tmp_ledger_path!();
    let (replay_vote_sender, _replay_vote_receiver) = unbounded();
    // Inner scope so the blockstore is dropped before the ledger dir is removed.
    let res = {
        let blockstore = Arc::new(
            Blockstore::open(&ledger_path)
                .expect("Expected to be able to open database ledger"),
        );
        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        genesis_config.poh_config.hashes_per_tick = Some(2);
        let bank_forks = BankForks::new(Bank::new(&genesis_config));
        let bank0 = bank_forks.working_bank();
        let mut progress = ProgressMap::default();
        let last_blockhash = bank0.last_blockhash();
        let mut bank0_progress = progress.entry(bank0.slot()).or_insert_with(|| {
            ForkProgress::new(last_blockhash, None, DuplicateStats::default(), None, 0, 0)
        });
        // Build the (intentionally bad) shreds via the caller's closure and
        // insert them so replay will pick them up.
        let shreds = shred_to_insert(&mint_keypair, bank0.clone());
        blockstore.insert_shreds(shreds, None, false).unwrap();
        let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
        let bank_forks = Arc::new(RwLock::new(bank_forks));
        let exit = Arc::new(AtomicBool::new(false));
        let res = ReplayStage::replay_blockstore_into_bank(
            &bank0,
            &blockstore,
            &mut bank0_progress,
            None,
            &replay_vote_sender,
            // NOTE(review): double-& as in original; coerces to
            // &VerifyRecyclers via deref — confirm intended.
            &&VerifyRecyclers::default(),
        );

        let subscriptions = Arc::new(RpcSubscriptions::new(
            &exit,
            bank_forks.clone(),
            block_commitment_cache,
            OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
        ));
        // On replay failure, mark the slot dead the same way replay_stage would.
        if let Err(err) = &res {
            ReplayStage::mark_dead_slot(
                &blockstore,
                &bank0,
                0,
                err,
                &subscriptions,
                &BTreeMap::new(),
                &HashMap::new(),
                &HashMap::new(),
                &mut progress,
                &mut HeaviestSubtreeForkChoice::new((0, Hash::default())),
            );
        }

        // Check that the erroring bank was marked as dead in the progress map
        assert!(progress
            .get(&bank0.slot())
            .map(|b| b.is_dead)
            .unwrap_or(false));

        // Check that the erroring bank was marked as dead in blockstore
        assert!(blockstore.is_dead(bank0.slot()));
        res.map(|_| ())
    };
    let _ignored = remove_dir_all(&ledger_path);
    res
}
2019-07-26 10:27:57 -07:00
/// Builds a 4-slot chain where the leader votes on each prior slot, pushes
/// commitment updates through the aggregation service, and checks that the
/// block commitment cache reflects confirmation depth 3/2/1 for slots 0/1/2.
#[test]
fn test_replay_commitment_cache() {
    // Applies a vote for `vote_slot` directly to the leader's vote account
    // stored in `bank`.
    fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) {
        let mut leader_vote_account = bank.get_account(&pubkey).unwrap();
        let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
        vote_state.process_slot_vote_unchecked(vote_slot);
        let versioned = VoteStateVersions::new_current(vote_state);
        VoteState::to(&versioned, &mut leader_vote_account).unwrap();
        bank.store_account(&pubkey, &leader_vote_account);
    }

    let leader_pubkey = solana_sdk::pubkey::new_rand();
    let leader_lamports = 3;
    let genesis_config_info =
        create_genesis_config_with_leader(50, &leader_pubkey, leader_lamports);
    let mut genesis_config = genesis_config_info.genesis_config;
    let leader_voting_pubkey = genesis_config_info.voting_keypair.pubkey();
    genesis_config.epoch_schedule.warmup = false;
    genesis_config.ticks_per_slot = 4;
    let bank0 = Bank::new(&genesis_config);
    for _ in 0..genesis_config.ticks_per_slot {
        bank0.register_tick(&Hash::default());
    }
    bank0.freeze();
    let arc_bank0 = Arc::new(bank0);
    let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[arc_bank0], 0)));

    let exit = Arc::new(AtomicBool::new(false));
    let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
    let subscriptions = Arc::new(RpcSubscriptions::new(
        &exit,
        bank_forks.clone(),
        block_commitment_cache.clone(),
        OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
    ));
    let (lockouts_sender, _) =
        AggregateCommitmentService::new(&exit, block_commitment_cache.clone(), subscriptions);

    // Nothing has been aggregated yet.
    assert!(block_commitment_cache
        .read()
        .unwrap()
        .get_block_commitment(0)
        .is_none());
    assert!(block_commitment_cache
        .read()
        .unwrap()
        .get_block_commitment(1)
        .is_none());

    // Build slots 1..=3; on each, the leader votes for the previous slot and
    // a commitment update is sent to the aggregation service.
    for i in 1..=3 {
        let prev_bank = bank_forks.read().unwrap().get(i - 1).unwrap().clone();
        let bank = Bank::new_from_parent(&prev_bank, &Pubkey::default(), prev_bank.slot() + 1);
        let _res = bank.transfer(
            10,
            &genesis_config_info.mint_keypair,
            &solana_sdk::pubkey::new_rand(),
        );
        for _ in 0..genesis_config.ticks_per_slot {
            bank.register_tick(&Hash::default());
        }
        bank_forks.write().unwrap().insert(bank);
        let arc_bank = bank_forks.read().unwrap().get(i).unwrap().clone();
        leader_vote(i - 1, &arc_bank, &leader_voting_pubkey);
        ReplayStage::update_commitment_cache(
            arc_bank.clone(),
            0,
            leader_lamports,
            &lockouts_sender,
        );
        arc_bank.freeze();
    }

    // The aggregation service runs on its own thread; poll (bounded) until it
    // has populated commitment for slots 0..=2.
    for _ in 0..10 {
        let done = {
            let bcc = block_commitment_cache.read().unwrap();
            bcc.get_block_commitment(0).is_some()
                && bcc.get_block_commitment(1).is_some()
                && bcc.get_block_commitment(2).is_some()
        };
        if done {
            break;
        } else {
            thread::sleep(Duration::from_millis(200));
        }
    }

    // Slot 0 is confirmed to depth 3 (votes from slots 1, 2, 3), slot 1 to
    // depth 2, slot 2 to depth 1.
    let mut expected0 = BlockCommitment::default();
    expected0.increase_confirmation_stake(3, leader_lamports);
    assert_eq!(
        block_commitment_cache
            .read()
            .unwrap()
            .get_block_commitment(0)
            .unwrap(),
        &expected0,
    );
    let mut expected1 = BlockCommitment::default();
    expected1.increase_confirmation_stake(2, leader_lamports);
    assert_eq!(
        block_commitment_cache
            .read()
            .unwrap()
            .get_block_commitment(1)
            .unwrap(),
        &expected1
    );
    let mut expected2 = BlockCommitment::default();
    expected2.increase_confirmation_stake(1, leader_lamports);
    assert_eq!(
        block_commitment_cache
            .read()
            .unwrap()
            .get_block_commitment(2)
            .unwrap(),
        &expected2
    );
}
2019-11-19 17:55:42 -08:00
2020-01-13 13:13:52 -08:00
/// Builds three test transactions — one successful, one failing with an
/// `InstructionError`, and one that cannot be committed at all — shreds them
/// into `blockstore` at `bank`'s slot (chained from `previous_slot`), roots
/// that slot, then replays the entries with a `TransactionStatusSender`
/// attached so the `TransactionStatusService` persists the statuses.
///
/// `keypairs` must hold at least four keypairs: the mint keypair followed by
/// three transfer participants.
///
/// Returns the signatures of the successful and `InstructionError`
/// transactions; the third transaction is omitted because it never commits
/// and so gets no recorded status.
pub fn create_test_transactions_and_populate_blockstore(
    keypairs: Vec<&Keypair>,
    previous_slot: Slot,
    bank: Arc<Bank>,
    blockstore: Arc<Blockstore>,
    max_complete_transaction_status_slot: Arc<AtomicU64>,
) -> Vec<Signature> {
    let mint_keypair = keypairs[0];
    let keypair1 = keypairs[1];
    let keypair2 = keypairs[2];
    let keypair3 = keypairs[3];
    let slot = bank.slot();
    let blockhash = bank.confirmed_last_blockhash().0;

    // Generate transactions for processing
    // Successful transaction
    let success_tx =
        system_transaction::transfer(&mint_keypair, &keypair1.pubkey(), 2, blockhash);
    let success_signature = success_tx.signatures[0];
    let entry_1 = next_entry(&blockhash, 1, vec![success_tx]);
    // Failed transaction, InstructionError: the transaction commits but the
    // transfer instruction itself fails (keypair2 is expected to be
    // underfunded by the caller), so a failed status is recorded.
    let ix_error_tx =
        system_transaction::transfer(&keypair2, &keypair3.pubkey(), 10, blockhash);
    let ix_error_signature = ix_error_tx.signatures[0];
    let entry_2 = next_entry(&entry_1.hash, 1, vec![ix_error_tx]);
    // Failed transaction: built with a default (unrecognized) blockhash, so
    // it cannot be committed and no status will be stored for it.
    let fail_tx =
        system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 2, Hash::default());
    let entry_3 = next_entry(&entry_2.hash, 1, vec![fail_tx]);
    let mut entries = vec![entry_1, entry_2, entry_3];

    // Write the entries to the blockstore as shreds and root the slot so the
    // block is retrievable via the rooted-block APIs.
    let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    blockstore.set_roots(&[slot]).unwrap();

    let (transaction_status_sender, transaction_status_receiver) = unbounded();
    let (replay_vote_sender, _replay_vote_receiver) = unbounded();
    let transaction_status_service = TransactionStatusService::new(
        transaction_status_receiver,
        max_complete_transaction_status_slot,
        blockstore,
        &Arc::new(AtomicBool::new(false)),
    );

    // Check that process_entries successfully writes can_commit transactions statuses, and
    // that they are matched properly by get_rooted_block
    let _result = blockstore_processor::process_entries(
        &bank,
        &mut entries,
        true,
        Some(TransactionStatusSender {
            sender: transaction_status_sender,
            enable_cpi_and_log_storage: false,
        }),
        Some(&replay_vote_sender),
    );

    // Dropping the sender above closed the channel; joining guarantees all
    // statuses were flushed to the blockstore before we return.
    transaction_status_service.join().unwrap();

    vec![success_signature, ix_error_signature]
}
2019-11-20 15:43:10 -08:00
#[test]
fn test_write_persist_transaction_status() {
    // Verifies that transaction statuses written during replay are persisted
    // to the blockstore and correctly attached to the right transactions when
    // the rooted block is read back.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(1000);
    let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config);
    {
        let blockstore = Blockstore::open(&ledger_path)
            .expect("Expected to successfully open database ledger");
        let blockstore = Arc::new(blockstore);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let bank0 = Arc::new(Bank::new(&genesis_config));
        // Fund keypair2 with only 4 lamports so its 10-lamport transfer in the
        // populated block fails with an InstructionError.
        bank0
            .transfer(4, &mint_keypair, &keypair2.pubkey())
            .unwrap();
        let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
        let slot = bank1.slot();
        // Returns the signatures of the successful and InstructionError
        // transactions; the third (uncommittable) transaction gets no status.
        let signatures = create_test_transactions_and_populate_blockstore(
            vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
            bank0.slot(),
            bank1,
            blockstore.clone(),
            Arc::new(AtomicU64::default()),
        );
        let confirmed_block = blockstore.get_rooted_block(slot, false).unwrap();
        assert_eq!(confirmed_block.transactions.len(), 3);
        for TransactionWithStatusMeta { transaction, meta } in
            confirmed_block.transactions.into_iter()
        {
            if transaction.signatures[0] == signatures[0] {
                // The successful transfer
                let meta = meta.unwrap();
                assert_eq!(meta.status, Ok(()));
            } else if transaction.signatures[0] == signatures[1] {
                // The committed-but-failed transfer (insufficient funds)
                let meta = meta.unwrap();
                assert_eq!(
                    meta.status,
                    Err(TransactionError::InstructionError(
                        0,
                        InstructionError::Custom(1)
                    ))
                );
            } else {
                // The uncommitted transaction has no recorded status
                assert_eq!(meta, None);
            }
        }
    }
    Blockstore::destroy(&ledger_path).unwrap();
}
2020-02-03 16:48:24 -08:00
2020-03-04 11:49:56 -08:00
#[test]
fn test_compute_bank_stats_confirmed() {
    // Exercises `compute_bank_stats` + `confirm_forks`: a vote for slot 0
    // landed in bank 1 should confirm slot 0, but only once bank 1 is
    // actually inserted into bank_forks.
    let vote_keypairs = ValidatorVoteKeypairs::new_rand();
    let node_pubkey = vote_keypairs.node_keypair.pubkey();
    let keypairs: HashMap<_, _> = vec![(node_pubkey, vote_keypairs)].into_iter().collect();

    let (bank_forks, mut progress, mut heaviest_subtree_fork_choice) =
        initialize_state(&keypairs, 10_000);
    let bank0 = bank_forks.get(0).unwrap().clone();
    let my_keypairs = keypairs.get(&node_pubkey).unwrap();
    // A vote for slot 0; it will land in bank 1 below
    let vote_tx = vote_transaction::new_vote_transaction(
        vec![0],
        bank0.hash(),
        bank0.last_blockhash(),
        &my_keypairs.node_keypair,
        &my_keypairs.vote_keypair,
        &my_keypairs.vote_keypair,
        None,
    );

    let bank_forks = RwLock::new(bank_forks);
    let bank1 = Bank::new_from_parent(&bank0, &node_pubkey, 1);
    bank1.process_transaction(&vote_tx).unwrap();
    bank1.freeze();

    // Test confirmations
    let ancestors = bank_forks.read().unwrap().ancestors();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let tower = Tower::new_for_tests(0, 0.67);
    let newly_computed = ReplayStage::compute_bank_stats(
        &node_pubkey,
        &ancestors,
        &mut frozen_banks,
        &tower,
        &mut progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        &mut heaviest_subtree_fork_choice,
    );

    // bank 0 has no votes, should not send any votes on the channel
    assert_eq!(newly_computed, vec![0]);
    // The only vote is in bank 1, and bank_forks does not currently contain
    // bank 1, so no slot should be confirmed.
    {
        let fork_progress = progress.get(&0).unwrap();
        let confirmed_forks = ReplayStage::confirm_forks(
            &tower,
            &fork_progress.fork_stats.voted_stakes,
            fork_progress.fork_stats.total_stake,
            &progress,
            &bank_forks,
        );
        assert!(confirmed_forks.is_empty());
    }

    // Insert the bank that contains a vote for slot 0, which confirms slot 0
    bank_forks.write().unwrap().insert(bank1);
    progress.insert(
        1,
        ForkProgress::new(
            bank0.last_blockhash(),
            None,
            DuplicateStats::default(),
            None,
            0,
            0,
        ),
    );
    let ancestors = bank_forks.read().unwrap().ancestors();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let newly_computed = ReplayStage::compute_bank_stats(
        &node_pubkey,
        &ancestors,
        &mut frozen_banks,
        &tower,
        &mut progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        &mut heaviest_subtree_fork_choice,
    );

    // Bank 1 had one vote
    assert_eq!(newly_computed, vec![1]);
    {
        let fork_progress = progress.get(&1).unwrap();
        let confirmed_forks = ReplayStage::confirm_forks(
            &tower,
            &fork_progress.fork_stats.voted_stakes,
            fork_progress.fork_stats.total_stake,
            &progress,
            &bank_forks,
        );
        // The vote that landed in bank 1 now confirms its parent, slot 0
        assert_eq!(confirmed_forks, vec![0]);
    }

    // Run a third time with no new frozen banks
    let ancestors = bank_forks.read().unwrap().ancestors();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let newly_computed = ReplayStage::compute_bank_stats(
        &node_pubkey,
        &ancestors,
        &mut frozen_banks,
        &tower,
        &mut progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        &mut heaviest_subtree_fork_choice,
    );
    // No new stats should have been computed
    assert!(newly_computed.is_empty());
}
2020-04-10 23:52:37 -07:00
#[test]
fn test_same_weight_select_lower_slot() {
    // Two unvoted forks (slots 1 and 2) off the root have equal subtree
    // stake; fork selection must deterministically pick the lower slot.
    //
    // Cleanup: the original bound `let mut hsfc = &mut vote_simulator.…` and
    // then passed `&mut hsfc`, producing a needless `&mut &mut _` (clippy
    // `mut_mut`). Borrow the field directly at each use instead, matching
    // `test_child_bank_heavier`.

    // Init state
    let mut vote_simulator = VoteSimulator::new(1);
    let node_pubkey = vote_simulator.node_pubkeys[0];
    let tower = Tower::new_with_key(&node_pubkey);

    // Create the tree of banks in a BankForks object
    let forks = tr(0) / (tr(1)) / (tr(2));
    vote_simulator.fill_bank_forks(forks, &HashMap::new());

    let mut frozen_banks: Vec<_> = vote_simulator
        .bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
    ReplayStage::compute_bank_stats(
        &node_pubkey,
        &ancestors,
        &mut frozen_banks,
        &tower,
        &mut vote_simulator.progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &vote_simulator.bank_forks,
        &mut vote_simulator.heaviest_subtree_fork_choice,
    );

    let bank1 = vote_simulator
        .bank_forks
        .read()
        .unwrap()
        .get(1)
        .unwrap()
        .clone();
    let bank2 = vote_simulator
        .bank_forks
        .read()
        .unwrap()
        .get(2)
        .unwrap()
        .clone();

    // Neither fork received any votes, so their subtree stakes must be equal
    assert_eq!(
        vote_simulator
            .heaviest_subtree_fork_choice
            .stake_voted_subtree(&(1, bank1.hash()))
            .unwrap(),
        vote_simulator
            .heaviest_subtree_fork_choice
            .stake_voted_subtree(&(2, bank2.hash()))
            .unwrap()
    );

    let (heaviest_bank, _) = vote_simulator.heaviest_subtree_fork_choice.select_forks(
        &frozen_banks,
        &tower,
        &vote_simulator.progress,
        &ancestors,
        &vote_simulator.bank_forks,
    );

    // Should pick the lower of the two equally weighted banks
    assert_eq!(heaviest_bank.slot(), 1);
}
2020-02-03 16:48:24 -08:00
#[test]
fn test_child_bank_heavier() {
    // On a linear chain 0 -> 1 -> 2 -> 3 with votes on 0 and 2, fork weight
    // must be non-decreasing toward the leaf, and the leaf (3) must be the
    // best slot from every ancestor.

    // Init state
    let mut vote_simulator = VoteSimulator::new(1);
    let node_pubkey = vote_simulator.node_pubkeys[0];
    let mut tower = Tower::new_with_key(&node_pubkey);

    // Create the tree of banks in a BankForks object
    let forks = tr(0) / (tr(1) / (tr(2) / (tr(3))));

    // Set the voting behavior
    let mut cluster_votes = HashMap::new();
    let votes = vec![0, 2];
    cluster_votes.insert(node_pubkey, votes.clone());
    vote_simulator.fill_bank_forks(forks, &cluster_votes);

    // Fill banks with votes; an empty return value means the simulated vote
    // produced no vote failures.
    for vote in votes {
        assert!(vote_simulator
            .simulate_vote(vote, &node_pubkey, &mut tower,)
            .is_empty());
    }

    let mut frozen_banks: Vec<_> = vote_simulator
        .bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();

    ReplayStage::compute_bank_stats(
        &node_pubkey,
        &vote_simulator.bank_forks.read().unwrap().ancestors(),
        &mut frozen_banks,
        &tower,
        &mut vote_simulator.progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &vote_simulator.bank_forks,
        &mut vote_simulator.heaviest_subtree_fork_choice,
    );

    // Walk the chain parent/child pair by pair: weight must never decrease
    // toward the leaf.
    frozen_banks.sort_by_key(|bank| bank.slot());
    for pair in frozen_banks.windows(2) {
        let first = vote_simulator
            .progress
            .get_fork_stats(pair[0].slot())
            .unwrap()
            .fork_weight;
        let second = vote_simulator
            .progress
            .get_fork_stats(pair[1].slot())
            .unwrap()
            .fork_weight;
        assert!(second >= first);
    }

    for bank in frozen_banks {
        // The only leaf should always be chosen over parents
        assert_eq!(
            vote_simulator
                .heaviest_subtree_fork_choice
                .best_slot(&(bank.slot(), bank.hash()))
                .unwrap()
                .0,
            3
        );
    }
}
2020-03-26 19:57:27 -07:00
2020-03-26 23:33:28 -07:00
#[test]
fn test_should_retransmit() {
    // After retransmitting at slot 4, nothing should be retransmitted again
    // until poh_slot >= 4 + NUM_CONSECUTIVE_LEADER_SLOTS, or until PoH resets
    // backwards to a slot < 4.
    let mut last_retransmit_slot = 4;
    assert!(!ReplayStage::should_retransmit(4, &mut last_retransmit_slot));
    assert_eq!(last_retransmit_slot, 4);

    // Every slot inside the consecutive-leader window is suppressed, and the
    // recorded retransmit slot does not move.
    for candidate_slot in 4..4 + NUM_CONSECUTIVE_LEADER_SLOTS {
        assert!(!ReplayStage::should_retransmit(
            candidate_slot,
            &mut last_retransmit_slot
        ));
        assert_eq!(last_retransmit_slot, 4);
    }

    // The first slot past the window triggers a retransmit and advances the
    // recorded slot.
    let past_window_slot = 4 + NUM_CONSECUTIVE_LEADER_SLOTS;
    last_retransmit_slot = 4;
    assert!(ReplayStage::should_retransmit(
        past_window_slot,
        &mut last_retransmit_slot
    ));
    assert_eq!(last_retransmit_slot, past_window_slot);

    // Resetting to an earlier slot also triggers a retransmit.
    let reset_slot = 3;
    last_retransmit_slot = 4;
    assert!(ReplayStage::should_retransmit(
        reset_slot,
        &mut last_retransmit_slot
    ));
    assert_eq!(last_retransmit_slot, reset_slot);
}
2020-03-26 19:57:27 -07:00
#[test]
fn test_update_slot_propagated_threshold_from_votes() {
    // Build ten random validators keyed by node pubkey.
    let keypairs: HashMap<_, _> = (0..10)
        .map(|_| {
            let validator_keys = ValidatorVoteKeypairs::new_rand();
            (validator_keys.node_keypair.pubkey(), validator_keys)
        })
        .collect();

    // Collect vote and node pubkeys in one pass so both vectors share the
    // same map-iteration order.
    let (new_vote_pubkeys, new_node_pubkeys): (Vec<_>, Vec<_>) = keypairs
        .values()
        .map(|keys| (keys.vote_keypair.pubkey(), keys.node_keypair.pubkey()))
        .unzip();

    // Once 4/10 validators have voted, we have hit threshold
    run_test_update_slot_propagated_threshold_from_votes(&keypairs, &new_vote_pubkeys, &[], 4);

    // Adding the same node pubkey's instead of the corresponding
    // vote pubkeys should be equivalent
    run_test_update_slot_propagated_threshold_from_votes(&keypairs, &[], &new_node_pubkeys, 4);

    // Adding the same node pubkey's in the same order as their
    // corresponding vote accounts is redundant, so we don't
    // reach the threshold any sooner.
    run_test_update_slot_propagated_threshold_from_votes(
        &keypairs,
        &new_vote_pubkeys,
        &new_node_pubkeys,
        4,
    );

    // However, if we add different node pubkey's than the
    // vote accounts, we should hit threshold much faster
    // because now we are getting 2 new pubkeys on each
    // iteration instead of 1, so by the 2nd iteration
    // we should have 4/10 validators voting
    run_test_update_slot_propagated_threshold_from_votes(
        &keypairs,
        &new_vote_pubkeys[0..5],
        &new_node_pubkeys[5..],
        2,
    );
}
2020-03-26 19:57:27 -07:00
2020-03-30 19:57:11 -07:00
/// Drives `update_slot_propagated_threshold_from_votes` one new pubkey at a
/// time (vote and/or node pubkeys) and asserts that:
/// * already-seen pubkeys are filtered out of the in/out vectors, and
/// * the propagated-threshold flag flips exactly at iteration `success_index`.
///
/// `new_vote_pubkeys` and `new_node_pubkeys` are the incremental inputs; each
/// loop iteration `i` feeds the first `i` entries of each (so exactly one of
/// each is new per iteration).
fn run_test_update_slot_propagated_threshold_from_votes(
    all_keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
    new_vote_pubkeys: &[Pubkey],
    new_node_pubkeys: &[Pubkey],
    success_index: usize,
) {
    let stake = 10_000;
    let (bank_forks, _, _) = initialize_state(&all_keypairs, stake);
    let root_bank = bank_forks.root_bank();
    let mut propagated_stats = PropagatedStats {
        total_epoch_stake: stake * all_keypairs.len() as u64,
        ..PropagatedStats::default()
    };
    let child_reached_threshold = false;
    for i in 0..std::cmp::max(new_vote_pubkeys.len(), new_node_pubkeys.len()) {
        // Reset so each iteration re-tests whether threshold is newly reached
        propagated_stats.is_propagated = false;
        // Feed the first `i` pubkeys of each kind (clamped to slice length)
        let len = std::cmp::min(i, new_vote_pubkeys.len());
        let mut voted_pubkeys = new_vote_pubkeys[..len].iter().copied().collect();
        let len = std::cmp::min(i, new_node_pubkeys.len());
        let mut node_pubkeys = new_node_pubkeys[..len].iter().copied().collect();
        let did_newly_reach_threshold =
            ReplayStage::update_slot_propagated_threshold_from_votes(
                &mut voted_pubkeys,
                &mut node_pubkeys,
                &root_bank,
                &mut propagated_stats,
                child_reached_threshold,
            );

        // Only the i'th voted pubkey should be new (everything else was
        // inserted in previous iteration of the loop), so those redundant
        // pubkeys should have been filtered out
        let remaining_vote_pubkeys = {
            if i == 0 || i >= new_vote_pubkeys.len() {
                vec![]
            } else {
                vec![new_vote_pubkeys[i - 1]]
            }
        };
        let remaining_node_pubkeys = {
            if i == 0 || i >= new_node_pubkeys.len() {
                vec![]
            } else {
                vec![new_node_pubkeys[i - 1]]
            }
        };
        assert_eq!(voted_pubkeys, remaining_vote_pubkeys);
        assert_eq!(node_pubkeys, remaining_node_pubkeys);

        // If we crossed the superminority threshold, then
        // `did_newly_reach_threshold == true`, otherwise the
        // threshold has not been reached
        if i >= success_index {
            assert!(propagated_stats.is_propagated);
            assert!(did_newly_reach_threshold);
        } else {
            assert!(!propagated_stats.is_propagated);
            assert!(!did_newly_reach_threshold);
        }
    }
}
2020-03-26 19:57:27 -07:00
2020-03-30 19:57:11 -07:00
#[test]
fn test_update_slot_propagated_threshold_from_votes2() {
    // Edge cases of `update_slot_propagated_threshold_from_votes` with no new
    // pubkeys at all: only the `child_reached_threshold` flag and the
    // pre-existing `is_propagated` state decide the result.
    //
    // Cleanup: removed a redundant `propagated_stats.total_epoch_stake =
    // stake * 10;` that re-assigned the field already set by the struct
    // literal directly above it.
    let mut empty: Vec<Pubkey> = vec![];
    let genesis_config = create_genesis_config(100_000_000).genesis_config;
    let root_bank = Bank::new(&genesis_config);
    let stake = 10_000;

    // Simulate a child slot seeing threshold (`child_reached_threshold` = true),
    // then the parent should also be marked as having reached threshold,
    // even if there are no new pubkeys to add (`newly_voted_pubkeys.is_empty()`)
    let mut propagated_stats = PropagatedStats {
        total_epoch_stake: stake * 10,
        ..PropagatedStats::default()
    };
    let child_reached_threshold = true;
    let mut newly_voted_pubkeys: Vec<Pubkey> = vec![];
    assert!(ReplayStage::update_slot_propagated_threshold_from_votes(
        &mut newly_voted_pubkeys,
        &mut empty,
        &root_bank,
        &mut propagated_stats,
        child_reached_threshold,
    ));

    // If propagation already happened (propagated_stats.is_propagated = true),
    // always returns false
    propagated_stats = PropagatedStats {
        total_epoch_stake: stake * 10,
        ..PropagatedStats::default()
    };
    propagated_stats.is_propagated = true;
    newly_voted_pubkeys = vec![];
    assert!(!ReplayStage::update_slot_propagated_threshold_from_votes(
        &mut newly_voted_pubkeys,
        &mut empty,
        &root_bank,
        &mut propagated_stats,
        child_reached_threshold,
    ));

    // With no new votes and no child having reached threshold, the parent
    // cannot newly reach threshold either.
    let child_reached_threshold = false;
    assert!(!ReplayStage::update_slot_propagated_threshold_from_votes(
        &mut newly_voted_pubkeys,
        &mut empty,
        &root_bank,
        &mut propagated_stats,
        child_reached_threshold,
    ));
}
#[test]
fn test_update_propagation_status() {
    // A single vote on a single-validator cluster (100% of stake) for slot 10
    // should mark that leader slot as propagated, cache the slot's vote
    // tracker, and consume the tracker's pending updates.

    // Create genesis stakers
    let vote_keypairs = ValidatorVoteKeypairs::new_rand();
    let node_pubkey = vote_keypairs.node_keypair.pubkey();
    let vote_pubkey = vote_keypairs.vote_keypair.pubkey();
    let keypairs: HashMap<_, _> = vec![(node_pubkey, vote_keypairs)].into_iter().collect();
    let stake = 10_000;
    let (mut bank_forks, mut progress_map, _) = initialize_state(&keypairs, stake);
    let bank0 = bank_forks.get(0).unwrap().clone();
    bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 9));
    let bank9 = bank_forks.get(9).unwrap().clone();
    bank_forks.insert(Bank::new_from_parent(&bank9, &Pubkey::default(), 10));
    bank_forks.set_root(9, &AbsRequestSender::default(), None);
    let total_epoch_stake = bank0.total_epoch_stake();

    // Insert new ForkProgress for slot 10 and its
    // previous leader slot 9
    progress_map.insert(
        10,
        ForkProgress::new(
            Hash::default(),
            Some(9),
            DuplicateStats::default(),
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            }),
            0,
            0,
        ),
    );
    progress_map.insert(
        9,
        ForkProgress::new(
            Hash::default(),
            Some(8),
            DuplicateStats::default(),
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            }),
            0,
            0,
        ),
    );

    // Make sure is_propagated == false so that the propagation logic
    // runs in `update_propagation_status`
    assert!(!progress_map.is_propagated(10));

    let vote_tracker = VoteTracker::new(&bank_forks.root_bank());
    vote_tracker.insert_vote(10, vote_pubkey);
    ReplayStage::update_propagation_status(
        &mut progress_map,
        10,
        &RwLock::new(bank_forks),
        &vote_tracker,
        &ClusterSlots::default(),
    );

    let propagated_stats = &progress_map.get(&10).unwrap().propagated_stats;

    // There should now be a cached reference to the VoteTracker for
    // slot 10
    assert!(propagated_stats.slot_vote_tracker.is_some());

    // Updates should have been consumed
    assert!(propagated_stats
        .slot_vote_tracker
        .as_ref()
        .unwrap()
        .write()
        .unwrap()
        .get_voted_slot_updates()
        .is_none());

    // The voter should be recorded
    assert!(propagated_stats
        .propagated_validators
        .contains(&vote_pubkey));

    assert_eq!(propagated_stats.propagated_validators_stake, stake);
}
#[test]
fn test_chain_update_propagation_status() {
    // When the newest leader slot reaches the propagation threshold, the
    // status must chain back through every earlier *leader* slot (here the
    // even-numbered ones), leaving non-leader slots untouched.
    let keypairs: HashMap<_, _> = iter::repeat_with(|| {
        let vote_keypairs = ValidatorVoteKeypairs::new_rand();
        (vote_keypairs.node_keypair.pubkey(), vote_keypairs)
    })
    .take(10)
    .collect();

    let vote_pubkeys: Vec<_> = keypairs
        .values()
        .map(|keys| keys.vote_keypair.pubkey())
        .collect();

    let stake_per_validator = 10_000;
    let (mut bank_forks, mut progress_map, _) =
        initialize_state(&keypairs, stake_per_validator);
    progress_map
        .get_propagated_stats_mut(0)
        .unwrap()
        .is_leader_slot = true;
    bank_forks.set_root(0, &AbsRequestSender::default(), None);
    let total_epoch_stake = bank_forks.root_bank().total_epoch_stake();

    // Insert new ForkProgress representing a slot for all slots 1..=10. Only
    // make even numbered ones leader slots
    for i in 1..=10 {
        let parent_bank = bank_forks.get(i - 1).unwrap().clone();
        // Previous leader slot is the greatest even slot <= i - 1
        let prev_leader_slot = ((i - 1) / 2) * 2;
        bank_forks.insert(Bank::new_from_parent(&parent_bank, &Pubkey::default(), i));
        progress_map.insert(
            i,
            ForkProgress::new(
                Hash::default(),
                Some(prev_leader_slot),
                DuplicateStats::default(),
                {
                    // Only even slots carry ValidatorStakeInfo, i.e. only they
                    // are treated as leader slots for propagation tracking
                    if i % 2 == 0 {
                        Some(ValidatorStakeInfo {
                            total_epoch_stake,
                            ..ValidatorStakeInfo::default()
                        })
                    } else {
                        None
                    }
                },
                0,
                0,
            ),
        );
    }

    let vote_tracker = VoteTracker::new(&bank_forks.root_bank());
    for vote_pubkey in &vote_pubkeys {
        // Insert a vote for the last bank for each voter
        vote_tracker.insert_vote(10, *vote_pubkey);
    }

    // The last bank should reach propagation threshold, and propagate it all
    // the way back through earlier leader banks
    ReplayStage::update_propagation_status(
        &mut progress_map,
        10,
        &RwLock::new(bank_forks),
        &vote_tracker,
        &ClusterSlots::default(),
    );

    for i in 1..=10 {
        let propagated_stats = &progress_map.get(&i).unwrap().propagated_stats;
        // Only the even numbered ones were leader banks, so only
        // those should have been updated
        if i % 2 == 0 {
            assert!(propagated_stats.is_propagated);
        } else {
            assert!(!propagated_stats.is_propagated);
        }
    }
}
#[test]
fn test_chain_update_propagation_status2() {
    // Every slot is a leader slot here, pre-seeded with some propagated
    // validators; a single new vote should push only the slots that were one
    // pubkey short over the threshold when it chains back from slot 10.
    let num_validators = 6;
    let keypairs: HashMap<_, _> = iter::repeat_with(|| {
        let vote_keypairs = ValidatorVoteKeypairs::new_rand();
        (vote_keypairs.node_keypair.pubkey(), vote_keypairs)
    })
    .take(num_validators)
    .collect();

    let vote_pubkeys: Vec<_> = keypairs
        .values()
        .map(|keys| keys.vote_keypair.pubkey())
        .collect();

    let stake_per_validator = 10_000;
    let (mut bank_forks, mut progress_map, _) =
        initialize_state(&keypairs, stake_per_validator);
    progress_map
        .get_propagated_stats_mut(0)
        .unwrap()
        .is_leader_slot = true;
    bank_forks.set_root(0, &AbsRequestSender::default(), None);

    let total_epoch_stake = num_validators as u64 * stake_per_validator;

    // Insert new ForkProgress representing a slot for all slots 1..=10; every
    // slot carries ValidatorStakeInfo, so every slot is a leader slot whose
    // previous leader slot is its parent.
    for i in 1..=10 {
        let parent_bank = bank_forks.get(i - 1).unwrap().clone();
        let prev_leader_slot = i - 1;
        bank_forks.insert(Bank::new_from_parent(&parent_bank, &Pubkey::default(), i));
        let mut fork_progress = ForkProgress::new(
            Hash::default(),
            Some(prev_leader_slot),
            DuplicateStats::default(),
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            }),
            0,
            0,
        );
        let end_range = {
            // The earlier slots are one pubkey away from reaching confirmation
            if i < 5 {
                2
            } else {
                // The later slots are two pubkeys away from reaching confirmation
                1
            }
        };
        fork_progress.propagated_stats.propagated_validators =
            vote_pubkeys[0..end_range].iter().copied().collect();
        fork_progress.propagated_stats.propagated_validators_stake =
            end_range as u64 * stake_per_validator;
        progress_map.insert(i, fork_progress);
    }

    let vote_tracker = VoteTracker::new(&bank_forks.root_bank());
    // Insert a new vote
    vote_tracker.insert_vote(10, vote_pubkeys[2]);

    // The last bank should reach propagation threshold, and propagate it all
    // the way back through earlier leader banks
    ReplayStage::update_propagation_status(
        &mut progress_map,
        10,
        &RwLock::new(bank_forks),
        &vote_tracker,
        &ClusterSlots::default(),
    );

    // Only the banks that were one pubkey away (slots 1..=4) should have
    // reached the threshold
    for i in 1..=10 {
        let propagated_stats = &progress_map.get(&i).unwrap().propagated_stats;
        if i < 5 {
            assert!(propagated_stats.is_propagated);
        } else {
            assert!(!propagated_stats.is_propagated);
        }
    }
}
#[test]
fn test_check_propagation_for_start_leader() {
    // Verifies that `check_propagation_for_start_leader` only requires
    // propagation confirmation of the parent's leader slot when that slot is
    // at least NUM_CONSECUTIVE_LEADER_SLOTS behind the poh slot.
    let mut progress_map = ProgressMap::default();
    let poh_slot = 5;
    let parent_slot = poh_slot - NUM_CONSECUTIVE_LEADER_SLOTS;

    // If there is no previous leader slot (previous leader slot is None),
    // should succeed
    progress_map.insert(
        parent_slot,
        ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0),
    );
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Now if the parent was itself the leader, then it requires the propagation
    // confirmation check because the parent is at least NUM_CONSECUTIVE_LEADER_SLOTS
    // slots from the `poh_slot`
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            DuplicateStats::default(),
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );
    assert!(!ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));
    progress_map
        .get_mut(&parent_slot)
        .unwrap()
        .propagated_stats
        .is_propagated = true;
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Now, set up the progress map to show that the `previous_leader_slot` of 5 is
    // `parent_slot - 1` (not equal to the actual parent!), so `parent_slot - 1` needs
    // to see propagation confirmation before we can start a leader for block 5
    let previous_leader_slot = parent_slot - 1;
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            Some(previous_leader_slot),
            DuplicateStats::default(),
            None,
            0,
            0,
        ),
    );
    progress_map.insert(
        previous_leader_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            DuplicateStats::default(),
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );

    // `previous_leader_slot` has not seen propagation threshold, so should fail
    assert!(!ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // If we set is_propagated = true for the `previous_leader_slot`, it should
    // allow the block to be generated
    progress_map
        .get_mut(&previous_leader_slot)
        .unwrap()
        .propagated_stats
        .is_propagated = true;
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // If the root is now set to `parent_slot`, this filters out
    // `previous_leader_slot` from the progress map, which implies confirmation
    let bank0 = Bank::new(&genesis_config::create_genesis_config(10000).0);
    let parent_slot_bank =
        Bank::new_from_parent(&Arc::new(bank0), &Pubkey::default(), parent_slot);
    let mut bank_forks = BankForks::new(parent_slot_bank);
    let bank5 =
        Bank::new_from_parent(bank_forks.get(parent_slot).unwrap(), &Pubkey::default(), 5);
    bank_forks.insert(bank5);

    // Should purge only `previous_leader_slot` from the progress map
    progress_map.handle_new_root(&bank_forks);

    // Should succeed
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));
}
#[test]
fn test_check_propagation_skip_propagation_check() {
    // Verifies that the propagation check is skipped when the parent leader
    // slot is within NUM_CONSECUTIVE_LEADER_SLOTS of the poh slot.
    let mut progress_map = ProgressMap::default();
    let poh_slot = 4;
    let mut parent_slot = poh_slot - 1;

    // Set up the progress map to show that the last leader slot of 4 is 3,
    // which means 3 and 4 are consecutive leader slots
    progress_map.insert(
        3,
        ForkProgress::new(
            Hash::default(),
            None,
            DuplicateStats::default(),
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );

    // If the previous leader slot has not seen propagation threshold, but
    // was the direct parent (implying consecutive leader slots), create
    // the block regardless
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // If propagation threshold was achieved on parent, block should
    // also be created
    progress_map
        .get_mut(&3)
        .unwrap()
        .propagated_stats
        .is_propagated = true;
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Now insert another parent slot 2 for which this validator is also the leader
    parent_slot = poh_slot - NUM_CONSECUTIVE_LEADER_SLOTS + 1;
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            DuplicateStats::default(),
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );

    // Even though `parent_slot` and `poh_slot` are separated by another block,
    // because they're within `NUM_CONSECUTIVE` blocks of each other, the propagation
    // check is still skipped
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Once the distance becomes >= NUM_CONSECUTIVE_LEADER_SLOTS, then we need to
    // enforce the propagation check
    parent_slot = poh_slot - NUM_CONSECUTIVE_LEADER_SLOTS;
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            DuplicateStats::default(),
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );
    assert!(!ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));
}
#[test]
fn test_purge_unconfirmed_duplicate_slot() {
    // Verifies that purging a duplicate slot removes exactly that slot and its
    // descendants from both BankForks and the progress map.
    let (bank_forks, mut progress) = setup_forks();
    let mut descendants = bank_forks.read().unwrap().descendants().clone();
    let mut ancestors = bank_forks.read().unwrap().ancestors();

    // Purging slot 5 should purge only slots 5 and its descendant 6
    ReplayStage::purge_unconfirmed_duplicate_slot(
        5,
        &mut ancestors,
        &mut descendants,
        &mut progress,
        &bank_forks,
    );
    for i in 5..=6 {
        assert!(bank_forks.read().unwrap().get(i).is_none());
        assert!(progress.get(&i).is_none());
    }
    for i in 0..=4 {
        assert!(bank_forks.read().unwrap().get(i).is_some());
        assert!(progress.get(&i).is_some());
    }

    // Purging slot 4 should purge only slot 4
    let mut descendants = bank_forks.read().unwrap().descendants().clone();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    ReplayStage::purge_unconfirmed_duplicate_slot(
        4,
        &mut ancestors,
        &mut descendants,
        &mut progress,
        &bank_forks,
    );
    for i in 4..=6 {
        assert!(bank_forks.read().unwrap().get(i).is_none());
        assert!(progress.get(&i).is_none());
    }
    for i in 0..=3 {
        assert!(bank_forks.read().unwrap().get(i).is_some());
        assert!(progress.get(&i).is_some());
    }

    // Purging slot 1 should purge both forks 2 and 3
    let mut descendants = bank_forks.read().unwrap().descendants().clone();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    ReplayStage::purge_unconfirmed_duplicate_slot(
        1,
        &mut ancestors,
        &mut descendants,
        &mut progress,
        &bank_forks,
    );
    for i in 1..=6 {
        assert!(bank_forks.read().unwrap().get(i).is_none());
        assert!(progress.get(&i).is_none());
    }
    assert!(bank_forks.read().unwrap().get(0).is_some());
    assert!(progress.get(&0).is_some());
}
#[test]
fn test_purge_ancestors_descendants() {
    // Verifies that `purge_ancestors_descendants` leaves the ancestor and
    // descendant maps equivalent to regenerating them after removing the
    // purged branch from BankForks.
    let (bank_forks, _) = setup_forks();

    // Purge branch rooted at slot 2
    let mut descendants = bank_forks.read().unwrap().descendants().clone();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    let slot_2_descendants = descendants.get(&2).unwrap().clone();
    ReplayStage::purge_ancestors_descendants(
        2,
        &slot_2_descendants,
        &mut ancestors,
        &mut descendants,
    );

    // Result should be equivalent to removing slot from BankForks
    // and regenerating the `ancestor` `descendant` maps
    for d in slot_2_descendants {
        bank_forks.write().unwrap().remove(d);
    }
    bank_forks.write().unwrap().remove(2);
    assert!(check_map_eq(
        &ancestors,
        &bank_forks.read().unwrap().ancestors()
    ));
    assert!(check_map_eq(
        &descendants,
        &bank_forks.read().unwrap().descendants()
    ));

    // Try to purge the root
    bank_forks
        .write()
        .unwrap()
        .set_root(3, &AbsRequestSender::default(), None);
    let mut descendants = bank_forks.read().unwrap().descendants().clone();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    let slot_3_descendants = descendants.get(&3).unwrap().clone();
    ReplayStage::purge_ancestors_descendants(
        3,
        &slot_3_descendants,
        &mut ancestors,
        &mut descendants,
    );
    assert!(ancestors.is_empty());
    // Only remaining keys should be ones < root
    for k in descendants.keys() {
        assert!(*k < 3);
    }
}
#[test]
fn test_leader_snapshot_restart_propagation() {
    // Simulates restarting from a snapshot as the leader of the root slot and
    // checks that `compute_bank_stats` propagates votes back to that root bank.
    let ReplayBlockstoreComponents {
        validator_voting_keys,
        mut progress,
        bank_forks,
        leader_schedule_cache,
        ..
    } = replay_blockstore_components();

    let root_bank = bank_forks.read().unwrap().root_bank();
    let my_pubkey = leader_schedule_cache
        .slot_leader_at(root_bank.slot(), Some(&root_bank))
        .unwrap();

    // Check that we are the leader of the root bank
    assert!(
        progress
            .get_propagated_stats(root_bank.slot())
            .unwrap()
            .is_leader_slot
    );
    let ancestors = bank_forks.read().unwrap().ancestors();

    // Freeze bank so it shows up in frozen banks
    root_bank.freeze();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();

    // Compute bank stats, make sure vote is propagated back to starting root bank
    let vote_tracker = VoteTracker::default();

    // Add votes
    for vote_key in validator_voting_keys.values() {
        vote_tracker.insert_vote(root_bank.slot(), *vote_key);
    }
    assert!(!progress.is_propagated(root_bank.slot()));

    // Update propagation status
    let tower = Tower::new_for_tests(0, 0.67);
    ReplayStage::compute_bank_stats(
        &my_pubkey,
        &ancestors,
        &mut frozen_banks,
        &tower,
        &mut progress,
        &vote_tracker,
        &ClusterSlots::default(),
        &bank_forks,
        &mut HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks.read().unwrap()),
    );

    // Check status is true
    assert!(progress.is_propagated(root_bank.slot()));
}
#[test]
fn test_unconfirmed_duplicate_slots_and_lockouts() {
    /*
        Build fork structure:

             slot 0
               |
             slot 1
             /    \
        slot 2     |
           |       |
        slot 3     |
           |       |
        slot 4     |
                 slot 5
                   |
                 slot 6
    */
    let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4)))) / (tr(5) / (tr(6))));

    // Make enough validators for vote switch threshold later
    let mut vote_simulator = VoteSimulator::new(2);
    let validator_votes: HashMap<Pubkey, Vec<u64>> = vec![
        (vote_simulator.node_pubkeys[0], vec![5]),
        (vote_simulator.node_pubkeys[1], vec![2]),
    ]
    .into_iter()
    .collect();
    vote_simulator.fill_bank_forks(forks, &validator_votes);

    let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress);
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Arc::new(
        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
    );
    let mut tower = Tower::new_for_tests(8, 0.67);
    let mut heaviest_subtree_fork_choice =
        HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks.read().unwrap());

    // All forks have same weight so heaviest bank to vote/reset on should be the tip of
    // the fork with the lower slot
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut heaviest_subtree_fork_choice,
    );
    assert_eq!(vote_fork.unwrap(), 4);
    assert_eq!(reset_fork.unwrap(), 4);

    // Record the vote for 4
    tower.record_bank_vote(
        &bank_forks.read().unwrap().get(4).unwrap(),
        &Pubkey::default(),
    );

    // Mark 4 as duplicate, 3 should be the heaviest slot, but should not be votable
    // because of lockout
    blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap();
    let ancestors = bank_forks.read().unwrap().ancestors();
    let descendants = bank_forks.read().unwrap().descendants().clone();
    let mut gossip_duplicate_confirmed_slots = BTreeMap::new();
    let bank4_hash = bank_forks.read().unwrap().get(4).unwrap().hash();
    assert_ne!(bank4_hash, Hash::default());
    check_slot_agrees_with_cluster(
        4,
        bank_forks.read().unwrap().root(),
        Some(bank4_hash),
        &gossip_duplicate_confirmed_slots,
        &ancestors,
        &descendants,
        &mut progress,
        &mut heaviest_subtree_fork_choice,
        SlotStateUpdate::Duplicate,
    );
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut heaviest_subtree_fork_choice,
    );
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork.unwrap(), 3);

    // Now mark 2, an ancestor of 4, as duplicate
    blockstore.store_duplicate_slot(2, vec![], vec![]).unwrap();
    let ancestors = bank_forks.read().unwrap().ancestors();
    let descendants = bank_forks.read().unwrap().descendants().clone();
    let bank2_hash = bank_forks.read().unwrap().get(2).unwrap().hash();
    assert_ne!(bank2_hash, Hash::default());
    check_slot_agrees_with_cluster(
        2,
        bank_forks.read().unwrap().root(),
        Some(bank2_hash),
        &gossip_duplicate_confirmed_slots,
        &ancestors,
        &descendants,
        &mut progress,
        &mut heaviest_subtree_fork_choice,
        SlotStateUpdate::Duplicate,
    );
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut heaviest_subtree_fork_choice,
    );

    // Should now pick the next heaviest fork that is not a descendant of 2, which is 6.
    // However the lockout from vote 4 should still apply, so 6 should not be votable
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork.unwrap(), 6);

    // If slot 4 is marked as confirmed, then this confirms slot 2 and 4, and
    // then slot 4 is now the heaviest bank again
    gossip_duplicate_confirmed_slots.insert(4, bank4_hash);
    check_slot_agrees_with_cluster(
        4,
        bank_forks.read().unwrap().root(),
        Some(bank4_hash),
        &gossip_duplicate_confirmed_slots,
        &ancestors,
        &descendants,
        &mut progress,
        &mut heaviest_subtree_fork_choice,
        SlotStateUpdate::DuplicateConfirmed,
    );
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut heaviest_subtree_fork_choice,
    );

    // Should now pick the heaviest fork 4 again, but lockouts apply so fork 4
    // is not votable, which avoids voting for 4 again.
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork.unwrap(), 4);
}
/// Test helper: runs `compute_bank_stats` followed by fork selection and
/// returns `(vote_slot, reset_slot)` — the slot the validator would vote on
/// (if any) and the slot it would reset PoH to (if any).
fn run_compute_and_select_forks(
    bank_forks: &RwLock<BankForks>,
    progress: &mut ProgressMap,
    tower: &mut Tower,
    heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
) -> (Option<Slot>, Option<Slot>) {
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let ancestors = &bank_forks.read().unwrap().ancestors();
    let descendants = &bank_forks.read().unwrap().descendants().clone();
    ReplayStage::compute_bank_stats(
        &Pubkey::default(),
        &bank_forks.read().unwrap().ancestors(),
        &mut frozen_banks,
        tower,
        progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        heaviest_subtree_fork_choice,
    );
    let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice
        .select_forks(&frozen_banks, &tower, &progress, &ancestors, bank_forks);
    // These tests never produce a "same fork" candidate bank
    assert!(heaviest_bank_on_same_fork.is_none());
    let SelectVoteAndResetForkResult {
        vote_bank,
        reset_bank,
        ..
    } = ReplayStage::select_vote_and_reset_forks(
        &heaviest_bank,
        heaviest_bank_on_same_fork.as_ref(),
        &ancestors,
        &descendants,
        progress,
        tower,
    );
    (
        vote_bank.map(|(b, _)| b.slot()),
        reset_bank.map(|b| b.slot()),
    )
}
2020-05-05 14:07:21 -07:00
fn setup_forks ( ) -> ( RwLock < BankForks > , ProgressMap ) {
/*
Build fork structure :
2021-03-24 23:41:52 -07:00
2020-05-05 14:07:21 -07:00
slot 0
|
slot 1
/ \
slot 2 |
| slot 3
slot 4 |
slot 5
|
slot 6
* /
let forks = tr ( 0 ) / ( tr ( 1 ) / ( tr ( 2 ) / ( tr ( 4 ) ) ) / ( tr ( 3 ) / ( tr ( 5 ) / ( tr ( 6 ) ) ) ) ) ;
let mut vote_simulator = VoteSimulator ::new ( 1 ) ;
vote_simulator . fill_bank_forks ( forks , & HashMap ::new ( ) ) ;
( vote_simulator . bank_forks , vote_simulator . progress )
}
/// Test helper: returns true iff the two maps contain exactly the same
/// key/value pairs. Unlike a naive `get(k).unwrap()` comparison, a key
/// present in `map1` but missing from `map2` yields `false` instead of
/// panicking.
fn check_map_eq<K: Eq + std::hash::Hash + std::fmt::Debug, T: PartialEq + std::fmt::Debug>(
    map1: &HashMap<K, T>,
    map2: &HashMap<K, T>,
) -> bool {
    map1.len() == map2.len() && map1.iter().all(|(k, v)| map2.get(k) == Some(v))
}
}