2018-12-07 14:09:29 -08:00
//! The `replay_stage` replays transactions broadcast by the leader.
2022-07-28 11:33:19 -07:00
2021-07-20 22:25:13 -07:00
use {
crate ::{
ancestor_hashes_service ::AncestorHashesReplayUpdateSender ,
broadcast_stage ::RetransmitSlotsSender ,
cache_block_meta_service ::CacheBlockMetaSender ,
cluster_info_vote_listener ::{
GossipDuplicateConfirmedSlotsReceiver , GossipVerifiedVoteHashReceiver , VoteTracker ,
} ,
cluster_slot_state_verifier ::* ,
cluster_slots ::ClusterSlots ,
cluster_slots_service ::ClusterSlotsUpdateSender ,
commitment_service ::{ AggregateCommitmentService , CommitmentAggregationData } ,
consensus ::{
2021-08-09 11:32:48 -07:00
ComputedBankState , Stake , SwitchForkDecision , Tower , VotedStakes , SWITCH_FORK_THRESHOLD ,
2021-07-20 22:25:13 -07:00
} ,
2021-10-24 20:19:23 -07:00
cost_update_service ::CostUpdate ,
2021-07-20 22:25:13 -07:00
fork_choice ::{ ForkChoice , SelectVoteAndResetForkResult } ,
heaviest_subtree_fork_choice ::HeaviestSubtreeForkChoice ,
latest_validator_votes_for_frozen_banks ::LatestValidatorVotesForFrozenBanks ,
2022-07-28 11:33:19 -07:00
progress_map ::{ ForkProgress , ProgressMap , PropagatedStats , ReplaySlotStats } ,
2021-07-20 22:25:13 -07:00
repair_service ::DuplicateSlotsResetReceiver ,
rewards_recorder_service ::RewardsRecorderSender ,
2022-02-07 14:06:19 -08:00
tower_storage ::{ SavedTower , SavedTowerVersions , TowerStorage } ,
2021-07-20 22:25:13 -07:00
unfrozen_gossip_verified_vote_hashes ::UnfrozenGossipVerifiedVoteHashes ,
2022-06-25 23:14:17 -07:00
validator ::ProcessBlockStore ,
2021-07-20 22:25:13 -07:00
voting_service ::VoteOp ,
window_service ::DuplicateSlotReceiver ,
2021-04-10 17:34:45 -07:00
} ,
2022-01-11 02:44:46 -08:00
crossbeam_channel ::{ Receiver , RecvTimeoutError , Sender } ,
2022-07-28 11:33:19 -07:00
lazy_static ::lazy_static ,
rayon ::{ prelude ::* , ThreadPool } ,
2021-07-20 22:25:13 -07:00
solana_entry ::entry ::VerifyRecyclers ,
2022-03-14 18:18:46 -07:00
solana_geyser_plugin_manager ::block_metadata_notifier_interface ::BlockMetadataNotifierLock ,
2021-07-20 22:25:13 -07:00
solana_gossip ::cluster_info ::ClusterInfo ,
solana_ledger ::{
block_error ::BlockError ,
blockstore ::Blockstore ,
2022-07-28 11:33:19 -07:00
blockstore_processor ::{
self , BlockstoreProcessorError , ConfirmationProgress , TransactionStatusSender ,
} ,
2021-07-20 22:25:13 -07:00
leader_schedule_cache ::LeaderScheduleCache ,
2022-01-04 00:24:16 -08:00
leader_schedule_utils ::first_of_consecutive_leader_slots ,
2021-03-24 23:41:52 -07:00
} ,
2021-07-20 22:25:13 -07:00
solana_measure ::measure ::Measure ,
solana_metrics ::inc_new_counter_info ,
2022-01-22 18:28:50 -08:00
solana_poh ::poh_recorder ::{ PohLeaderStatus , PohRecorder , GRACE_TICKS_FACTOR , MAX_GRACE_SLOTS } ,
2022-01-04 00:23:56 -08:00
solana_program_runtime ::timings ::ExecuteTimings ,
2021-07-20 22:25:13 -07:00
solana_rpc ::{
optimistically_confirmed_bank_tracker ::{ BankNotification , BankNotificationSender } ,
rpc_subscriptions ::RpcSubscriptions ,
} ,
2022-08-24 09:47:02 -07:00
solana_rpc_client_api ::response ::SlotUpdate ,
2021-07-20 22:25:13 -07:00
solana_runtime ::{
2021-12-03 09:00:31 -08:00
accounts_background_service ::AbsRequestSender ,
2022-01-04 00:23:56 -08:00
bank ::{ Bank , NewBankOptions } ,
2022-05-14 08:53:37 -07:00
bank_forks ::{ BankForks , MAX_ROOT_DISTANCE_FOR_VOTE_ONLY } ,
2021-12-03 09:00:31 -08:00
commitment ::BlockCommitmentCache ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache ::PrioritizationFeeCache ,
2021-07-20 22:25:13 -07:00
vote_sender_types ::ReplayVoteSender ,
} ,
solana_sdk ::{
clock ::{ BankId , Slot , MAX_PROCESSING_AGE , NUM_CONSECUTIVE_LEADER_SLOTS } ,
2022-07-27 12:23:44 -07:00
feature_set ,
2021-07-20 22:25:13 -07:00
genesis_config ::ClusterType ,
hash ::Hash ,
pubkey ::Pubkey ,
2022-01-17 08:59:47 -08:00
saturating_add_assign ,
2021-12-03 09:00:31 -08:00
signature ::{ Keypair , Signature , Signer } ,
2021-07-20 22:25:13 -07:00
timing ::timestamp ,
transaction ::Transaction ,
} ,
2022-08-23 22:29:03 -07:00
solana_vote_program ::vote_state ::VoteTransaction ,
2021-07-20 22:25:13 -07:00
std ::{
collections ::{ HashMap , HashSet } ,
result ,
sync ::{
2022-07-28 11:33:19 -07:00
atomic ::{ AtomicBool , AtomicU64 , Ordering } ,
2022-07-05 07:29:44 -07:00
Arc , RwLock ,
2021-07-20 22:25:13 -07:00
} ,
thread ::{ self , Builder , JoinHandle } ,
time ::{ Duration , Instant } ,
2019-12-06 13:38:49 -08:00
} ,
2019-11-02 00:38:30 -07:00
} ;
2018-05-22 14:26:28 -07:00
2018-12-13 18:43:10 -08:00
// Upper bound on entries drained from the channel in a single replay iteration.
pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
// One third of total stake; a partition below this threshold cannot make
// progress on its own (standard supermajority-consensus bound).
pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64;
// NOTE(review): limit on unconfirmed slots tolerated — exact consumer is
// outside this view; confirm against usage.
pub const MAX_UNCONFIRMED_SLOTS: usize = 5;
// Stake fraction reserved as slack for liveness when computing the duplicate
// confirmation threshold below.
pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1;
// Stake fraction required to confirm a duplicate slot: what remains after
// subtracting the switch-fork threshold and the liveness slack from 1.0.
pub const DUPLICATE_THRESHOLD: f64 = 1.0 - SWITCH_FORK_THRESHOLD - DUPLICATE_LIVENESS_THRESHOLD;
// Cap on the number of vote transaction signatures retained at once.
const MAX_VOTE_SIGNATURES: usize = 200;
// Minimum interval (ms) between refreshes of the last vote.
const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000;
// Expect this number to be small enough to minimize thread pool overhead while large enough
// to be able to replay all active forks at the same time in most cases.
const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4;
// Maximum attempts of a repair-retry loop before giving up.
const MAX_REPAIR_RETRY_LOOP_ATTEMPTS: usize = 10;
2022-07-28 11:33:19 -07:00
lazy_static! {
    // Dedicated rayon pool used to replay multiple active forks in parallel,
    // sized by MAX_CONCURRENT_FORKS_TO_REPLAY (see rationale above). Worker
    // threads are named "solReplayNN" for observability.
    static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
        .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY)
        .thread_name(|ix| format!("solReplay{ix:02}"))
        .build()
        .unwrap();
}
2020-03-02 12:43:43 -08:00
2022-05-22 18:00:42 -07:00
/// Reasons the replay loop could not vote on the heaviest fork this
/// iteration. Each variant carries the slot number it applies to.
#[derive(PartialEq, Eq, Debug)]
pub enum HeaviestForkFailures {
    // Voting for this slot is prohibited by tower lockouts.
    LockedOut(u64),
    // NOTE(review): vote-stake threshold check failed for this slot — confirm
    // semantics against `select_vote_and_reset_forks`.
    FailedThreshold(u64),
    // Switching forks to this slot failed the switch-proof stake threshold.
    FailedSwitchThreshold(u64),
    // This slot's leader block has not yet been propagated to/confirmed by
    // enough of the cluster (see the NoPropagatedConfirmation handling in the
    // replay loop, which logs propagation stats).
    NoPropagatedConfirmation(u64),
}
2018-12-13 18:43:10 -08:00
2018-12-07 14:09:29 -08:00
// Implement a destructor for the ReplayStage thread to signal it exited
2018-09-25 15:41:29 -07:00
// even on panics
/// RAII guard: sets the shared exit flag when dropped, so the ReplayStage
/// thread signals that it has exited even if it unwinds from a panic.
struct Finalizer {
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    /// Wraps the shared exit flag in a guard; the flag is raised on drop.
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // Store through the Arc directly — cloning it first (as the previous
        // code did) only added a pointless refcount bump/drop.
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}
2022-07-28 11:33:19 -07:00
/// Result of replaying one slot's data from the blockstore.
struct ReplaySlotFromBlockstore {
    // True when the slot was marked dead during replay.
    is_slot_dead: bool,
    // The slot of the bank that was replayed.
    bank_slot: Slot,
    // Ok(transaction count) on success, Err on a blockstore-processing
    // failure; None when no replay result was produced for this slot.
    replay_result: Option<Result<usize /* tx count */, BlockstoreProcessorError>>,
}
2021-04-28 11:46:16 -07:00
/// Tracks when the last vote was refreshed and when refresh activity was
/// last logged (both used by `refresh_last_vote` in the replay loop).
struct LastVoteRefreshTime {
    // Last time the previous vote was re-submitted/refreshed.
    last_refresh_time: Instant,
    // Last time a refresh-related log line was printed (rate limiting).
    last_print_time: Instant,
}
2020-03-26 19:57:27 -07:00
#[ derive(Default) ]
struct SkippedSlotsInfo {
last_retransmit_slot : u64 ,
last_skipped_slot : u64 ,
}
2019-12-04 10:17:17 -08:00
/// Configuration consumed by `ReplayStage::new`: identities, caches, exit
/// signal, and the senders the replay loop reports through.
pub struct ReplayStageConfig {
    // This validator's vote account address.
    pub vote_account: Pubkey,
    // Keypairs authorized to sign votes for the vote account.
    pub authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
    // Shared exit flag; the replay loop stops when it is set.
    pub exit: Arc<AtomicBool>,
    pub rpc_subscriptions: Arc<RpcSubscriptions>,
    pub leader_schedule_cache: Arc<LeaderScheduleCache>,
    // Channels notified whenever a new root slot is set.
    pub latest_root_senders: Vec<Sender<Slot>>,
    pub accounts_background_request_sender: AbsRequestSender,
    pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
    // Optional senders; `None` disables the corresponding reporting path.
    pub transaction_status_sender: Option<TransactionStatusSender>,
    pub rewards_recorder_sender: Option<RewardsRecorderSender>,
    pub cache_block_meta_sender: Option<CacheBlockMetaSender>,
    pub bank_notification_sender: Option<BankNotificationSender>,
    // When true, leader block production waits until one of this validator's
    // votes has been rooted (see `has_new_vote_been_rooted` in the loop).
    pub wait_for_vote_to_start_leader: bool,
    pub ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender,
    // Persistent storage used to save/restore the tower (also reloaded on
    // identity change).
    pub tower_storage: Arc<dyn TowerStorage>,
    // Stops voting until this slot has been reached. Should be used to avoid
    // duplicate voting which can lead to slashing.
    pub wait_to_vote_slot: Option<Slot>,
    // When true, active forks are replayed in parallel (see PAR_THREAD_POOL).
    pub replay_slots_concurrently: bool,
}
2020-05-08 03:46:29 -07:00
/// Running totals of replay-loop stage timings, flushed to metrics roughly
/// once per second by `ReplayTiming::update` and then reset.
#[derive(Default)]
pub struct ReplayTiming {
    // Millisecond timestamp of the last metrics report; 0 until first report.
    last_print: u64,
    collect_frozen_banks_elapsed: u64,
    compute_bank_stats_elapsed: u64,
    select_vote_and_reset_forks_elapsed: u64,
    start_leader_elapsed: u64,
    reset_bank_elapsed: u64,
    voting_elapsed: u64,
    // Voting sub-timings (microseconds), reported in a separate datapoint.
    vote_push_us: u64,
    vote_send_us: u64,
    generate_vote_us: u64,
    update_commitment_cache_us: u64,
    select_forks_elapsed: u64,
    compute_slot_stats_elapsed: u64,
    generate_new_bank_forks_elapsed: u64,
    replay_active_banks_elapsed: u64,
    wait_receive_elapsed: u64,
    heaviest_fork_failures_elapsed: u64,
    // Number of banks completed during the reporting window.
    bank_count: u64,
    process_gossip_duplicate_confirmed_slots_elapsed: u64,
    process_duplicate_slots_elapsed: u64,
    process_unfrozen_gossip_verified_vote_hashes_elapsed: u64,
    repair_correct_slots_elapsed: u64,
    retransmit_not_propagated_elapsed: u64,
    // Fine-grained sub-timings of `generate_new_bank_forks`.
    generate_new_bank_forks_read_lock_us: u64,
    generate_new_bank_forks_get_slots_since_us: u64,
    generate_new_bank_forks_loop_us: u64,
    generate_new_bank_forks_write_lock_us: u64,
    replay_blockstore_us: u64, //< When processing forks concurrently, only captures the longest fork
}
impl ReplayTiming {
    /// Folds one replay-loop iteration's timings into the running totals and,
    /// once more than 1000ms have passed since the last report, emits the
    /// aggregates as two metrics datapoints and zeroes all counters.
    ///
    /// NOTE(review): `last_print` starts at 0 (via `Default`), so the first
    /// call after startup reports immediately with an inflated
    /// `total_elapsed_us` — confirm this is intended.
    #[allow(clippy::too_many_arguments)]
    fn update(
        &mut self,
        collect_frozen_banks_elapsed: u64,
        compute_bank_stats_elapsed: u64,
        select_vote_and_reset_forks_elapsed: u64,
        start_leader_elapsed: u64,
        reset_bank_elapsed: u64,
        voting_elapsed: u64,
        select_forks_elapsed: u64,
        compute_slot_stats_elapsed: u64,
        generate_new_bank_forks_elapsed: u64,
        replay_active_banks_elapsed: u64,
        wait_receive_elapsed: u64,
        heaviest_fork_failures_elapsed: u64,
        bank_count: u64,
        process_gossip_duplicate_confirmed_slots_elapsed: u64,
        process_unfrozen_gossip_verified_vote_hashes_elapsed: u64,
        process_duplicate_slots_elapsed: u64,
        repair_correct_slots_elapsed: u64,
        retransmit_not_propagated_elapsed: u64,
    ) {
        // Accumulate this iteration into the current reporting window.
        self.collect_frozen_banks_elapsed += collect_frozen_banks_elapsed;
        self.compute_bank_stats_elapsed += compute_bank_stats_elapsed;
        self.select_vote_and_reset_forks_elapsed += select_vote_and_reset_forks_elapsed;
        self.start_leader_elapsed += start_leader_elapsed;
        self.reset_bank_elapsed += reset_bank_elapsed;
        self.voting_elapsed += voting_elapsed;
        self.select_forks_elapsed += select_forks_elapsed;
        self.compute_slot_stats_elapsed += compute_slot_stats_elapsed;
        self.generate_new_bank_forks_elapsed += generate_new_bank_forks_elapsed;
        self.replay_active_banks_elapsed += replay_active_banks_elapsed;
        self.wait_receive_elapsed += wait_receive_elapsed;
        self.heaviest_fork_failures_elapsed += heaviest_fork_failures_elapsed;
        self.bank_count += bank_count;
        self.process_gossip_duplicate_confirmed_slots_elapsed +=
            process_gossip_duplicate_confirmed_slots_elapsed;
        self.process_unfrozen_gossip_verified_vote_hashes_elapsed +=
            process_unfrozen_gossip_verified_vote_hashes_elapsed;
        self.process_duplicate_slots_elapsed += process_duplicate_slots_elapsed;
        self.repair_correct_slots_elapsed += repair_correct_slots_elapsed;
        self.retransmit_not_propagated_elapsed += retransmit_not_propagated_elapsed;
        // `timestamp()` (solana_sdk::timing) is a millisecond wall-clock value.
        let now = timestamp();
        let elapsed_ms = now - self.last_print;
        if elapsed_ms > 1000 {
            // Voting sub-timings go to their own datapoint.
            datapoint_info!(
                "replay-loop-voting-stats",
                ("vote_push_us", self.vote_push_us, i64),
                ("vote_send_us", self.vote_send_us, i64),
                ("generate_vote_us", self.generate_vote_us, i64),
                (
                    "update_commitment_cache_us",
                    self.update_commitment_cache_us,
                    i64
                ),
            );
            // Per-stage loop timings, aggregated over the window.
            datapoint_info!(
                "replay-loop-timing-stats",
                ("total_elapsed_us", elapsed_ms * 1000, i64),
                (
                    "collect_frozen_banks_elapsed",
                    self.collect_frozen_banks_elapsed as i64,
                    i64
                ),
                (
                    "compute_bank_stats_elapsed",
                    self.compute_bank_stats_elapsed as i64,
                    i64
                ),
                (
                    "select_vote_and_reset_forks_elapsed",
                    self.select_vote_and_reset_forks_elapsed as i64,
                    i64
                ),
                (
                    "start_leader_elapsed",
                    self.start_leader_elapsed as i64,
                    i64
                ),
                ("reset_bank_elapsed", self.reset_bank_elapsed as i64, i64),
                ("voting_elapsed", self.voting_elapsed as i64, i64),
                (
                    "select_forks_elapsed",
                    self.select_forks_elapsed as i64,
                    i64
                ),
                (
                    "compute_slot_stats_elapsed",
                    self.compute_slot_stats_elapsed as i64,
                    i64
                ),
                (
                    "generate_new_bank_forks_elapsed",
                    self.generate_new_bank_forks_elapsed as i64,
                    i64
                ),
                (
                    "replay_active_banks_elapsed",
                    self.replay_active_banks_elapsed as i64,
                    i64
                ),
                (
                    "process_gossip_duplicate_confirmed_slots_elapsed",
                    self.process_gossip_duplicate_confirmed_slots_elapsed as i64,
                    i64
                ),
                (
                    "process_unfrozen_gossip_verified_vote_hashes_elapsed",
                    self.process_unfrozen_gossip_verified_vote_hashes_elapsed as i64,
                    i64
                ),
                (
                    "wait_receive_elapsed",
                    self.wait_receive_elapsed as i64,
                    i64
                ),
                (
                    "heaviest_fork_failures_elapsed",
                    self.heaviest_fork_failures_elapsed as i64,
                    i64
                ),
                ("bank_count", self.bank_count as i64, i64),
                (
                    "process_duplicate_slots_elapsed",
                    self.process_duplicate_slots_elapsed as i64,
                    i64
                ),
                (
                    "repair_correct_slots_elapsed",
                    self.repair_correct_slots_elapsed as i64,
                    i64
                ),
                (
                    "retransmit_not_propagated_elapsed",
                    self.retransmit_not_propagated_elapsed as i64,
                    i64
                ),
                (
                    "generate_new_bank_forks_read_lock_us",
                    self.generate_new_bank_forks_read_lock_us as i64,
                    i64
                ),
                (
                    "generate_new_bank_forks_get_slots_since_us",
                    self.generate_new_bank_forks_get_slots_since_us as i64,
                    i64
                ),
                (
                    "generate_new_bank_forks_loop_us",
                    self.generate_new_bank_forks_loop_us as i64,
                    i64
                ),
                (
                    "generate_new_bank_forks_write_lock_us",
                    self.generate_new_bank_forks_write_lock_us as i64,
                    i64
                ),
                (
                    "replay_blockstore_us",
                    self.replay_blockstore_us as i64,
                    i64
                ),
            );
            // Reset every counter for the next window; `default()` also zeroes
            // `last_print`, so restore it to `now` afterwards.
            *self = ReplayTiming::default();
            self.last_print = now;
        }
    }
}
2018-12-07 14:09:29 -08:00
/// Handle to the replay stage: the replay-loop thread plus the commitment
/// aggregation service it feeds.
pub struct ReplayStage {
    // Join handle for the replay-loop thread spawned in `ReplayStage::new`.
    t_replay: JoinHandle<()>,
    // Aggregates vote lockouts into block commitment (fed via lockouts_sender).
    commitment_service: AggregateCommitmentService,
}
2018-12-07 14:09:29 -08:00
impl ReplayStage {
2020-05-05 14:07:21 -07:00
#[ allow(clippy::new_ret_no_self, clippy::too_many_arguments) ]
2022-06-25 23:14:17 -07:00
pub fn new (
2019-12-06 14:39:35 -08:00
config : ReplayStageConfig ,
2020-01-13 13:13:52 -08:00
blockstore : Arc < Blockstore > ,
2019-12-06 14:39:35 -08:00
bank_forks : Arc < RwLock < BankForks > > ,
2020-04-21 12:54:45 -07:00
cluster_info : Arc < ClusterInfo > ,
2019-12-06 14:39:35 -08:00
ledger_signal_receiver : Receiver < bool > ,
2021-03-24 23:41:52 -07:00
duplicate_slots_receiver : DuplicateSlotReceiver ,
2022-07-05 07:29:44 -07:00
poh_recorder : Arc < RwLock < PohRecorder > > ,
2022-06-25 23:14:17 -07:00
maybe_process_blockstore : Option < ProcessBlockStore > ,
2020-03-26 19:57:27 -07:00
vote_tracker : Arc < VoteTracker > ,
2020-03-30 19:57:11 -07:00
cluster_slots : Arc < ClusterSlots > ,
2020-03-19 23:35:01 -07:00
retransmit_slots_sender : RetransmitSlotsSender ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_receiver : DuplicateSlotsResetReceiver ,
2020-08-07 11:21:35 -07:00
replay_vote_sender : ReplayVoteSender ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots_receiver : GossipDuplicateConfirmedSlotsReceiver ,
2021-04-10 17:34:45 -07:00
gossip_verified_vote_hash_receiver : GossipVerifiedVoteHashReceiver ,
2021-06-02 17:20:00 -07:00
cluster_slots_update_sender : ClusterSlotsUpdateSender ,
2021-10-24 20:19:23 -07:00
cost_update_sender : Sender < CostUpdate > ,
2021-07-15 07:35:51 -07:00
voting_sender : Sender < VoteOp > ,
2021-11-19 08:20:18 -08:00
drop_bank_sender : Sender < Vec < Arc < Bank > > > ,
2021-12-29 15:12:01 -08:00
block_metadata_notifier : Option < BlockMetadataNotifierLock > ,
2022-07-11 08:53:18 -07:00
log_messages_bytes_limit : Option < usize > ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache : Arc < PrioritizationFeeCache > ,
2022-08-11 23:25:20 -07:00
) -> Result < Self , String > {
2022-06-25 23:14:17 -07:00
let mut tower = if let Some ( process_blockstore ) = maybe_process_blockstore {
2022-08-11 23:25:20 -07:00
let tower = process_blockstore . process_to_create_tower ( ) ? ;
2022-06-25 23:14:17 -07:00
info! ( " Tower state: {:?} " , tower ) ;
tower
} else {
warn! ( " creating default tower.... " ) ;
Tower ::default ( )
} ;
2022-03-23 10:04:58 -07:00
2019-12-04 10:17:17 -08:00
let ReplayStageConfig {
vote_account ,
2020-03-31 08:23:42 -07:00
authorized_voter_keypairs ,
2019-12-04 10:17:17 -08:00
exit ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions ,
2019-12-04 10:17:17 -08:00
leader_schedule_cache ,
2019-12-18 11:50:09 -08:00
latest_root_senders ,
2020-12-12 17:22:34 -08:00
accounts_background_request_sender ,
2019-12-04 10:17:17 -08:00
block_commitment_cache ,
transaction_status_sender ,
2020-02-11 17:01:49 -08:00
rewards_recorder_sender ,
2021-05-26 21:16:16 -07:00
cache_block_meta_sender ,
2020-09-28 19:43:05 -07:00
bank_notification_sender ,
2021-03-25 18:54:51 -07:00
wait_for_vote_to_start_leader ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender ,
2021-07-20 22:25:13 -07:00
tower_storage ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot ,
2022-08-26 12:36:02 -07:00
replay_slots_concurrently ,
2019-12-04 10:17:17 -08:00
} = config ;
2019-02-26 21:57:45 -08:00
trace! ( " replay stage " ) ;
2019-02-24 01:06:46 -08:00
// Start the replay stage loop
2020-05-18 11:49:01 -07:00
let ( lockouts_sender , commitment_service ) = AggregateCommitmentService ::new (
& exit ,
block_commitment_cache . clone ( ) ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions . clone ( ) ,
2020-05-18 11:49:01 -07:00
) ;
2022-12-06 14:09:57 -08:00
let run_replay = move | | {
let verify_recyclers = VerifyRecyclers ::default ( ) ;
let _exit = Finalizer ::new ( exit . clone ( ) ) ;
let mut identity_keypair = cluster_info . keypair ( ) . clone ( ) ;
let mut my_pubkey = identity_keypair . pubkey ( ) ;
let ( mut progress , mut heaviest_subtree_fork_choice ) =
Self ::initialize_progress_and_fork_choice_with_locked_bank_forks (
2020-09-18 22:03:54 -07:00
& bank_forks ,
& my_pubkey ,
& vote_account ,
) ;
2022-12-06 14:09:57 -08:00
let mut current_leader = None ;
let mut last_reset = Hash ::default ( ) ;
let mut partition_exists = false ;
let mut skipped_slots_info = SkippedSlotsInfo ::default ( ) ;
let mut replay_timing = ReplayTiming ::default ( ) ;
let mut duplicate_slots_tracker = DuplicateSlotsTracker ::default ( ) ;
let mut gossip_duplicate_confirmed_slots : GossipDuplicateConfirmedSlots =
GossipDuplicateConfirmedSlots ::default ( ) ;
let mut epoch_slots_frozen_slots : EpochSlotsFrozenSlots =
EpochSlotsFrozenSlots ::default ( ) ;
let mut duplicate_slots_to_repair = DuplicateSlotsToRepair ::default ( ) ;
let mut purge_repair_slot_counter = PurgeRepairSlotCounter ::default ( ) ;
let mut unfrozen_gossip_verified_vote_hashes : UnfrozenGossipVerifiedVoteHashes =
UnfrozenGossipVerifiedVoteHashes ::default ( ) ;
let mut latest_validator_votes_for_frozen_banks : LatestValidatorVotesForFrozenBanks =
LatestValidatorVotesForFrozenBanks ::default ( ) ;
let mut voted_signatures = Vec ::new ( ) ;
let mut has_new_vote_been_rooted = ! wait_for_vote_to_start_leader ;
let mut last_vote_refresh_time = LastVoteRefreshTime {
last_refresh_time : Instant ::now ( ) ,
last_print_time : Instant ::now ( ) ,
} ;
let ( working_bank , in_vote_only_mode ) = {
let r_bank_forks = bank_forks . read ( ) . unwrap ( ) ;
(
r_bank_forks . working_bank ( ) ,
r_bank_forks . get_vote_only_mode_signal ( ) ,
)
} ;
2022-02-07 14:06:19 -08:00
2022-12-06 14:09:57 -08:00
Self ::reset_poh_recorder (
& my_pubkey ,
& blockstore ,
& working_bank ,
& poh_recorder ,
& leader_schedule_cache ,
) ;
loop {
// Stop getting entries if we get exit signal
if exit . load ( Ordering ::Relaxed ) {
break ;
}
let mut generate_new_bank_forks_time =
Measure ::start ( " generate_new_bank_forks_time " ) ;
Self ::generate_new_bank_forks (
2022-04-20 17:53:29 -07:00
& blockstore ,
2022-12-06 14:09:57 -08:00
& bank_forks ,
2022-04-20 17:53:29 -07:00
& leader_schedule_cache ,
2022-12-06 14:09:57 -08:00
& rpc_subscriptions ,
& mut progress ,
& mut replay_timing ,
2022-04-20 17:53:29 -07:00
) ;
2022-12-06 14:09:57 -08:00
generate_new_bank_forks_time . stop ( ) ;
2022-04-20 17:53:29 -07:00
2022-12-06 14:09:57 -08:00
let mut tpu_has_bank = poh_recorder . read ( ) . unwrap ( ) . has_bank ( ) ;
2019-03-21 11:53:18 -07:00
2022-12-06 14:09:57 -08:00
let mut replay_active_banks_time = Measure ::start ( " replay_active_banks_time " ) ;
let mut ancestors = bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ;
let mut descendants = bank_forks . read ( ) . unwrap ( ) . descendants ( ) ;
let did_complete_bank = Self ::replay_active_banks (
& blockstore ,
& bank_forks ,
& my_pubkey ,
& vote_account ,
& mut progress ,
transaction_status_sender . as_ref ( ) ,
cache_block_meta_sender . as_ref ( ) ,
& verify_recyclers ,
& mut heaviest_subtree_fork_choice ,
& replay_vote_sender ,
& bank_notification_sender ,
& rewards_recorder_sender ,
& rpc_subscriptions ,
& mut duplicate_slots_tracker ,
& gossip_duplicate_confirmed_slots ,
& mut epoch_slots_frozen_slots ,
& mut unfrozen_gossip_verified_vote_hashes ,
& mut latest_validator_votes_for_frozen_banks ,
& cluster_slots_update_sender ,
& cost_update_sender ,
& mut duplicate_slots_to_repair ,
& ancestor_hashes_replay_update_sender ,
block_metadata_notifier . clone ( ) ,
& mut replay_timing ,
log_messages_bytes_limit ,
replay_slots_concurrently ,
& prioritization_fee_cache ,
& mut purge_repair_slot_counter ,
) ;
replay_active_banks_time . stop ( ) ;
2019-03-21 11:53:18 -07:00
2022-12-06 14:09:57 -08:00
let forks_root = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
2019-07-18 14:54:27 -07:00
2022-12-06 14:09:57 -08:00
// Reset any dead slots that have been frozen by a sufficient portion of
// the network. Signalled by repair_service.
let mut purge_dead_slots_time = Measure ::start ( " purge_dead_slots " ) ;
Self ::process_epoch_slots_frozen_dead_slots (
& my_pubkey ,
& blockstore ,
& epoch_slots_frozen_receiver ,
& mut duplicate_slots_tracker ,
& gossip_duplicate_confirmed_slots ,
& mut epoch_slots_frozen_slots ,
& mut progress ,
& mut heaviest_subtree_fork_choice ,
& bank_forks ,
& mut duplicate_slots_to_repair ,
& ancestor_hashes_replay_update_sender ,
& mut purge_repair_slot_counter ,
) ;
purge_dead_slots_time . stop ( ) ;
2019-03-12 17:42:53 -07:00
2022-12-06 14:09:57 -08:00
// Check for any newly confirmed slots detected from gossip.
let mut process_gossip_duplicate_confirmed_slots_time =
Measure ::start ( " process_gossip_duplicate_confirmed_slots " ) ;
Self ::process_gossip_duplicate_confirmed_slots (
& gossip_duplicate_confirmed_slots_receiver ,
& blockstore ,
& mut duplicate_slots_tracker ,
& mut gossip_duplicate_confirmed_slots ,
& mut epoch_slots_frozen_slots ,
& bank_forks ,
& mut progress ,
& mut heaviest_subtree_fork_choice ,
& mut duplicate_slots_to_repair ,
& ancestor_hashes_replay_update_sender ,
& mut purge_repair_slot_counter ,
) ;
process_gossip_duplicate_confirmed_slots_time . stop ( ) ;
// Ingest any new verified votes from gossip. Important for fork choice
// and switching proofs because these may be votes that haven't yet been
// included in a block, so we may not have yet observed these votes just
// by replaying blocks.
let mut process_unfrozen_gossip_verified_vote_hashes_time =
Measure ::start ( " process_gossip_verified_vote_hashes " ) ;
Self ::process_gossip_verified_vote_hashes (
& gossip_verified_vote_hash_receiver ,
& mut unfrozen_gossip_verified_vote_hashes ,
& heaviest_subtree_fork_choice ,
& mut latest_validator_votes_for_frozen_banks ,
) ;
for _ in gossip_verified_vote_hash_receiver . try_iter ( ) { }
process_unfrozen_gossip_verified_vote_hashes_time . stop ( ) ;
2021-03-24 23:41:52 -07:00
2022-12-06 14:09:57 -08:00
// Check to remove any duplicated slots from fork choice
let mut process_duplicate_slots_time = Measure ::start ( " process_duplicate_slots " ) ;
if ! tpu_has_bank {
Self ::process_duplicate_slots (
2021-08-13 14:21:52 -07:00
& blockstore ,
2022-12-06 14:09:57 -08:00
& duplicate_slots_receiver ,
2021-08-13 14:21:52 -07:00
& mut duplicate_slots_tracker ,
& gossip_duplicate_confirmed_slots ,
& mut epoch_slots_frozen_slots ,
2022-12-06 14:09:57 -08:00
& bank_forks ,
2021-08-13 14:21:52 -07:00
& mut progress ,
& mut heaviest_subtree_fork_choice ,
& mut duplicate_slots_to_repair ,
2022-10-27 20:06:06 -07:00
& ancestor_hashes_replay_update_sender ,
& mut purge_repair_slot_counter ,
2021-08-13 14:21:52 -07:00
) ;
2022-12-06 14:09:57 -08:00
}
process_duplicate_slots_time . stop ( ) ;
2021-08-13 14:21:52 -07:00
2022-12-06 14:09:57 -08:00
let mut collect_frozen_banks_time = Measure ::start ( " frozen_banks " ) ;
let mut frozen_banks : Vec < _ > = bank_forks
. read ( )
. unwrap ( )
. frozen_banks ( )
. into_iter ( )
. filter ( | ( slot , _ ) | * slot > = forks_root )
. map ( | ( _ , bank ) | bank )
. collect ( ) ;
collect_frozen_banks_time . stop ( ) ;
let mut compute_bank_stats_time = Measure ::start ( " compute_bank_stats " ) ;
let newly_computed_slot_stats = Self ::compute_bank_stats (
& vote_account ,
& ancestors ,
& mut frozen_banks ,
& mut tower ,
& mut progress ,
& vote_tracker ,
& cluster_slots ,
& bank_forks ,
& mut heaviest_subtree_fork_choice ,
& mut latest_validator_votes_for_frozen_banks ,
) ;
compute_bank_stats_time . stop ( ) ;
let mut compute_slot_stats_time = Measure ::start ( " compute_slot_stats_time " ) ;
for slot in newly_computed_slot_stats {
let fork_stats = progress . get_fork_stats ( slot ) . unwrap ( ) ;
let confirmed_forks = Self ::confirm_forks (
& tower ,
& fork_stats . voted_stakes ,
fork_stats . total_stake ,
& progress ,
& bank_forks ,
) ;
Self ::mark_slots_confirmed (
& confirmed_forks ,
2021-07-18 17:04:25 -07:00
& blockstore ,
2021-03-24 23:41:52 -07:00
& bank_forks ,
& mut progress ,
2022-12-06 14:09:57 -08:00
& mut duplicate_slots_tracker ,
2021-03-24 23:41:52 -07:00
& mut heaviest_subtree_fork_choice ,
2022-12-06 14:09:57 -08:00
& mut epoch_slots_frozen_slots ,
2021-07-26 20:59:00 -07:00
& mut duplicate_slots_to_repair ,
& ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
& mut purge_repair_slot_counter ,
2021-03-24 23:41:52 -07:00
) ;
2022-12-06 14:09:57 -08:00
}
compute_slot_stats_time . stop ( ) ;
2021-04-10 17:34:45 -07:00
2022-12-06 14:09:57 -08:00
let mut select_forks_time = Measure ::start ( " select_forks_time " ) ;
let ( heaviest_bank , heaviest_bank_on_same_voted_fork ) =
heaviest_subtree_fork_choice . select_forks (
& frozen_banks ,
& tower ,
& progress ,
& ancestors ,
& bank_forks ,
2021-04-29 14:43:28 -07:00
) ;
2022-12-06 14:09:57 -08:00
select_forks_time . stop ( ) ;
2021-04-10 17:34:45 -07:00
2022-12-06 14:09:57 -08:00
Self ::check_for_vote_only_mode (
heaviest_bank . slot ( ) ,
forks_root ,
& in_vote_only_mode ,
& bank_forks ,
) ;
if let Some ( heaviest_bank_on_same_voted_fork ) =
heaviest_bank_on_same_voted_fork . as_ref ( )
{
if let Some ( my_latest_landed_vote ) =
progress . my_latest_landed_vote ( heaviest_bank_on_same_voted_fork . slot ( ) )
{
Self ::refresh_last_vote (
& mut tower ,
heaviest_bank_on_same_voted_fork ,
my_latest_landed_vote ,
& vote_account ,
& identity_keypair ,
& authorized_voter_keypairs . read ( ) . unwrap ( ) ,
& mut voted_signatures ,
has_new_vote_been_rooted ,
& mut last_vote_refresh_time ,
& voting_sender ,
wait_to_vote_slot ,
2021-03-24 23:41:52 -07:00
) ;
}
2022-12-06 14:09:57 -08:00
}
let mut select_vote_and_reset_forks_time =
Measure ::start ( " select_vote_and_reset_forks " ) ;
let SelectVoteAndResetForkResult {
vote_bank ,
reset_bank ,
heaviest_fork_failures ,
} = Self ::select_vote_and_reset_forks (
& heaviest_bank ,
heaviest_bank_on_same_voted_fork . as_ref ( ) ,
& ancestors ,
& descendants ,
& progress ,
& mut tower ,
& latest_validator_votes_for_frozen_banks ,
& heaviest_subtree_fork_choice ,
) ;
select_vote_and_reset_forks_time . stop ( ) ;
let mut heaviest_fork_failures_time = Measure ::start ( " heaviest_fork_failures_time " ) ;
if tower . is_recent ( heaviest_bank . slot ( ) ) & & ! heaviest_fork_failures . is_empty ( ) {
info! (
" Couldn't vote on heaviest fork: {:?}, heaviest_fork_failures: {:?} " ,
heaviest_bank . slot ( ) ,
heaviest_fork_failures
2020-02-26 19:59:29 -08:00
) ;
2020-02-03 16:48:24 -08:00
2022-12-06 14:09:57 -08:00
for r in heaviest_fork_failures {
if let HeaviestForkFailures ::NoPropagatedConfirmation ( slot ) = r {
if let Some ( latest_leader_slot ) =
progress . get_latest_leader_slot_must_exist ( slot )
{
progress . log_propagated_stats ( latest_leader_slot , & bank_forks ) ;
}
2021-04-28 11:46:16 -07:00
}
}
2022-12-06 14:09:57 -08:00
}
heaviest_fork_failures_time . stop ( ) ;
2020-03-02 12:43:43 -08:00
2022-12-06 14:09:57 -08:00
let mut voting_time = Measure ::start ( " voting_time " ) ;
// Vote on a fork
if let Some ( ( ref vote_bank , ref switch_fork_decision ) ) = vote_bank {
if let Some ( votable_leader ) =
leader_schedule_cache . slot_leader_at ( vote_bank . slot ( ) , Some ( vote_bank ) )
{
Self ::log_leader_change (
& my_pubkey ,
vote_bank . slot ( ) ,
& mut current_leader ,
& votable_leader ,
) ;
}
Self ::handle_votable_bank (
2020-05-29 14:40:36 -07:00
vote_bank ,
2022-12-06 14:09:57 -08:00
switch_fork_decision ,
& bank_forks ,
2020-10-15 02:30:33 -07:00
& mut tower ,
2022-12-06 14:09:57 -08:00
& mut progress ,
& vote_account ,
& identity_keypair ,
& authorized_voter_keypairs . read ( ) . unwrap ( ) ,
& blockstore ,
& leader_schedule_cache ,
& lockouts_sender ,
& accounts_background_request_sender ,
& latest_root_senders ,
& rpc_subscriptions ,
& block_commitment_cache ,
& mut heaviest_subtree_fork_choice ,
& bank_notification_sender ,
& mut duplicate_slots_tracker ,
& mut gossip_duplicate_confirmed_slots ,
& mut unfrozen_gossip_verified_vote_hashes ,
& mut voted_signatures ,
& mut has_new_vote_been_rooted ,
& mut replay_timing ,
& voting_sender ,
& mut epoch_slots_frozen_slots ,
& drop_bank_sender ,
wait_to_vote_slot ,
2020-05-29 14:40:36 -07:00
) ;
2022-12-06 14:09:57 -08:00
} ;
voting_time . stop ( ) ;
2020-03-02 12:43:43 -08:00
2022-12-06 14:09:57 -08:00
let mut reset_bank_time = Measure ::start ( " reset_bank " ) ;
// Reset onto a fork
if let Some ( reset_bank ) = reset_bank {
if last_reset ! = reset_bank . last_blockhash ( ) {
2020-03-02 12:43:43 -08:00
info! (
2022-12-06 14:09:57 -08:00
" vote bank: {:?} reset bank: {:?} " ,
vote_bank
. as_ref ( )
. map ( | ( b , switch_fork_decision ) | ( b . slot ( ) , switch_fork_decision ) ) ,
reset_bank . slot ( ) ,
) ;
let fork_progress = progress
. get ( & reset_bank . slot ( ) )
. expect ( " bank to reset to must exist in progress map " ) ;
datapoint_info! (
" blocks_produced " ,
( " num_blocks_on_fork " , fork_progress . num_blocks_on_fork , i64 ) ,
(
" num_dropped_blocks_on_fork " ,
fork_progress . num_dropped_blocks_on_fork ,
i64
) ,
2020-03-02 12:43:43 -08:00
) ;
2020-03-26 19:57:27 -07:00
2022-12-06 14:09:57 -08:00
if my_pubkey ! = cluster_info . id ( ) {
identity_keypair = cluster_info . keypair ( ) . clone ( ) ;
let my_old_pubkey = my_pubkey ;
my_pubkey = identity_keypair . pubkey ( ) ;
// Load the new identity's tower
tower = Tower ::restore ( tower_storage . as_ref ( ) , & my_pubkey )
. and_then ( | restored_tower | {
let root_bank = bank_forks . read ( ) . unwrap ( ) . root_bank ( ) ;
let slot_history = root_bank . get_slot_history ( ) ;
restored_tower . adjust_lockouts_after_replay (
root_bank . slot ( ) ,
& slot_history ,
)
} )
. unwrap_or_else ( | err | {
if err . is_file_missing ( ) {
Tower ::new_from_bankforks (
& bank_forks . read ( ) . unwrap ( ) ,
& my_pubkey ,
& vote_account ,
)
} else {
error! ( " Failed to load tower for {}: {} " , my_pubkey , err ) ;
std ::process ::exit ( 1 ) ;
}
} ) ;
2020-03-02 12:43:43 -08:00
2022-12-06 14:09:57 -08:00
// Ensure the validator can land votes with the new identity before
// becoming leader
has_new_vote_been_rooted = ! wait_for_vote_to_start_leader ;
warn! ( " Identity changed from {} to {} " , my_old_pubkey , my_pubkey ) ;
2019-11-15 08:36:33 -08:00
}
2020-04-02 21:05:33 -07:00
2022-12-06 14:09:57 -08:00
Self ::reset_poh_recorder (
& my_pubkey ,
2020-04-02 21:05:33 -07:00
& blockstore ,
2022-12-06 14:09:57 -08:00
& reset_bank ,
& poh_recorder ,
2020-04-02 21:05:33 -07:00
& leader_schedule_cache ,
2020-12-13 17:26:34 -08:00
) ;
2022-12-06 14:09:57 -08:00
last_reset = reset_bank . last_blockhash ( ) ;
tpu_has_bank = false ;
if let Some ( last_voted_slot ) = tower . last_voted_slot ( ) {
// If the current heaviest bank is not a descendant of the last voted slot,
// there must be a partition
let partition_detected = Self ::is_partition_detected (
& ancestors ,
last_voted_slot ,
heaviest_bank . slot ( ) ,
2020-04-08 14:35:24 -07:00
) ;
2021-06-17 13:51:06 -07:00
2022-12-06 14:09:57 -08:00
if ! partition_exists & & partition_detected {
warn! (
2020-06-29 18:49:57 -07:00
" PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset slot: {} " ,
heaviest_bank . slot ( ) ,
last_voted_slot ,
reset_bank . slot ( ) ,
) ;
2022-12-06 14:09:57 -08:00
inc_new_counter_info! ( " replay_stage-partition_detected " , 1 ) ;
datapoint_info! (
" replay_stage-partition " ,
( " slot " , reset_bank . slot ( ) as i64 , i64 )
) ;
partition_exists = true ;
} else if partition_exists & & ! partition_detected {
warn! (
2020-06-29 18:49:57 -07:00
" PARTITION resolved heaviest fork: {} last vote: {:?}, reset slot: {} " ,
heaviest_bank . slot ( ) ,
last_voted_slot ,
reset_bank . slot ( )
) ;
2022-12-06 14:09:57 -08:00
partition_exists = false ;
inc_new_counter_info! ( " replay_stage-partition_resolved " , 1 ) ;
2020-03-02 12:43:43 -08:00
}
2019-07-30 13:18:33 -07:00
}
2019-03-07 15:49:07 -08:00
}
2022-12-06 14:09:57 -08:00
}
reset_bank_time . stop ( ) ;
let mut start_leader_time = Measure ::start ( " start_leader_time " ) ;
let mut dump_then_repair_correct_slots_time =
Measure ::start ( " dump_then_repair_correct_slots_time " ) ;
// Used for correctness check
let poh_bank = poh_recorder . read ( ) . unwrap ( ) . bank ( ) ;
// Dump any duplicate slots that have been confirmed by the network in
// anticipation of repairing the confirmed version of the slot.
//
// Has to be before `maybe_start_leader()`. Otherwise, `ancestors` and `descendants`
// will be outdated, and we cannot assume `poh_bank` will be in either of these maps.
Self ::dump_then_repair_correct_slots (
& mut duplicate_slots_to_repair ,
& mut ancestors ,
& mut descendants ,
& mut progress ,
& bank_forks ,
& blockstore ,
poh_bank . map ( | bank | bank . slot ( ) ) ,
& mut purge_repair_slot_counter ,
) ;
dump_then_repair_correct_slots_time . stop ( ) ;
2021-07-08 19:07:32 -07:00
2022-12-06 14:09:57 -08:00
let mut retransmit_not_propagated_time =
Measure ::start ( " retransmit_not_propagated_time " ) ;
Self ::retransmit_latest_unpropagated_leader_slot (
& poh_recorder ,
& retransmit_slots_sender ,
& mut progress ,
) ;
retransmit_not_propagated_time . stop ( ) ;
// From this point on, its not safe to use ancestors/descendants since maybe_start_leader
// may add a bank that will not included in either of these maps.
drop ( ancestors ) ;
drop ( descendants ) ;
if ! tpu_has_bank {
Self ::maybe_start_leader (
& my_pubkey ,
& bank_forks ,
2021-12-17 05:44:40 -08:00
& poh_recorder ,
2022-12-06 14:09:57 -08:00
& leader_schedule_cache ,
& rpc_subscriptions ,
2021-12-17 05:44:40 -08:00
& mut progress ,
2022-12-06 14:09:57 -08:00
& retransmit_slots_sender ,
& mut skipped_slots_info ,
has_new_vote_been_rooted ,
transaction_status_sender . is_some ( ) ,
2021-12-17 05:44:40 -08:00
) ;
2022-12-06 14:09:57 -08:00
let poh_bank = poh_recorder . read ( ) . unwrap ( ) . bank ( ) ;
if let Some ( bank ) = poh_bank {
Self ::log_leader_change (
& my_pubkey ,
bank . slot ( ) ,
& mut current_leader ,
2019-07-18 14:54:27 -07:00
& my_pubkey ,
) ;
}
2018-05-30 13:38:15 -07:00
}
2022-12-06 14:09:57 -08:00
start_leader_time . stop ( ) ;
let mut wait_receive_time = Measure ::start ( " wait_receive_time " ) ;
if ! did_complete_bank {
// only wait for the signal if we did not just process a bank; maybe there are more slots available
let timer = Duration ::from_millis ( 100 ) ;
let result = ledger_signal_receiver . recv_timeout ( timer ) ;
match result {
Err ( RecvTimeoutError ::Timeout ) = > ( ) ,
Err ( _ ) = > break ,
Ok ( _ ) = > trace! ( " blockstore signal " ) ,
} ;
}
wait_receive_time . stop ( ) ;
replay_timing . update (
collect_frozen_banks_time . as_us ( ) ,
compute_bank_stats_time . as_us ( ) ,
select_vote_and_reset_forks_time . as_us ( ) ,
start_leader_time . as_us ( ) ,
reset_bank_time . as_us ( ) ,
voting_time . as_us ( ) ,
select_forks_time . as_us ( ) ,
compute_slot_stats_time . as_us ( ) ,
generate_new_bank_forks_time . as_us ( ) ,
replay_active_banks_time . as_us ( ) ,
wait_receive_time . as_us ( ) ,
heaviest_fork_failures_time . as_us ( ) ,
u64 ::from ( did_complete_bank ) ,
process_gossip_duplicate_confirmed_slots_time . as_us ( ) ,
process_unfrozen_gossip_verified_vote_hashes_time . as_us ( ) ,
process_duplicate_slots_time . as_us ( ) ,
dump_then_repair_correct_slots_time . as_us ( ) ,
retransmit_not_propagated_time . as_us ( ) ,
) ;
}
} ;
let t_replay = Builder ::new ( )
. name ( " solReplayStage " . to_string ( ) )
. spawn ( run_replay )
2018-12-07 19:01:28 -08:00
. unwrap ( ) ;
2020-05-14 18:22:47 -07:00
2022-08-11 23:25:20 -07:00
Ok ( Self {
2020-05-14 18:22:47 -07:00
t_replay ,
commitment_service ,
2022-08-11 23:25:20 -07:00
} )
2019-02-10 16:28:52 -08:00
}
2019-07-09 15:36:30 -07:00
2022-05-14 08:53:37 -07:00
fn check_for_vote_only_mode (
heaviest_bank_slot : Slot ,
forks_root : Slot ,
in_vote_only_mode : & AtomicBool ,
bank_forks : & RwLock < BankForks > ,
) {
if heaviest_bank_slot . saturating_sub ( forks_root ) > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY {
if ! in_vote_only_mode . load ( Ordering ::Relaxed )
& & in_vote_only_mode
. compare_exchange ( false , true , Ordering ::Relaxed , Ordering ::Relaxed )
. is_ok ( )
{
let bank_forks = bank_forks . read ( ) . unwrap ( ) ;
datapoint_warn! (
" bank_forks-entering-vote-only-mode " ,
( " banks_len " , bank_forks . len ( ) , i64 ) ,
( " heaviest_bank " , heaviest_bank_slot , i64 ) ,
( " root " , bank_forks . root ( ) , i64 ) ,
) ;
}
} else if in_vote_only_mode . load ( Ordering ::Relaxed )
& & in_vote_only_mode
. compare_exchange ( true , false , Ordering ::Relaxed , Ordering ::Relaxed )
. is_ok ( )
{
let bank_forks = bank_forks . read ( ) . unwrap ( ) ;
datapoint_warn! (
" bank_forks-exiting-vote-only-mode " ,
( " banks_len " , bank_forks . len ( ) , i64 ) ,
( " heaviest_bank " , heaviest_bank_slot , i64 ) ,
( " root " , bank_forks . root ( ) , i64 ) ,
) ;
}
}
2022-01-04 00:24:16 -08:00
fn maybe_retransmit_unpropagated_slots (
metric_name : & 'static str ,
retransmit_slots_sender : & RetransmitSlotsSender ,
progress : & mut ProgressMap ,
latest_leader_slot : Slot ,
) {
let first_leader_group_slot = first_of_consecutive_leader_slots ( latest_leader_slot ) ;
for slot in first_leader_group_slot ..= latest_leader_slot {
let is_propagated = progress . is_propagated ( slot ) ;
if let Some ( retransmit_info ) = progress . get_retransmit_info_mut ( slot ) {
if ! is_propagated . expect (
" presence of retransmit_info ensures that propagation status is present " ,
) {
if retransmit_info . reached_retransmit_threshold ( ) {
info! (
" Retrying retransmit: latest_leader_slot={} slot={} retransmit_info={:?} " ,
latest_leader_slot ,
slot ,
& retransmit_info ,
) ;
datapoint_info! (
metric_name ,
( " latest_leader_slot " , latest_leader_slot , i64 ) ,
( " slot " , slot , i64 ) ,
( " retry_iteration " , retransmit_info . retry_iteration , i64 ) ,
) ;
let _ = retransmit_slots_sender . send ( slot ) ;
retransmit_info . increment_retry_iteration ( ) ;
} else {
debug! (
" Bypass retransmit of slot={} retransmit_info={:?} " ,
slot , & retransmit_info
) ;
}
}
}
}
}
2021-12-17 05:44:40 -08:00
fn retransmit_latest_unpropagated_leader_slot (
2022-07-05 07:29:44 -07:00
poh_recorder : & Arc < RwLock < PohRecorder > > ,
2021-12-17 05:44:40 -08:00
retransmit_slots_sender : & RetransmitSlotsSender ,
progress : & mut ProgressMap ,
) {
2022-07-05 07:29:44 -07:00
let start_slot = poh_recorder . read ( ) . unwrap ( ) . start_slot ( ) ;
2022-01-04 00:24:16 -08:00
if let ( false , Some ( latest_leader_slot ) ) =
progress . get_leader_propagation_slot_must_exist ( start_slot )
{
debug! (
" Slot not propagated: start_slot={} latest_leader_slot={} " ,
start_slot , latest_leader_slot
) ;
Self ::maybe_retransmit_unpropagated_slots (
" replay_stage-retransmit-timing-based " ,
retransmit_slots_sender ,
progress ,
latest_leader_slot ,
) ;
2021-12-17 05:44:40 -08:00
}
}
2020-06-29 18:49:57 -07:00
fn is_partition_detected (
ancestors : & HashMap < Slot , HashSet < Slot > > ,
last_voted_slot : Slot ,
heaviest_slot : Slot ,
) -> bool {
last_voted_slot ! = heaviest_slot
& & ! ancestors
. get ( & heaviest_slot )
. map ( | ancestors | ancestors . contains ( & last_voted_slot ) )
. unwrap_or ( true )
}
2020-09-18 22:03:54 -07:00
fn initialize_progress_and_fork_choice_with_locked_bank_forks (
bank_forks : & RwLock < BankForks > ,
my_pubkey : & Pubkey ,
vote_account : & Pubkey ,
2020-12-07 13:47:14 -08:00
) -> ( ProgressMap , HeaviestSubtreeForkChoice ) {
2020-09-18 22:03:54 -07:00
let ( root_bank , frozen_banks ) = {
let bank_forks = bank_forks . read ( ) . unwrap ( ) ;
(
2020-12-27 05:28:05 -08:00
bank_forks . root_bank ( ) ,
2020-09-18 22:03:54 -07:00
bank_forks . frozen_banks ( ) . values ( ) . cloned ( ) . collect ( ) ,
)
} ;
2021-06-18 06:34:58 -07:00
Self ::initialize_progress_and_fork_choice ( & root_bank , frozen_banks , my_pubkey , vote_account )
2020-09-18 22:03:54 -07:00
}
2021-07-08 19:07:32 -07:00
pub fn initialize_progress_and_fork_choice (
2020-12-27 05:28:05 -08:00
root_bank : & Bank ,
2020-09-18 22:03:54 -07:00
mut frozen_banks : Vec < Arc < Bank > > ,
my_pubkey : & Pubkey ,
vote_account : & Pubkey ,
2020-12-07 13:47:14 -08:00
) -> ( ProgressMap , HeaviestSubtreeForkChoice ) {
2020-09-18 22:03:54 -07:00
let mut progress = ProgressMap ::default ( ) ;
frozen_banks . sort_by_key ( | bank | bank . slot ( ) ) ;
// Initialize progress map with any root banks
for bank in & frozen_banks {
let prev_leader_slot = progress . get_bank_prev_leader_slot ( bank ) ;
progress . insert (
bank . slot ( ) ,
2021-06-18 06:34:58 -07:00
ForkProgress ::new_from_bank ( bank , my_pubkey , vote_account , prev_leader_slot , 0 , 0 ) ,
2020-09-18 22:03:54 -07:00
) ;
}
let root = root_bank . slot ( ) ;
2021-04-12 01:00:59 -07:00
let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice ::new_from_frozen_banks (
( root , root_bank . hash ( ) ) ,
& frozen_banks ,
) ;
2020-09-18 22:03:54 -07:00
2020-12-07 13:47:14 -08:00
( progress , heaviest_subtree_fork_choice )
2020-09-18 22:03:54 -07:00
}
2021-08-02 14:33:28 -07:00
pub fn dump_then_repair_correct_slots (
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair : & mut DuplicateSlotsToRepair ,
2020-05-07 23:39:57 -07:00
ancestors : & mut HashMap < Slot , HashSet < Slot > > ,
descendants : & mut HashMap < Slot , HashSet < Slot > > ,
2020-05-05 14:07:21 -07:00
progress : & mut ProgressMap ,
bank_forks : & RwLock < BankForks > ,
2021-07-08 19:07:32 -07:00
blockstore : & Blockstore ,
poh_bank_slot : Option < Slot > ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter : & mut PurgeRepairSlotCounter ,
2020-05-05 14:07:21 -07:00
) {
2021-07-08 19:07:32 -07:00
if duplicate_slots_to_repair . is_empty ( ) {
return ;
2020-05-05 14:07:21 -07:00
}
2021-07-08 19:07:32 -07:00
let root_bank = bank_forks . read ( ) . unwrap ( ) . root_bank ( ) ;
// TODO: handle if alternate version of descendant also got confirmed after ancestor was
// confirmed, what happens then? Should probably keep track of purged list and skip things
// in `duplicate_slots_to_repair` that have already been purged. Add test.
2021-08-13 14:21:52 -07:00
duplicate_slots_to_repair . retain ( | duplicate_slot , correct_hash | {
2021-07-08 19:07:32 -07:00
// Should not purge duplicate slots if there is currently a poh bank building
// on top of that slot, as BankingStage might still be referencing/touching that state
// concurrently.
// Luckily for us, because the fork choice rule removes duplicate slots from fork
// choice, and this function is called after:
// 1) We have picked a bank to reset to in `select_vote_and_reset_forks()`
// 2) And also called `reset_poh_recorder()`
// Then we should have reset to a fork that doesn't include the duplicate block,
// which means any working bank in PohRecorder that was built on that duplicate fork
// should have been cleared as well. However, if there is some violation of this guarantee,
// then log here
let is_poh_building_on_duplicate_fork = poh_bank_slot
. map ( | poh_bank_slot | {
ancestors
. get ( & poh_bank_slot )
. expect ( " Poh bank should exist in BankForks and thus in ancestors map " )
. contains ( duplicate_slot )
} )
. unwrap_or ( false ) ;
let did_purge_repair = {
if ! is_poh_building_on_duplicate_fork {
let frozen_hash = bank_forks . read ( ) . unwrap ( ) . bank_hash ( * duplicate_slot ) ;
if let Some ( frozen_hash ) = frozen_hash {
if frozen_hash = = * correct_hash {
warn! (
" Trying to purge slot {} with correct_hash {} " ,
* duplicate_slot , * correct_hash
) ;
return false ;
} else if frozen_hash = = Hash ::default ( )
& & ! progress . is_dead ( * duplicate_slot ) . expect (
" If slot exists in BankForks must exist in the progress map " ,
)
{
warn! (
" Trying to purge unfrozen slot {} that is not dead " ,
* duplicate_slot
) ;
return false ;
}
} else {
warn! (
" Trying to purge slot {} which does not exist in bank forks " ,
* duplicate_slot
) ;
return false ;
}
Self ::purge_unconfirmed_duplicate_slot (
* duplicate_slot ,
ancestors ,
descendants ,
progress ,
& root_bank ,
bank_forks ,
blockstore ,
) ;
2022-10-27 20:06:06 -07:00
let attempt_no = purge_repair_slot_counter
. entry ( * duplicate_slot )
. and_modify ( | x | * x + = 1 )
. or_insert ( 1 ) ;
if * attempt_no > MAX_REPAIR_RETRY_LOOP_ATTEMPTS {
panic! ( " We have tried to repair duplicate slot: {} more than {} times and are unable to freeze a block with bankhash {} , instead we have a block with bankhash {:?} . This is most likely a bug in the runtime. At this point manual intervention is needed to make progress. Exiting " , * duplicate_slot , MAX_REPAIR_RETRY_LOOP_ATTEMPTS , * correct_hash , frozen_hash ) ;
}
2021-08-13 14:21:52 -07:00
warn! (
2022-10-27 20:06:06 -07:00
" Notifying repair service to repair duplicate slot: {}, attempt {} " ,
* duplicate_slot , * attempt_no ,
2021-08-13 14:21:52 -07:00
) ;
2021-07-08 19:07:32 -07:00
true
// TODO: Send signal to repair to repair the correct version of
// `duplicate_slot` with hash == `correct_hash`
} else {
warn! (
" PoH bank for slot {} is building on duplicate slot {} " ,
poh_bank_slot . unwrap ( ) ,
duplicate_slot
) ;
false
}
} ;
// If we purged/repaired, then no need to keep the slot in the set of pending work
! did_purge_repair
} ) ;
2020-05-05 14:07:21 -07:00
}
2021-08-13 14:21:52 -07:00
#[ allow(clippy::too_many_arguments) ]
fn process_epoch_slots_frozen_dead_slots (
pubkey : & Pubkey ,
blockstore : & Blockstore ,
epoch_slots_frozen_receiver : & DuplicateSlotsResetReceiver ,
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
gossip_duplicate_confirmed_slots : & GossipDuplicateConfirmedSlots ,
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
progress : & mut ProgressMap ,
fork_choice : & mut HeaviestSubtreeForkChoice ,
bank_forks : & RwLock < BankForks > ,
duplicate_slots_to_repair : & mut DuplicateSlotsToRepair ,
ancestor_hashes_replay_update_sender : & AncestorHashesReplayUpdateSender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter : & mut PurgeRepairSlotCounter ,
2021-08-13 14:21:52 -07:00
) {
let root = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
for maybe_purgeable_duplicate_slots in epoch_slots_frozen_receiver . try_iter ( ) {
warn! (
" {} ReplayStage notified of epoch slots duplicate frozen dead slots: {:?} " ,
pubkey , maybe_purgeable_duplicate_slots
) ;
for ( epoch_slots_frozen_slot , epoch_slots_frozen_hash ) in
maybe_purgeable_duplicate_slots . into_iter ( )
{
let epoch_slots_frozen_state = EpochSlotsFrozenState ::new_from_state (
epoch_slots_frozen_slot ,
epoch_slots_frozen_hash ,
gossip_duplicate_confirmed_slots ,
fork_choice ,
| | progress . is_dead ( epoch_slots_frozen_slot ) . unwrap_or ( false ) ,
| | {
bank_forks
. read ( )
. unwrap ( )
. get ( epoch_slots_frozen_slot )
. map ( | b | b . hash ( ) )
} ,
) ;
check_slot_agrees_with_cluster (
epoch_slots_frozen_slot ,
root ,
blockstore ,
duplicate_slots_tracker ,
epoch_slots_frozen_slots ,
fork_choice ,
duplicate_slots_to_repair ,
ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter ,
2021-08-13 14:21:52 -07:00
SlotStateUpdate ::EpochSlotsFrozen ( epoch_slots_frozen_state ) ,
) ;
}
}
}
2020-05-05 14:07:21 -07:00
fn purge_unconfirmed_duplicate_slot (
duplicate_slot : Slot ,
2020-05-07 23:39:57 -07:00
ancestors : & mut HashMap < Slot , HashSet < Slot > > ,
descendants : & mut HashMap < Slot , HashSet < Slot > > ,
2020-05-05 14:07:21 -07:00
progress : & mut ProgressMap ,
2021-07-08 19:07:32 -07:00
root_bank : & Bank ,
2020-05-05 14:07:21 -07:00
bank_forks : & RwLock < BankForks > ,
2021-07-08 19:07:32 -07:00
blockstore : & Blockstore ,
2020-05-05 14:07:21 -07:00
) {
2020-05-07 23:39:57 -07:00
warn! ( " purging slot {} " , duplicate_slot ) ;
2021-07-08 19:07:32 -07:00
// Doesn't need to be root bank, just needs a common bank to
// access the status cache and accounts
2020-05-07 23:39:57 -07:00
let slot_descendants = descendants . get ( & duplicate_slot ) . cloned ( ) ;
2020-05-06 11:44:49 -07:00
if slot_descendants . is_none ( ) {
// Root has already moved past this slot, no need to purge it
2021-07-08 19:07:32 -07:00
if root_bank . slot ( ) < = duplicate_slot {
blockstore . clear_unconfirmed_slot ( duplicate_slot ) ;
}
2020-05-06 11:44:49 -07:00
return ;
}
2020-05-05 14:07:21 -07:00
2020-05-07 23:39:57 -07:00
// Clear the ancestors/descendants map to keep them
// consistent
let slot_descendants = slot_descendants . unwrap ( ) ;
Self ::purge_ancestors_descendants (
duplicate_slot ,
& slot_descendants ,
ancestors ,
descendants ,
) ;
2021-07-08 19:07:32 -07:00
// Grab the Slot and BankId's of the banks we need to purge, then clear the banks
// from BankForks
let ( slots_to_purge , removed_banks ) : ( Vec < ( Slot , BankId ) > , Vec < Arc < Bank > > ) = {
let mut w_bank_forks = bank_forks . write ( ) . unwrap ( ) ;
slot_descendants
. iter ( )
. chain ( std ::iter ::once ( & duplicate_slot ) )
. map ( | slot | {
// Clear the duplicate banks from BankForks
let bank = w_bank_forks
. remove ( * slot )
. expect ( " BankForks should not have been purged yet " ) ;
( ( * slot , bank . bank_id ( ) ) , bank )
} )
. unzip ( )
} ;
2020-05-05 14:07:21 -07:00
2021-07-08 19:07:32 -07:00
// Clear the accounts for these slots so that any ongoing RPC scans fail.
// These have to be atomically cleared together in the same batch, in order
// to prevent RPC from seeing inconsistent results in scans.
root_bank . remove_unrooted_slots ( & slots_to_purge ) ;
// Once the slots above have been purged, now it's safe to remove the banks from
// BankForks, allowing the Bank::drop() purging to run and not race with the
// `remove_unrooted_slots()` call.
drop ( removed_banks ) ;
for ( slot , slot_id ) in slots_to_purge {
warn! (
" purging descendant: {} with slot_id {}, of slot {} " ,
slot , slot_id , duplicate_slot
) ;
// Clear the slot signatures from status cache for this slot.
// TODO: What about RPC queries that had already cloned the Bank for this slot
// and are looking up the signature for this slot?
root_bank . clear_slot_signatures ( slot ) ;
// Clear the slot-related data in blockstore. This will:
// 1) Clear old shreds allowing new ones to be inserted
// 2) Clear the "dead" flag allowing ReplayStage to start replaying
// this slot
blockstore . clear_unconfirmed_slot ( slot ) ;
// Clear the progress map of these forks
let _ = progress . remove ( & slot ) ;
2020-05-05 14:07:21 -07:00
}
}
2020-05-07 23:39:57 -07:00
// Purge given slot and all its descendants from the `ancestors` and
// `descendants` structures so that they're consistent with `BankForks`
// and the `progress` map.
fn purge_ancestors_descendants (
slot : Slot ,
slot_descendants : & HashSet < Slot > ,
ancestors : & mut HashMap < Slot , HashSet < Slot > > ,
descendants : & mut HashMap < Slot , HashSet < Slot > > ,
) {
if ! ancestors . contains_key ( & slot ) {
// Slot has already been purged
return ;
}
// Purge this slot from each of its ancestors' `descendants` maps
for a in ancestors
. get ( & slot )
. expect ( " must exist based on earlier check " )
{
descendants
2021-06-18 06:34:46 -07:00
. get_mut ( a )
2020-05-07 23:39:57 -07:00
. expect ( " If exists in ancestor map must exist in descendants map " )
. retain ( | d | * d ! = slot & & ! slot_descendants . contains ( d ) ) ;
}
ancestors
. remove ( & slot )
. expect ( " must exist based on earlier check " ) ;
// Purge all the descendants of this slot from both maps
for descendant in slot_descendants {
2021-06-18 06:34:46 -07:00
ancestors . remove ( descendant ) . expect ( " must exist " ) ;
2020-05-07 23:39:57 -07:00
descendants
2021-06-18 06:34:46 -07:00
. remove ( descendant )
2020-05-07 23:39:57 -07:00
. expect ( " must exist based on earlier check " ) ;
}
descendants
. remove ( & slot )
. expect ( " must exist based on earlier check " ) ;
}
2021-03-24 23:41:52 -07:00
// Check for any newly confirmed slots by the cluster. This is only detects
// optimistic and in the future, duplicate slot confirmations on the exact
// single slots and does not account for votes on their descendants. Used solely
// for duplicate slot recovery.
2021-08-13 14:21:52 -07:00
#[ allow(clippy::too_many_arguments) ]
2021-03-24 23:41:52 -07:00
fn process_gossip_duplicate_confirmed_slots (
gossip_duplicate_confirmed_slots_receiver : & GossipDuplicateConfirmedSlotsReceiver ,
2021-07-18 17:04:25 -07:00
blockstore : & Blockstore ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & mut GossipDuplicateConfirmedSlots ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
2021-03-24 23:41:52 -07:00
bank_forks : & RwLock < BankForks > ,
progress : & mut ProgressMap ,
fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair : & mut DuplicateSlotsToRepair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender : & AncestorHashesReplayUpdateSender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter : & mut PurgeRepairSlotCounter ,
2021-03-24 23:41:52 -07:00
) {
let root = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
for new_confirmed_slots in gossip_duplicate_confirmed_slots_receiver . try_iter ( ) {
2021-07-18 17:04:25 -07:00
for ( confirmed_slot , duplicate_confirmed_hash ) in new_confirmed_slots {
2021-03-24 23:41:52 -07:00
if confirmed_slot < = root {
continue ;
2021-07-18 17:04:25 -07:00
} else if let Some ( prev_hash ) = gossip_duplicate_confirmed_slots
. insert ( confirmed_slot , duplicate_confirmed_hash )
2021-03-24 23:41:52 -07:00
{
2021-07-18 17:04:25 -07:00
assert_eq! ( prev_hash , duplicate_confirmed_hash ) ;
2021-03-24 23:41:52 -07:00
// Already processed this signal
return ;
}
2021-07-18 17:04:25 -07:00
let duplicate_confirmed_state = DuplicateConfirmedState ::new_from_state (
duplicate_confirmed_hash ,
| | progress . is_dead ( confirmed_slot ) . unwrap_or ( false ) ,
| | bank_forks . read ( ) . unwrap ( ) . bank_hash ( confirmed_slot ) ,
) ;
2021-03-24 23:41:52 -07:00
check_slot_agrees_with_cluster (
confirmed_slot ,
root ,
2021-07-18 17:04:25 -07:00
blockstore ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots ,
2021-03-24 23:41:52 -07:00
fork_choice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter ,
2021-07-18 17:04:25 -07:00
SlotStateUpdate ::DuplicateConfirmed ( duplicate_confirmed_state ) ,
2021-03-24 23:41:52 -07:00
) ;
}
}
}
2021-04-10 17:34:45 -07:00
fn process_gossip_verified_vote_hashes (
gossip_verified_vote_hash_receiver : & GossipVerifiedVoteHashReceiver ,
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes : & mut UnfrozenGossipVerifiedVoteHashes ,
heaviest_subtree_fork_choice : & HeaviestSubtreeForkChoice ,
latest_validator_votes_for_frozen_banks : & mut LatestValidatorVotesForFrozenBanks ,
2021-04-10 17:34:45 -07:00
) {
for ( pubkey , slot , hash ) in gossip_verified_vote_hash_receiver . try_iter ( ) {
2021-04-21 14:40:35 -07:00
let is_frozen = heaviest_subtree_fork_choice . contains_block ( & ( slot , hash ) ) ;
2021-04-10 17:34:45 -07:00
// cluster_info_vote_listener will ensure it doesn't push duplicates
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes . add_vote (
pubkey ,
slot ,
hash ,
is_frozen ,
latest_validator_votes_for_frozen_banks ,
)
2021-04-10 17:34:45 -07:00
}
}
2021-03-24 23:41:52 -07:00
// Checks for and handle forks with duplicate slots.
2021-08-13 14:21:52 -07:00
#[ allow(clippy::too_many_arguments) ]
2021-03-24 23:41:52 -07:00
fn process_duplicate_slots (
2021-07-18 17:04:25 -07:00
blockstore : & Blockstore ,
2021-03-24 23:41:52 -07:00
duplicate_slots_receiver : & DuplicateSlotReceiver ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & GossipDuplicateConfirmedSlots ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
2021-03-24 23:41:52 -07:00
bank_forks : & RwLock < BankForks > ,
progress : & mut ProgressMap ,
fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair : & mut DuplicateSlotsToRepair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender : & AncestorHashesReplayUpdateSender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter : & mut PurgeRepairSlotCounter ,
2021-03-24 23:41:52 -07:00
) {
2021-06-10 22:28:23 -07:00
let new_duplicate_slots : Vec < Slot > = duplicate_slots_receiver . try_iter ( ) . collect ( ) ;
2021-03-24 23:41:52 -07:00
let ( root_slot , bank_hashes ) = {
let r_bank_forks = bank_forks . read ( ) . unwrap ( ) ;
2021-06-10 22:28:23 -07:00
let bank_hashes : Vec < Option < Hash > > = new_duplicate_slots
2021-03-24 23:41:52 -07:00
. iter ( )
2021-07-08 19:07:32 -07:00
. map ( | duplicate_slot | r_bank_forks . bank_hash ( * duplicate_slot ) )
2021-03-24 23:41:52 -07:00
. collect ( ) ;
( r_bank_forks . root ( ) , bank_hashes )
} ;
2021-06-10 22:28:23 -07:00
for ( duplicate_slot , bank_hash ) in
new_duplicate_slots . into_iter ( ) . zip ( bank_hashes . into_iter ( ) )
2021-03-24 23:41:52 -07:00
{
// WindowService should only send the signal once per slot
2021-07-18 17:04:25 -07:00
let duplicate_state = DuplicateState ::new_from_state (
duplicate_slot ,
gossip_duplicate_confirmed_slots ,
fork_choice ,
| | progress . is_dead ( duplicate_slot ) . unwrap_or ( false ) ,
| | bank_hash ,
) ;
2021-03-24 23:41:52 -07:00
check_slot_agrees_with_cluster (
duplicate_slot ,
root_slot ,
2021-07-18 17:04:25 -07:00
blockstore ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots ,
2021-03-24 23:41:52 -07:00
fork_choice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter ,
2021-07-18 17:04:25 -07:00
SlotStateUpdate ::Duplicate ( duplicate_state ) ,
2021-03-24 23:41:52 -07:00
) ;
}
}
2019-07-23 19:19:20 -07:00
fn log_leader_change (
my_pubkey : & Pubkey ,
2019-11-02 00:38:30 -07:00
bank_slot : Slot ,
2019-07-23 19:19:20 -07:00
current_leader : & mut Option < Pubkey > ,
new_leader : & Pubkey ,
) {
if let Some ( ref current_leader ) = current_leader {
if current_leader ! = new_leader {
let msg = if current_leader = = my_pubkey {
2019-08-02 10:08:42 -07:00
" . I am no longer the leader "
2019-07-23 19:19:20 -07:00
} else if new_leader = = my_pubkey {
2019-08-02 10:08:42 -07:00
" . I am now the leader "
2019-07-23 19:19:20 -07:00
} else {
" "
} ;
info! (
2019-08-02 10:08:42 -07:00
" LEADER CHANGE at slot: {} leader: {}{} " ,
2019-07-23 19:19:20 -07:00
bank_slot , new_leader , msg
) ;
}
}
current_leader . replace ( new_leader . to_owned ( ) ) ;
}
2020-03-26 19:57:27 -07:00
fn check_propagation_for_start_leader (
poh_slot : Slot ,
parent_slot : Slot ,
progress_map : & ProgressMap ,
) -> bool {
2020-12-03 12:31:38 -08:00
// Assume `NUM_CONSECUTIVE_LEADER_SLOTS` = 4. Then `skip_propagated_check`
// below is true if `poh_slot` is within the same `NUM_CONSECUTIVE_LEADER_SLOTS`
// set of blocks as `latest_leader_slot`.
//
// Example 1 (`poh_slot` directly descended from `latest_leader_slot`):
//
// [B B B B] [B B B latest_leader_slot] poh_slot
//
// Example 2:
//
// [B latest_leader_slot B poh_slot]
//
// In this example, even if there's a block `B` on another fork between
// `poh_slot` and `parent_slot`, because they're in the same
// `NUM_CONSECUTIVE_LEADER_SLOTS` block, we still skip the propagated
// check because it's still within the propagation grace period.
2022-01-04 00:24:16 -08:00
if let Some ( latest_leader_slot ) =
progress_map . get_latest_leader_slot_must_exist ( parent_slot )
{
2020-12-03 12:31:38 -08:00
let skip_propagated_check =
poh_slot - latest_leader_slot < NUM_CONSECUTIVE_LEADER_SLOTS ;
if skip_propagated_check {
return true ;
}
2020-03-26 19:57:27 -07:00
}
2020-12-03 12:31:38 -08:00
// Note that `is_propagated(parent_slot)` doesn't necessarily check
// propagation of `parent_slot`, it checks propagation of the latest ancestor
// of `parent_slot` (hence the call to `get_latest_leader_slot()` in the
// check above)
2022-01-04 00:24:16 -08:00
progress_map
. get_leader_propagation_slot_must_exist ( parent_slot )
. 0
2020-03-26 19:57:27 -07:00
}
2020-03-26 23:33:28 -07:00
fn should_retransmit ( poh_slot : Slot , last_retransmit_slot : & mut Slot ) -> bool {
if poh_slot < * last_retransmit_slot
| | poh_slot > = * last_retransmit_slot + NUM_CONSECUTIVE_LEADER_SLOTS
{
* last_retransmit_slot = poh_slot ;
true
} else {
false
}
}
2021-10-05 21:53:26 -07:00
#[ allow(clippy::too_many_arguments) ]
2019-07-09 15:36:30 -07:00
fn maybe_start_leader (
2019-05-23 23:20:04 -07:00
my_pubkey : & Pubkey ,
2019-03-05 17:56:51 -08:00
bank_forks : & Arc < RwLock < BankForks > > ,
2022-07-05 07:29:44 -07:00
poh_recorder : & Arc < RwLock < PohRecorder > > ,
2019-04-19 02:39:44 -07:00
leader_schedule_cache : & Arc < LeaderScheduleCache > ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions : & Arc < RpcSubscriptions > ,
2021-12-17 05:44:40 -08:00
progress_map : & mut ProgressMap ,
2020-03-26 19:57:27 -07:00
retransmit_slots_sender : & RetransmitSlotsSender ,
skipped_slots_info : & mut SkippedSlotsInfo ,
2021-03-25 18:54:51 -07:00
has_new_vote_been_rooted : bool ,
2022-06-23 12:37:38 -07:00
track_transaction_indexes : bool ,
2019-03-05 17:56:51 -08:00
) {
2022-07-05 07:29:44 -07:00
// all the individual calls to poh_recorder.read() are designed to
2019-07-18 14:54:27 -07:00
// increase granularity, decrease contention
2019-07-09 22:06:47 -07:00
2022-07-05 07:29:44 -07:00
assert! ( ! poh_recorder . read ( ) . unwrap ( ) . has_bank ( ) ) ;
2019-07-09 22:06:47 -07:00
2022-07-05 07:29:44 -07:00
let ( poh_slot , parent_slot ) = match poh_recorder . read ( ) . unwrap ( ) . reached_leader_slot ( ) {
2022-01-22 18:28:50 -08:00
PohLeaderStatus ::Reached {
poh_slot ,
parent_slot ,
} = > ( poh_slot , parent_slot ) ,
PohLeaderStatus ::NotReached = > {
trace! ( " {} poh_recorder hasn't reached_leader_slot " , my_pubkey ) ;
return ;
}
} ;
2019-07-09 15:36:30 -07:00
2019-10-16 12:53:11 -07:00
trace! ( " {} reached_leader_slot " , my_pubkey ) ;
2019-07-09 15:36:30 -07:00
let parent = bank_forks
. read ( )
. unwrap ( )
. get ( parent_slot )
2022-04-28 11:51:00 -07:00
. expect ( " parent_slot doesn't exist in bank forks " ) ;
2019-07-09 15:36:30 -07:00
2019-07-18 14:54:27 -07:00
assert! ( parent . is_frozen ( ) ) ;
2022-06-29 14:48:33 -07:00
if ! parent . is_startup_verification_complete ( ) {
info! ( " startup verification incomplete, so skipping my leader slot " ) ;
return ;
}
2019-07-18 14:54:27 -07:00
if bank_forks . read ( ) . unwrap ( ) . get ( poh_slot ) . is_some ( ) {
warn! ( " {} already have bank in forks at {}? " , my_pubkey , poh_slot ) ;
2019-07-09 15:36:30 -07:00
return ;
}
2019-07-18 14:54:27 -07:00
trace! (
" {} poh_slot {} parent_slot {} " ,
my_pubkey ,
poh_slot ,
parent_slot
) ;
2019-07-09 15:36:30 -07:00
if let Some ( next_leader ) = leader_schedule_cache . slot_leader_at ( poh_slot , Some ( & parent ) ) {
2021-03-26 17:45:53 -07:00
if ! has_new_vote_been_rooted {
info! ( " Haven't landed a vote, so skipping my leader slot " ) ;
return ;
}
2019-07-09 15:36:30 -07:00
trace! (
" {} leader {} at poh slot: {} " ,
my_pubkey ,
next_leader ,
poh_slot
) ;
2019-07-09 22:06:47 -07:00
// I guess I missed my slot
if next_leader ! = * my_pubkey {
return ;
2019-07-09 15:36:30 -07:00
}
2019-07-09 22:06:47 -07:00
2020-02-21 13:41:49 -08:00
datapoint_info! (
2019-07-09 22:06:47 -07:00
" replay_stage-new_leader " ,
2019-07-30 13:18:33 -07:00
( " slot " , poh_slot , i64 ) ,
( " leader " , next_leader . to_string ( ) , String ) ,
2019-07-09 22:06:47 -07:00
) ;
2020-03-26 19:57:27 -07:00
if ! Self ::check_propagation_for_start_leader ( poh_slot , parent_slot , progress_map ) {
2022-01-04 00:24:16 -08:00
let latest_unconfirmed_leader_slot = progress_map . get_latest_leader_slot_must_exist ( parent_slot )
2021-06-08 09:53:15 -07:00
. expect ( " In order for propagated check to fail, latest leader must exist in progress map " ) ;
2020-03-26 19:57:27 -07:00
if poh_slot ! = skipped_slots_info . last_skipped_slot {
datapoint_info! (
" replay_stage-skip_leader_slot " ,
( " slot " , poh_slot , i64 ) ,
( " parent_slot " , parent_slot , i64 ) ,
2020-04-08 14:35:24 -07:00
(
" latest_unconfirmed_leader_slot " ,
latest_unconfirmed_leader_slot ,
i64
)
2020-03-26 19:57:27 -07:00
) ;
2020-04-08 14:35:24 -07:00
progress_map . log_propagated_stats ( latest_unconfirmed_leader_slot , bank_forks ) ;
2020-03-26 19:57:27 -07:00
skipped_slots_info . last_skipped_slot = poh_slot ;
}
2020-03-26 23:33:28 -07:00
if Self ::should_retransmit ( poh_slot , & mut skipped_slots_info . last_retransmit_slot ) {
2022-01-04 00:24:16 -08:00
Self ::maybe_retransmit_unpropagated_slots (
" replay_stage-retransmit " ,
retransmit_slots_sender ,
progress_map ,
latest_unconfirmed_leader_slot ,
) ;
2020-03-26 19:57:27 -07:00
}
return ;
}
2019-11-26 00:42:54 -08:00
let root_slot = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
2020-04-08 14:35:24 -07:00
datapoint_info! ( " replay_stage-my_leader_slot " , ( " slot " , poh_slot , i64 ) , ) ;
2019-11-26 00:42:54 -08:00
info! (
" new fork:{} parent:{} (leader) root:{} " ,
poh_slot , parent_slot , root_slot
) ;
2019-07-09 22:06:47 -07:00
2021-09-16 06:12:26 -07:00
let root_distance = poh_slot - root_slot ;
let vote_only_bank = if root_distance > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY {
datapoint_info! ( " vote-only-bank " , ( " slot " , poh_slot , i64 ) ) ;
true
} else {
false
} ;
2020-02-25 15:49:59 -08:00
let tpu_bank = Self ::new_bank_from_parent_with_notify (
& parent ,
poh_slot ,
root_slot ,
my_pubkey ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions ,
2022-02-21 13:15:37 -08:00
NewBankOptions { vote_only_bank } ,
2020-02-25 15:49:59 -08:00
) ;
let tpu_bank = bank_forks . write ( ) . unwrap ( ) . insert ( tpu_bank ) ;
2022-06-23 12:37:38 -07:00
poh_recorder
2022-07-05 07:29:44 -07:00
. write ( )
2022-06-23 12:37:38 -07:00
. unwrap ( )
. set_bank ( & tpu_bank , track_transaction_indexes ) ;
2019-07-09 15:36:30 -07:00
} else {
error! ( " {} No next leader found " , my_pubkey ) ;
2019-03-05 17:56:51 -08:00
}
}
2019-06-20 15:50:41 -07:00
2020-01-13 13:13:52 -08:00
fn replay_blockstore_into_bank (
2019-10-08 14:58:49 -07:00
bank : & Arc < Bank > ,
2020-01-13 13:13:52 -08:00
blockstore : & Blockstore ,
2022-07-28 11:33:19 -07:00
replay_stats : & RwLock < ReplaySlotStats > ,
replay_progress : & RwLock < ConfirmationProgress > ,
2021-05-10 12:14:56 -07:00
transaction_status_sender : Option < & TransactionStatusSender > ,
2020-08-07 11:21:35 -07:00
replay_vote_sender : & ReplayVoteSender ,
2019-12-12 10:36:27 -08:00
verify_recyclers : & VerifyRecyclers ,
2022-07-11 08:53:18 -07:00
log_messages_bytes_limit : Option < usize > ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache : & PrioritizationFeeCache ,
2020-01-14 17:15:26 -08:00
) -> result ::Result < usize , BlockstoreProcessorError > {
2022-07-28 11:33:19 -07:00
let mut w_replay_stats = replay_stats . write ( ) . unwrap ( ) ;
let mut w_replay_progress = replay_progress . write ( ) . unwrap ( ) ;
let tx_count_before = w_replay_progress . num_txs ;
2022-02-25 04:57:04 -08:00
// All errors must lead to marking the slot as dead, otherwise,
// the `check_slot_agrees_with_cluster()` called by `replay_active_banks()`
// will break!
blockstore_processor ::confirm_slot (
2020-01-14 17:15:26 -08:00
blockstore ,
bank ,
2022-07-28 11:33:19 -07:00
& mut w_replay_stats ,
& mut w_replay_progress ,
2020-01-14 17:15:26 -08:00
false ,
transaction_status_sender ,
2020-08-07 11:21:35 -07:00
Some ( replay_vote_sender ) ,
2020-01-14 17:15:26 -08:00
None ,
verify_recyclers ,
2021-02-06 17:26:42 -08:00
false ,
2022-07-11 08:53:18 -07:00
log_messages_bytes_limit ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache ,
2022-02-25 04:57:04 -08:00
) ? ;
2022-07-28 11:33:19 -07:00
let tx_count_after = w_replay_progress . num_txs ;
2020-01-14 17:15:26 -08:00
let tx_count = tx_count_after - tx_count_before ;
Ok ( tx_count )
2019-02-26 21:57:45 -08:00
}
2021-03-24 23:41:52 -07:00
#[ allow(clippy::too_many_arguments) ]
2020-12-09 23:14:31 -08:00
fn mark_dead_slot (
blockstore : & Blockstore ,
2021-03-24 23:41:52 -07:00
bank : & Bank ,
root : Slot ,
2020-12-09 23:14:31 -08:00
err : & BlockstoreProcessorError ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions : & Arc < RpcSubscriptions > ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & GossipDuplicateConfirmedSlots ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
2021-03-24 23:41:52 -07:00
progress : & mut ProgressMap ,
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair : & mut DuplicateSlotsToRepair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender : & AncestorHashesReplayUpdateSender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter : & mut PurgeRepairSlotCounter ,
2020-12-09 23:14:31 -08:00
) {
2021-03-24 23:41:52 -07:00
// Do not remove from progress map when marking dead! Needed by
// `process_gossip_duplicate_confirmed_slots()`
// Block producer can abandon the block if it detects a better one
// while producing. Somewhat common and expected in a
// network with variable network/machine configuration.
let is_serious = ! matches! (
err ,
BlockstoreProcessorError ::InvalidBlock ( BlockError ::TooFewTicks )
) ;
let slot = bank . slot ( ) ;
2020-12-09 23:14:31 -08:00
if is_serious {
datapoint_error! (
" replay-stage-mark_dead_slot " ,
2022-12-06 06:30:06 -08:00
( " error " , format! ( " error: {err:?} " ) , String ) ,
2020-12-09 23:14:31 -08:00
( " slot " , slot , i64 )
) ;
} else {
datapoint_info! (
" replay-stage-mark_dead_slot " ,
2022-12-06 06:30:06 -08:00
( " error " , format! ( " error: {err:?} " ) , String ) ,
2020-12-09 23:14:31 -08:00
( " slot " , slot , i64 )
) ;
}
2021-03-24 23:41:52 -07:00
progress . get_mut ( & slot ) . unwrap ( ) . is_dead = true ;
2020-12-09 23:14:31 -08:00
blockstore
. set_dead_slot ( slot )
. expect ( " Failed to mark slot as dead in blockstore " ) ;
2022-04-04 14:44:21 -07:00
blockstore . slots_stats . mark_dead ( slot ) ;
2021-06-16 10:57:52 -07:00
rpc_subscriptions . notify_slot_update ( SlotUpdate ::Dead {
2021-03-12 05:44:06 -08:00
slot ,
2022-12-06 06:30:06 -08:00
err : format ! ( " error: {err:?} " ) ,
2021-03-12 05:44:06 -08:00
timestamp : timestamp ( ) ,
} ) ;
2021-07-18 17:04:25 -07:00
let dead_state = DeadState ::new_from_state (
slot ,
duplicate_slots_tracker ,
gossip_duplicate_confirmed_slots ,
heaviest_subtree_fork_choice ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots ,
2021-07-18 17:04:25 -07:00
) ;
2021-03-24 23:41:52 -07:00
check_slot_agrees_with_cluster (
slot ,
root ,
2021-07-18 17:04:25 -07:00
blockstore ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots ,
2021-03-24 23:41:52 -07:00
heaviest_subtree_fork_choice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter ,
2021-07-18 17:04:25 -07:00
SlotStateUpdate ::Dead ( dead_state ) ,
2021-03-24 23:41:52 -07:00
) ;
2020-12-09 23:14:31 -08:00
}
2019-05-03 16:27:53 -07:00
#[ allow(clippy::too_many_arguments) ]
2019-12-04 10:17:17 -08:00
fn handle_votable_bank (
2019-03-21 11:53:18 -07:00
bank : & Arc < Bank > ,
2020-05-29 14:40:36 -07:00
switch_fork_decision : & SwitchForkDecision ,
2019-03-21 11:53:18 -07:00
bank_forks : & Arc < RwLock < BankForks > > ,
2019-06-24 13:41:23 -07:00
tower : & mut Tower ,
2020-03-02 12:43:43 -08:00
progress : & mut ProgressMap ,
2020-03-31 08:23:42 -07:00
vote_account_pubkey : & Pubkey ,
2021-06-17 13:51:06 -07:00
identity_keypair : & Keypair ,
2020-03-31 08:23:42 -07:00
authorized_voter_keypairs : & [ Arc < Keypair > ] ,
2020-01-13 13:13:52 -08:00
blockstore : & Arc < Blockstore > ,
2019-04-30 13:23:21 -07:00
leader_schedule_cache : & Arc < LeaderScheduleCache > ,
2019-11-04 15:44:27 -08:00
lockouts_sender : & Sender < CommitmentAggregationData > ,
2021-02-18 23:42:09 -08:00
accounts_background_request_sender : & AbsRequestSender ,
2019-12-18 11:50:09 -08:00
latest_root_senders : & [ Sender < Slot > ] ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions : & Arc < RpcSubscriptions > ,
2020-04-24 15:49:57 -07:00
block_commitment_cache : & Arc < RwLock < BlockCommitmentCache > > ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2020-09-28 19:43:05 -07:00
bank_notification_sender : & Option < BankNotificationSender > ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & mut GossipDuplicateConfirmedSlots ,
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes : & mut UnfrozenGossipVerifiedVoteHashes ,
2021-03-25 18:54:51 -07:00
vote_signatures : & mut Vec < Signature > ,
has_new_vote_been_rooted : & mut bool ,
2021-06-26 08:32:08 -07:00
replay_timing : & mut ReplayTiming ,
2021-07-15 07:35:51 -07:00
voting_sender : & Sender < VoteOp > ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
2022-03-15 13:28:40 -07:00
drop_bank_sender : & Sender < Vec < Arc < Bank > > > ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot : Option < Slot > ,
2020-12-13 17:26:34 -08:00
) {
2019-11-06 01:02:26 -08:00
if bank . is_empty ( ) {
inc_new_counter_info! ( " replay_stage-voted_empty_bank " , 1 ) ;
}
2019-07-14 18:48:15 -07:00
trace! ( " handle votable bank {} " , bank . slot ( ) ) ;
2021-04-28 11:46:16 -07:00
let new_root = tower . record_bank_vote ( bank , vote_account_pubkey ) ;
2020-09-18 22:03:54 -07:00
if let Some ( new_root ) = new_root {
2019-05-20 15:01:55 -07:00
// get the root bank before squash
let root_bank = bank_forks
. read ( )
. unwrap ( )
. get ( new_root )
2022-04-28 11:51:00 -07:00
. expect ( " Root bank doesn't exist " ) ;
2019-06-11 18:27:47 -07:00
let mut rooted_banks = root_bank . parents ( ) ;
2020-09-28 19:43:05 -07:00
rooted_banks . push ( root_bank . clone ( ) ) ;
2019-06-11 18:27:47 -07:00
let rooted_slots : Vec < _ > = rooted_banks . iter ( ) . map ( | bank | bank . slot ( ) ) . collect ( ) ;
2020-01-13 13:13:52 -08:00
// Call leader schedule_cache.set_root() before blockstore.set_root() because
2019-08-27 15:09:41 -07:00
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to
2019-11-14 11:49:31 -08:00
// get shreds for repair on gossip before we update leader schedule, otherwise they may
2019-08-27 15:09:41 -07:00
// get dropped.
leader_schedule_cache . set_root ( rooted_banks . last ( ) . unwrap ( ) ) ;
2020-01-13 13:13:52 -08:00
blockstore
2021-07-01 20:02:40 -07:00
. set_roots ( rooted_slots . iter ( ) )
2019-05-29 09:43:22 -07:00
. expect ( " Ledger set roots failed " ) ;
2020-07-07 16:59:46 -07:00
let highest_confirmed_root = Some (
2020-04-24 15:49:57 -07:00
block_commitment_cache
. read ( )
. unwrap ( )
2020-07-07 16:59:46 -07:00
. highest_confirmed_root ( ) ,
2020-04-24 15:49:57 -07:00
) ;
2020-03-02 12:43:43 -08:00
Self ::handle_new_root (
new_root ,
2021-06-18 06:34:46 -07:00
bank_forks ,
2020-03-02 12:43:43 -08:00
progress ,
2020-12-12 17:22:34 -08:00
accounts_background_request_sender ,
2020-07-07 16:59:46 -07:00
highest_confirmed_root ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots ,
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes ,
2021-03-25 18:54:51 -07:00
has_new_vote_been_rooted ,
vote_signatures ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots ,
2022-03-15 13:28:40 -07:00
drop_bank_sender ,
2020-03-02 12:43:43 -08:00
) ;
2022-04-04 14:44:21 -07:00
blockstore . slots_stats . mark_rooted ( new_root ) ;
2021-06-16 10:57:52 -07:00
rpc_subscriptions . notify_roots ( rooted_slots ) ;
2020-09-28 19:43:05 -07:00
if let Some ( sender ) = bank_notification_sender {
sender
. send ( BankNotification ::Root ( root_bank ) )
. unwrap_or_else ( | err | warn! ( " bank_notification_sender failed: {:?} " , err ) ) ;
}
2019-12-18 11:50:09 -08:00
latest_root_senders . iter ( ) . for_each ( | s | {
if let Err ( e ) = s . send ( new_root ) {
trace! ( " latest root send failed: {:?} " , e ) ;
}
} ) ;
2020-05-05 14:07:21 -07:00
info! ( " new root {} " , new_root ) ;
2019-04-12 12:03:02 -07:00
}
2020-03-02 12:43:43 -08:00
2021-06-26 08:32:08 -07:00
let mut update_commitment_cache_time = Measure ::start ( " update_commitment_cache " ) ;
2020-03-02 12:43:43 -08:00
Self ::update_commitment_cache (
bank . clone ( ) ,
2020-03-30 10:29:30 -07:00
bank_forks . read ( ) . unwrap ( ) . root ( ) ,
2020-06-22 18:30:09 -07:00
progress . get_fork_stats ( bank . slot ( ) ) . unwrap ( ) . total_stake ,
2020-03-02 12:43:43 -08:00
lockouts_sender ,
) ;
2021-06-26 08:32:08 -07:00
update_commitment_cache_time . stop ( ) ;
replay_timing . update_commitment_cache_us + = update_commitment_cache_time . as_us ( ) ;
2020-03-31 08:23:42 -07:00
Self ::push_vote (
bank ,
vote_account_pubkey ,
2021-06-17 13:51:06 -07:00
identity_keypair ,
2020-03-31 08:23:42 -07:00
authorized_voter_keypairs ,
2021-04-28 11:46:16 -07:00
tower ,
2020-05-29 14:40:36 -07:00
switch_fork_decision ,
2021-03-25 18:54:51 -07:00
vote_signatures ,
* has_new_vote_been_rooted ,
2021-06-26 08:32:08 -07:00
replay_timing ,
2021-07-15 07:35:51 -07:00
voting_sender ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot ,
2020-03-31 08:23:42 -07:00
) ;
}
2019-05-20 13:32:32 -07:00
2021-04-28 11:46:16 -07:00
fn generate_vote_tx (
2021-06-17 13:51:06 -07:00
node_keypair : & Keypair ,
2021-04-28 11:46:16 -07:00
bank : & Bank ,
2020-03-31 08:23:42 -07:00
vote_account_pubkey : & Pubkey ,
authorized_voter_keypairs : & [ Arc < Keypair > ] ,
2022-02-07 14:06:19 -08:00
vote : VoteTransaction ,
2020-05-29 14:40:36 -07:00
switch_fork_decision : & SwitchForkDecision ,
2021-03-25 18:54:51 -07:00
vote_signatures : & mut Vec < Signature > ,
has_new_vote_been_rooted : bool ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot : Option < Slot > ,
2021-04-28 11:46:16 -07:00
) -> Option < Transaction > {
2022-06-29 14:48:33 -07:00
if ! bank . is_startup_verification_complete ( ) {
info! ( " startup verification incomplete, so unable to vote " ) ;
return None ;
}
2020-03-31 08:23:42 -07:00
if authorized_voter_keypairs . is_empty ( ) {
2021-04-28 11:46:16 -07:00
return None ;
2020-03-31 08:23:42 -07:00
}
2022-02-15 12:19:34 -08:00
if let Some ( slot ) = wait_to_vote_slot {
if bank . slot ( ) < slot {
return None ;
}
}
2020-11-30 09:18:33 -08:00
let vote_account = match bank . get_vote_account ( vote_account_pubkey ) {
None = > {
2020-03-31 08:23:42 -07:00
warn! (
" Vote account {} does not exist. Unable to vote " ,
vote_account_pubkey ,
) ;
2021-04-28 11:46:16 -07:00
return None ;
2020-11-30 09:18:33 -08:00
}
2022-06-25 09:27:43 -07:00
Some ( vote_account ) = > vote_account ,
2020-11-30 09:18:33 -08:00
} ;
let vote_state = vote_account . vote_state ( ) ;
let vote_state = match vote_state . as_ref ( ) {
Err ( _ ) = > {
warn! (
" Vote account {} is unreadable. Unable to vote " ,
vote_account_pubkey ,
) ;
2021-04-28 11:46:16 -07:00
return None ;
2020-11-30 09:18:33 -08:00
}
Ok ( vote_state ) = > vote_state ,
} ;
2021-07-13 11:32:45 -07:00
if vote_state . node_pubkey ! = node_keypair . pubkey ( ) {
info! (
" Vote account node_pubkey mismatch: {} (expected: {}). Unable to vote " ,
vote_state . node_pubkey ,
node_keypair . pubkey ( )
) ;
return None ;
}
2020-03-31 08:23:42 -07:00
let authorized_voter_pubkey =
if let Some ( authorized_voter_pubkey ) = vote_state . get_authorized_voter ( bank . epoch ( ) ) {
authorized_voter_pubkey
} else {
warn! (
" Vote account {} has no authorized voter for epoch {}. Unable to vote " ,
vote_account_pubkey ,
bank . epoch ( )
) ;
2021-04-28 11:46:16 -07:00
return None ;
2020-03-31 08:23:42 -07:00
} ;
2019-05-31 11:45:17 -07:00
2020-03-31 08:23:42 -07:00
let authorized_voter_keypair = match authorized_voter_keypairs
. iter ( )
. find ( | keypair | keypair . pubkey ( ) = = authorized_voter_pubkey )
{
None = > {
warn! ( " The authorized keypair {} for vote account {} is not available. Unable to vote " ,
authorized_voter_pubkey , vote_account_pubkey ) ;
2021-04-28 11:46:16 -07:00
return None ;
2020-03-31 08:23:42 -07:00
}
Some ( authorized_voter_keypair ) = > authorized_voter_keypair ,
} ;
// Send our last few votes along with the new one
2022-07-27 12:23:44 -07:00
// Compact the vote state update before sending
let should_compact = bank
. feature_set
. is_active ( & feature_set ::compact_vote_state_updates ::id ( ) ) ;
let vote = match ( should_compact , vote ) {
( true , VoteTransaction ::VoteStateUpdate ( vote_state_update ) ) = > {
2022-09-05 10:02:16 -07:00
VoteTransaction ::CompactVoteStateUpdate ( vote_state_update )
2022-07-27 12:23:44 -07:00
}
( _ , vote ) = > vote ,
} ;
2021-04-28 11:46:16 -07:00
let vote_ix = switch_fork_decision
. to_vote_instruction (
vote ,
2021-06-18 06:34:46 -07:00
vote_account_pubkey ,
2020-05-29 14:40:36 -07:00
& authorized_voter_keypair . pubkey ( ) ,
)
2021-04-28 11:46:16 -07:00
. expect ( " Switch threshold failure should not lead to voting " ) ;
2020-03-31 08:23:42 -07:00
2020-04-24 12:03:46 -07:00
let mut vote_tx = Transaction ::new_with_payer ( & [ vote_ix ] , Some ( & node_keypair . pubkey ( ) ) ) ;
2020-03-31 08:23:42 -07:00
2021-03-26 17:45:53 -07:00
let blockhash = bank . last_blockhash ( ) ;
2021-06-17 13:51:06 -07:00
vote_tx . partial_sign ( & [ node_keypair ] , blockhash ) ;
2021-03-26 17:45:53 -07:00
vote_tx . partial_sign ( & [ authorized_voter_keypair . as_ref ( ) ] , blockhash ) ;
2021-03-25 18:54:51 -07:00
if ! has_new_vote_been_rooted {
vote_signatures . push ( vote_tx . signatures [ 0 ] ) ;
if vote_signatures . len ( ) > MAX_VOTE_SIGNATURES {
vote_signatures . remove ( 0 ) ;
}
} else {
vote_signatures . clear ( ) ;
}
2021-03-26 17:45:53 -07:00
2021-04-28 11:46:16 -07:00
Some ( vote_tx )
}
#[ allow(clippy::too_many_arguments) ]
fn refresh_last_vote (
tower : & mut Tower ,
heaviest_bank_on_same_fork : & Bank ,
my_latest_landed_vote : Slot ,
vote_account_pubkey : & Pubkey ,
2021-06-17 13:51:06 -07:00
identity_keypair : & Keypair ,
2021-04-28 11:46:16 -07:00
authorized_voter_keypairs : & [ Arc < Keypair > ] ,
vote_signatures : & mut Vec < Signature > ,
has_new_vote_been_rooted : bool ,
last_vote_refresh_time : & mut LastVoteRefreshTime ,
2021-07-15 07:35:51 -07:00
voting_sender : & Sender < VoteOp > ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot : Option < Slot > ,
2021-04-28 11:46:16 -07:00
) {
let last_voted_slot = tower . last_voted_slot ( ) ;
if last_voted_slot . is_none ( ) {
return ;
}
// Refresh the vote if our latest vote hasn't landed, and the recent blockhash of the
// last attempt at a vote transaction has expired
let last_voted_slot = last_voted_slot . unwrap ( ) ;
if my_latest_landed_vote > last_voted_slot
& & last_vote_refresh_time . last_print_time . elapsed ( ) . as_secs ( ) > = 1
{
last_vote_refresh_time . last_print_time = Instant ::now ( ) ;
2021-06-08 09:53:15 -07:00
info! (
" Last landed vote for slot {} in bank {} is greater than the current last vote for slot: {} tracked by Tower " ,
my_latest_landed_vote ,
heaviest_bank_on_same_fork . slot ( ) ,
last_voted_slot
) ;
2021-04-28 11:46:16 -07:00
}
if my_latest_landed_vote > = last_voted_slot
| | heaviest_bank_on_same_fork
2022-04-20 20:57:17 -07:00
. is_hash_valid_for_age ( & tower . last_vote_tx_blockhash ( ) , MAX_PROCESSING_AGE )
2022-12-06 14:09:57 -08:00
| | {
// In order to avoid voting on multiple forks all past MAX_PROCESSING_AGE that don't
// include the last voted blockhash
last_vote_refresh_time
. last_refresh_time
. elapsed ( )
. as_millis ( )
< MAX_VOTE_REFRESH_INTERVAL_MILLIS as u128
}
2021-04-28 11:46:16 -07:00
{
return ;
}
// TODO: check the timestamp in this vote is correct, i.e. it shouldn't
// have changed from the original timestamp of the vote.
let vote_tx = Self ::generate_vote_tx (
2021-06-17 13:51:06 -07:00
identity_keypair ,
2021-04-28 11:46:16 -07:00
heaviest_bank_on_same_fork ,
vote_account_pubkey ,
authorized_voter_keypairs ,
tower . last_vote ( ) ,
& SwitchForkDecision ::SameFork ,
vote_signatures ,
has_new_vote_been_rooted ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot ,
2021-04-28 11:46:16 -07:00
) ;
if let Some ( vote_tx ) = vote_tx {
let recent_blockhash = vote_tx . message . recent_blockhash ;
tower . refresh_last_vote_tx_blockhash ( recent_blockhash ) ;
// Send the votes to the TPU and gossip for network propagation
2022-12-06 06:30:06 -08:00
let hash_string = format! ( " {recent_blockhash} " ) ;
2021-04-28 11:46:16 -07:00
datapoint_info! (
" refresh_vote " ,
( " last_voted_slot " , last_voted_slot , i64 ) ,
( " target_bank_slot " , heaviest_bank_on_same_fork . slot ( ) , i64 ) ,
( " target_bank_hash " , hash_string , String ) ,
) ;
2021-07-15 07:35:51 -07:00
voting_sender
. send ( VoteOp ::RefreshVote {
2022-03-22 18:47:55 -07:00
tx : vote_tx ,
2021-07-15 07:35:51 -07:00
last_voted_slot ,
} )
. unwrap_or_else ( | err | warn! ( " Error: {:?} " , err ) ) ;
2021-04-28 11:46:16 -07:00
last_vote_refresh_time . last_refresh_time = Instant ::now ( ) ;
}
}
2021-06-26 08:32:08 -07:00
#[ allow(clippy::too_many_arguments) ]
2021-04-28 11:46:16 -07:00
fn push_vote (
bank : & Bank ,
vote_account_pubkey : & Pubkey ,
2021-06-17 13:51:06 -07:00
identity_keypair : & Keypair ,
2021-04-28 11:46:16 -07:00
authorized_voter_keypairs : & [ Arc < Keypair > ] ,
tower : & mut Tower ,
switch_fork_decision : & SwitchForkDecision ,
vote_signatures : & mut Vec < Signature > ,
has_new_vote_been_rooted : bool ,
2021-06-26 08:32:08 -07:00
replay_timing : & mut ReplayTiming ,
2021-07-15 07:35:51 -07:00
voting_sender : & Sender < VoteOp > ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot : Option < Slot > ,
2021-04-28 11:46:16 -07:00
) {
2021-06-26 08:32:08 -07:00
let mut generate_time = Measure ::start ( " generate_vote " ) ;
2021-04-28 11:46:16 -07:00
let vote_tx = Self ::generate_vote_tx (
2021-06-17 13:51:06 -07:00
identity_keypair ,
2021-04-28 11:46:16 -07:00
bank ,
vote_account_pubkey ,
authorized_voter_keypairs ,
tower . last_vote ( ) ,
switch_fork_decision ,
vote_signatures ,
has_new_vote_been_rooted ,
2022-02-15 12:19:34 -08:00
wait_to_vote_slot ,
2021-04-17 10:22:00 -07:00
) ;
2021-06-26 08:32:08 -07:00
generate_time . stop ( ) ;
replay_timing . generate_vote_us + = generate_time . as_us ( ) ;
2021-04-28 11:46:16 -07:00
if let Some ( vote_tx ) = vote_tx {
tower . refresh_last_vote_tx_blockhash ( vote_tx . message . recent_blockhash ) ;
2021-07-15 07:35:51 -07:00
2022-05-19 18:27:58 -07:00
let saved_tower = SavedTower ::new ( tower , identity_keypair ) . unwrap_or_else ( | err | {
error! ( " Unable to create saved tower: {:?} " , err ) ;
std ::process ::exit ( 1 ) ;
} ) ;
2021-07-15 07:35:51 -07:00
let tower_slots = tower . tower_slots ( ) ;
voting_sender
. send ( VoteOp ::PushVote {
2022-03-22 18:47:55 -07:00
tx : vote_tx ,
2021-07-15 07:35:51 -07:00
tower_slots ,
2022-02-07 14:06:19 -08:00
saved_tower : SavedTowerVersions ::from ( saved_tower ) ,
2021-07-15 07:35:51 -07:00
} )
. unwrap_or_else ( | err | warn! ( " Error: {:?} " , err ) ) ;
2021-04-28 11:46:16 -07:00
}
2019-03-21 11:53:18 -07:00
}
2019-11-04 15:44:27 -08:00
fn update_commitment_cache (
2019-09-20 19:38:56 -07:00
bank : Arc < Bank > ,
2020-03-30 10:29:30 -07:00
root : Slot ,
2020-06-22 18:30:09 -07:00
total_stake : Stake ,
2019-11-04 15:44:27 -08:00
lockouts_sender : & Sender < CommitmentAggregationData > ,
2019-07-26 10:27:57 -07:00
) {
2020-03-30 10:29:30 -07:00
if let Err ( e ) =
2020-06-22 18:30:09 -07:00
lockouts_sender . send ( CommitmentAggregationData ::new ( bank , root , total_stake ) )
2020-03-30 10:29:30 -07:00
{
2019-07-26 10:27:57 -07:00
trace! ( " lockouts_sender failed: {:?} " , e ) ;
}
}
2019-03-21 11:53:18 -07:00
fn reset_poh_recorder (
2019-05-23 23:20:04 -07:00
my_pubkey : & Pubkey ,
2020-01-13 13:13:52 -08:00
blockstore : & Blockstore ,
2019-03-21 11:53:18 -07:00
bank : & Arc < Bank > ,
2022-07-05 07:29:44 -07:00
poh_recorder : & RwLock < PohRecorder > ,
2021-03-24 23:41:52 -07:00
leader_schedule_cache : & LeaderScheduleCache ,
2019-03-21 11:53:18 -07:00
) {
2020-01-13 13:13:52 -08:00
let next_leader_slot = leader_schedule_cache . next_leader_slot (
2021-06-18 06:34:46 -07:00
my_pubkey ,
2020-01-13 13:13:52 -08:00
bank . slot ( ) ,
2021-06-18 06:34:46 -07:00
bank ,
2020-01-13 13:13:52 -08:00
Some ( blockstore ) ,
2020-02-26 13:35:50 -08:00
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS ,
2020-01-13 13:13:52 -08:00
) ;
2019-07-17 14:10:15 -07:00
poh_recorder
2022-07-05 07:29:44 -07:00
. write ( )
2019-07-17 14:10:15 -07:00
. unwrap ( )
2021-09-13 16:55:35 -07:00
. reset ( bank . clone ( ) , next_leader_slot ) ;
2019-07-23 19:19:20 -07:00
let next_leader_msg = if let Some ( next_leader_slot ) = next_leader_slot {
2019-08-02 10:08:42 -07:00
format! ( " My next leader slot is {} " , next_leader_slot . 0 )
2019-07-23 19:19:20 -07:00
} else {
2019-08-02 10:08:42 -07:00
" I am not in the leader schedule yet " . to_owned ( )
2019-07-23 19:19:20 -07:00
} ;
info! (
2019-11-15 08:36:33 -08:00
" {} reset PoH to tick {} (within slot {}). {} " ,
2019-05-23 23:20:04 -07:00
my_pubkey ,
2019-03-21 11:53:18 -07:00
bank . tick_height ( ) ,
2019-11-05 18:40:00 -08:00
bank . slot ( ) ,
2019-07-23 19:19:20 -07:00
next_leader_msg ,
2019-03-21 11:53:18 -07:00
) ;
}
2020-07-29 23:17:40 -07:00
#[ allow(clippy::too_many_arguments) ]
2022-07-28 11:33:19 -07:00
fn replay_active_banks_concurrently (
2021-03-24 23:41:52 -07:00
blockstore : & Blockstore ,
bank_forks : & RwLock < BankForks > ,
2019-05-23 23:20:04 -07:00
my_pubkey : & Pubkey ,
2020-03-26 19:57:27 -07:00
vote_account : & Pubkey ,
2020-03-02 12:43:43 -08:00
progress : & mut ProgressMap ,
2021-05-10 12:14:56 -07:00
transaction_status_sender : Option < & TransactionStatusSender > ,
2019-12-12 10:36:27 -08:00
verify_recyclers : & VerifyRecyclers ,
2020-08-07 11:21:35 -07:00
replay_vote_sender : & ReplayVoteSender ,
2022-06-03 10:45:27 -07:00
replay_timing : & mut ReplayTiming ,
2022-07-11 08:53:18 -07:00
log_messages_bytes_limit : Option < usize > ,
2022-07-28 11:33:19 -07:00
active_bank_slots : & [ Slot ] ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache : & PrioritizationFeeCache ,
2022-07-28 11:33:19 -07:00
) -> Vec < ReplaySlotFromBlockstore > {
// Make mutable shared structures thread safe.
let progress = RwLock ::new ( progress ) ;
let longest_replay_time_us = AtomicU64 ::new ( 0 ) ;
// Allow for concurrent replaying of slots from different forks.
let replay_result_vec : Vec < ReplaySlotFromBlockstore > = PAR_THREAD_POOL . install ( | | {
active_bank_slots
. into_par_iter ( )
. map ( | bank_slot | {
let bank_slot = * bank_slot ;
let mut replay_result = ReplaySlotFromBlockstore {
is_slot_dead : false ,
bank_slot ,
replay_result : None ,
} ;
let my_pubkey = & my_pubkey . clone ( ) ;
trace! (
" Replay active bank: slot {}, thread_idx {} " ,
bank_slot ,
PAR_THREAD_POOL . current_thread_index ( ) . unwrap_or_default ( )
) ;
let mut progress_lock = progress . write ( ) . unwrap ( ) ;
if progress_lock
. get ( & bank_slot )
. map ( | p | p . is_dead )
. unwrap_or ( false )
{
// If the fork was marked as dead, don't replay it
debug! ( " bank_slot {:?} is marked dead " , bank_slot ) ;
replay_result . is_slot_dead = true ;
return replay_result ;
}
2019-03-21 11:53:18 -07:00
2022-07-28 11:33:19 -07:00
let bank = & bank_forks . read ( ) . unwrap ( ) . get ( bank_slot ) . unwrap ( ) ;
let parent_slot = bank . parent_slot ( ) ;
let ( num_blocks_on_fork , num_dropped_blocks_on_fork ) = {
let stats = progress_lock
. get ( & parent_slot )
. expect ( " parent of active bank must exist in progress map " ) ;
let num_blocks_on_fork = stats . num_blocks_on_fork + 1 ;
let new_dropped_blocks = bank . slot ( ) - parent_slot - 1 ;
let num_dropped_blocks_on_fork =
stats . num_dropped_blocks_on_fork + new_dropped_blocks ;
( num_blocks_on_fork , num_dropped_blocks_on_fork )
} ;
let prev_leader_slot = progress_lock . get_bank_prev_leader_slot ( bank ) ;
let bank_progress = progress_lock . entry ( bank . slot ( ) ) . or_insert_with ( | | {
ForkProgress ::new_from_bank (
bank ,
my_pubkey ,
& vote_account . clone ( ) ,
prev_leader_slot ,
num_blocks_on_fork ,
num_dropped_blocks_on_fork ,
)
} ) ;
2019-06-20 15:50:41 -07:00
2022-07-28 11:33:19 -07:00
let replay_stats = bank_progress . replay_stats . clone ( ) ;
let replay_progress = bank_progress . replay_progress . clone ( ) ;
drop ( progress_lock ) ;
if bank . collector_id ( ) ! = my_pubkey {
let mut replay_blockstore_time =
Measure ::start ( " replay_blockstore_into_bank " ) ;
let blockstore_result = Self ::replay_blockstore_into_bank (
bank ,
blockstore ,
& replay_stats ,
& replay_progress ,
transaction_status_sender ,
& replay_vote_sender . clone ( ) ,
& verify_recyclers . clone ( ) ,
log_messages_bytes_limit ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache ,
2022-07-28 11:33:19 -07:00
) ;
replay_blockstore_time . stop ( ) ;
replay_result . replay_result = Some ( blockstore_result ) ;
longest_replay_time_us
. fetch_max ( replay_blockstore_time . as_us ( ) , Ordering ::Relaxed ) ;
}
replay_result
} )
. collect ( )
} ) ;
// Accumulating time across all slots could inflate this number and make it seem like an
// overly large amount of time is being spent on blockstore compared to other activities.
replay_timing . replay_blockstore_us + = longest_replay_time_us . load ( Ordering ::Relaxed ) ;
replay_result_vec
}
#[ allow(clippy::too_many_arguments) ]
fn replay_active_bank (
blockstore : & Blockstore ,
bank_forks : & RwLock < BankForks > ,
my_pubkey : & Pubkey ,
vote_account : & Pubkey ,
progress : & mut ProgressMap ,
transaction_status_sender : Option < & TransactionStatusSender > ,
verify_recyclers : & VerifyRecyclers ,
replay_vote_sender : & ReplayVoteSender ,
replay_timing : & mut ReplayTiming ,
log_messages_bytes_limit : Option < usize > ,
bank_slot : Slot ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache : & PrioritizationFeeCache ,
2022-07-28 11:33:19 -07:00
) -> ReplaySlotFromBlockstore {
let mut replay_result = ReplaySlotFromBlockstore {
is_slot_dead : false ,
bank_slot ,
replay_result : None ,
} ;
let my_pubkey = & my_pubkey . clone ( ) ;
trace! ( " Replay active bank: slot {} " , bank_slot ) ;
if progress . get ( & bank_slot ) . map ( | p | p . is_dead ) . unwrap_or ( false ) {
// If the fork was marked as dead, don't replay it
debug! ( " bank_slot {:?} is marked dead " , bank_slot ) ;
replay_result . is_slot_dead = true ;
} else {
let bank = & bank_forks . read ( ) . unwrap ( ) . get ( bank_slot ) . unwrap ( ) ;
2020-04-08 14:35:24 -07:00
let parent_slot = bank . parent_slot ( ) ;
2022-07-28 11:33:19 -07:00
let prev_leader_slot = progress . get_bank_prev_leader_slot ( bank ) ;
2020-04-08 14:35:24 -07:00
let ( num_blocks_on_fork , num_dropped_blocks_on_fork ) = {
let stats = progress
. get ( & parent_slot )
. expect ( " parent of active bank must exist in progress map " ) ;
let num_blocks_on_fork = stats . num_blocks_on_fork + 1 ;
let new_dropped_blocks = bank . slot ( ) - parent_slot - 1 ;
let num_dropped_blocks_on_fork =
stats . num_dropped_blocks_on_fork + new_dropped_blocks ;
( num_blocks_on_fork , num_dropped_blocks_on_fork )
} ;
2021-03-24 23:41:52 -07:00
2022-07-28 11:33:19 -07:00
let bank_progress = progress . entry ( bank . slot ( ) ) . or_insert_with ( | | {
2020-04-08 14:35:24 -07:00
ForkProgress ::new_from_bank (
2022-07-28 11:33:19 -07:00
bank ,
2021-06-18 06:34:46 -07:00
my_pubkey ,
2022-07-28 11:33:19 -07:00
& vote_account . clone ( ) ,
2020-04-08 14:35:24 -07:00
prev_leader_slot ,
num_blocks_on_fork ,
num_dropped_blocks_on_fork ,
)
2020-03-26 19:57:27 -07:00
} ) ;
2022-07-28 11:33:19 -07:00
2019-07-30 13:18:33 -07:00
if bank . collector_id ( ) ! = my_pubkey {
2022-06-03 10:45:27 -07:00
let mut replay_blockstore_time = Measure ::start ( " replay_blockstore_into_bank " ) ;
2022-07-28 11:33:19 -07:00
let blockstore_result = Self ::replay_blockstore_into_bank (
bank ,
2021-06-18 06:34:46 -07:00
blockstore ,
2022-07-28 11:33:19 -07:00
& bank_progress . replay_stats ,
& bank_progress . replay_progress ,
2021-05-10 12:14:56 -07:00
transaction_status_sender ,
2022-07-28 11:33:19 -07:00
& replay_vote_sender . clone ( ) ,
& verify_recyclers . clone ( ) ,
2022-07-11 08:53:18 -07:00
log_messages_bytes_limit ,
2022-08-31 06:00:55 -07:00
prioritization_fee_cache ,
2019-11-20 15:43:10 -08:00
) ;
2022-06-03 10:45:27 -07:00
replay_blockstore_time . stop ( ) ;
2022-07-28 11:33:19 -07:00
replay_result . replay_result = Some ( blockstore_result ) ;
2022-06-03 10:45:27 -07:00
replay_timing . replay_blockstore_us + = replay_blockstore_time . as_us ( ) ;
2022-07-28 11:33:19 -07:00
}
}
replay_result
}
/// Processes the per-slot results produced by `replay_active_bank` /
/// `replay_active_banks_concurrently`: marks slots that failed replay as dead,
/// and for every bank that has reached its max tick height, freezes it and
/// performs all "bank complete" bookkeeping (fork-choice insertion, cluster
/// consistency checks, subscriber/plugin notifications, metrics).
///
/// Returns `true` if at least one bank was completed (frozen) in this pass.
#[allow(clippy::too_many_arguments)]
fn process_replay_results(
    blockstore: &Blockstore,
    bank_forks: &RwLock<BankForks>,
    progress: &mut ProgressMap,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
    bank_notification_sender: &Option<BankNotificationSender>,
    rewards_recorder_sender: &Option<RewardsRecorderSender>,
    rpc_subscriptions: &Arc<RpcSubscriptions>,
    duplicate_slots_tracker: &mut DuplicateSlotsTracker,
    gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
    epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
    unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes,
    latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
    cluster_slots_update_sender: &ClusterSlotsUpdateSender,
    cost_update_sender: &Sender<CostUpdate>,
    duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
    ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender,
    block_metadata_notifier: Option<BlockMetadataNotifierLock>,
    replay_result_vec: &[ReplaySlotFromBlockstore],
    prioritization_fee_cache: &PrioritizationFeeCache,
    purge_repair_slot_counter: &mut PurgeRepairSlotCounter,
) -> bool {
    // TODO: See if processing of blockstore replay results and bank completion can be made thread safe.
    let mut did_complete_bank = false;
    let mut tx_count = 0;
    let mut execute_timings = ExecuteTimings::default();
    for replay_result in replay_result_vec {
        // Dead slots were already skipped by the replay pass; nothing to do.
        if replay_result.is_slot_dead {
            continue;
        }

        let bank_slot = replay_result.bank_slot;
        let bank = &bank_forks.read().unwrap().get(bank_slot).unwrap();
        // `replay_result.replay_result` is None when this node was the leader
        // for the slot (no blockstore replay was attempted).
        if let Some(replay_result) = &replay_result.replay_result {
            match replay_result {
                Ok(replay_tx_count) => tx_count += replay_tx_count,
                Err(err) => {
                    // Error means the slot needs to be marked as dead
                    Self::mark_dead_slot(
                        blockstore,
                        bank,
                        bank_forks.read().unwrap().root(),
                        err,
                        rpc_subscriptions,
                        duplicate_slots_tracker,
                        gossip_duplicate_confirmed_slots,
                        epoch_slots_frozen_slots,
                        progress,
                        heaviest_subtree_fork_choice,
                        duplicate_slots_to_repair,
                        ancestor_hashes_replay_update_sender,
                        purge_repair_slot_counter,
                    );
                    // If the bank was corrupted, don't try to run the below logic to check if the
                    // bank is completed
                    continue;
                }
            }
        }

        assert_eq!(bank_slot, bank.slot());
        // `is_complete()` means the bank has reached its max tick height and
        // can be frozen.
        if bank.is_complete() {
            let mut bank_complete_time = Measure::start("bank_complete_time");
            let bank_progress = progress
                .get_mut(&bank.slot())
                .expect("Bank fork progress entry missing for completed bank");

            // Clone the Arcs so the read guards below don't hold a borrow of
            // `bank_progress` (it is mutated again further down).
            let replay_stats = bank_progress.replay_stats.clone();
            let r_replay_stats = replay_stats.read().unwrap();
            let replay_progress = bank_progress.replay_progress.clone();
            let r_replay_progress = replay_progress.read().unwrap();
            debug!(
                "bank {} is completed replay from blockstore, contribute to update cost with {:?}",
                bank.slot(),
                r_replay_stats.execute_timings
            );
            did_complete_bank = true;
            info!("bank frozen: {}", bank.slot());
            let _ = cluster_slots_update_sender.send(vec![bank_slot]);
            if let Some(transaction_status_sender) = transaction_status_sender {
                transaction_status_sender.send_transaction_status_freeze_message(bank);
            }
            bank.freeze();
            datapoint_info!(
                "bank_frozen",
                ("slot", bank_slot, i64),
                ("hash", bank.hash().to_string(), String),
            );
            // report cost tracker stats
            cost_update_sender
                .send(CostUpdate::FrozenBank { bank: bank.clone() })
                .unwrap_or_else(|err| {
                    warn!("cost_update_sender failed sending bank stats: {:?}", err)
                });

            // finalize block's minimum prioritization fee cache for this bank
            prioritization_fee_cache.finalize_priority_fee(bank.slot());
            // A frozen bank must have a real (non-default) hash.
            assert_ne!(bank.hash(), Hash::default());
            // Needs to be updated before `check_slot_agrees_with_cluster()` so that
            // any updates in `check_slot_agrees_with_cluster()` on fork choice take
            // effect
            heaviest_subtree_fork_choice.add_new_leaf_slot(
                (bank.slot(), bank.hash()),
                Some((bank.parent_slot(), bank.parent_hash())),
            );
            bank_progress.fork_stats.bank_hash = Some(bank.hash());
            let bank_frozen_state = BankFrozenState::new_from_state(
                bank.slot(),
                bank.hash(),
                duplicate_slots_tracker,
                gossip_duplicate_confirmed_slots,
                heaviest_subtree_fork_choice,
                epoch_slots_frozen_slots,
            );
            // Reconcile our freshly frozen hash against duplicate/confirmed
            // state observed from the cluster.
            check_slot_agrees_with_cluster(
                bank.slot(),
                bank_forks.read().unwrap().root(),
                blockstore,
                duplicate_slots_tracker,
                epoch_slots_frozen_slots,
                heaviest_subtree_fork_choice,
                duplicate_slots_to_repair,
                ancestor_hashes_replay_update_sender,
                purge_repair_slot_counter,
                SlotStateUpdate::BankFrozen(bank_frozen_state),
            );
            if let Some(sender) = bank_notification_sender {
                sender
                    .send(BankNotification::Frozen(bank.clone()))
                    .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
            }
            blockstore_processor::cache_block_meta(bank, cache_block_meta_sender);

            // Gossip votes for this (slot, hash) that arrived before the bank
            // froze can now be applied to the frozen-bank vote tracking.
            let bank_hash = bank.hash();
            if let Some(new_frozen_voters) =
                unfrozen_gossip_verified_vote_hashes.remove_slot_hash(bank.slot(), &bank_hash)
            {
                for pubkey in new_frozen_voters {
                    latest_validator_votes_for_frozen_banks.check_add_vote(
                        pubkey,
                        bank.slot(),
                        Some(bank_hash),
                        false,
                    );
                }
            }
            Self::record_rewards(bank, rewards_recorder_sender);
            // Notify geyser plugins of the completed block's metadata.
            if let Some(ref block_metadata_notifier) = block_metadata_notifier {
                let block_metadata_notifier = block_metadata_notifier.read().unwrap();
                block_metadata_notifier.notify_block_metadata(
                    bank.slot(),
                    &bank.last_blockhash().to_string(),
                    &bank.rewards,
                    Some(bank.clock().unix_timestamp),
                    Some(bank.block_height()),
                )
            }
            bank_complete_time.stop();

            r_replay_stats.report_stats(
                bank.slot(),
                r_replay_progress.num_txs,
                r_replay_progress.num_entries,
                r_replay_progress.num_shreds,
                bank_complete_time.as_us(),
            );
            execute_timings.accumulate(&r_replay_stats.execute_timings);
        } else {
            trace!(
                "bank {} not completed tick_height: {}, max_tick_height: {}",
                bank.slot(),
                bank.tick_height(),
                bank.max_tick_height()
            );
        }
    }
    inc_new_counter_info!("replay_stage-replay_transactions", tx_count);
    did_complete_bank
}
2022-07-28 11:33:19 -07:00
/// Replays all currently active (unfrozen, non-dead) banks in `bank_forks`,
/// then hands the per-slot results to `process_replay_results` for dead-slot
/// marking and bank-completion bookkeeping.
///
/// Uses the concurrent (rayon-based) replay path only when
/// `replay_slots_concurrently` is set AND more than one bank is active;
/// otherwise replays each slot serially on this thread.
///
/// Returns `true` if at least one bank completed replay (was frozen).
#[allow(clippy::too_many_arguments)]
fn replay_active_banks(
    blockstore: &Blockstore,
    bank_forks: &RwLock<BankForks>,
    my_pubkey: &Pubkey,
    vote_account: &Pubkey,
    progress: &mut ProgressMap,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    verify_recyclers: &VerifyRecyclers,
    heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
    replay_vote_sender: &ReplayVoteSender,
    bank_notification_sender: &Option<BankNotificationSender>,
    rewards_recorder_sender: &Option<RewardsRecorderSender>,
    rpc_subscriptions: &Arc<RpcSubscriptions>,
    duplicate_slots_tracker: &mut DuplicateSlotsTracker,
    gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
    epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
    unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes,
    latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
    cluster_slots_update_sender: &ClusterSlotsUpdateSender,
    cost_update_sender: &Sender<CostUpdate>,
    duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
    ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender,
    block_metadata_notifier: Option<BlockMetadataNotifierLock>,
    replay_timing: &mut ReplayTiming,
    log_messages_bytes_limit: Option<usize>,
    replay_slots_concurrently: bool,
    prioritization_fee_cache: &PrioritizationFeeCache,
    purge_repair_slot_counter: &mut PurgeRepairSlotCounter,
) -> bool /* completed a bank */ {
    let active_bank_slots = bank_forks.read().unwrap().active_bank_slots();
    let num_active_banks = active_bank_slots.len();
    trace!(
        "{} active bank(s) to replay: {:?}",
        num_active_banks,
        active_bank_slots
    );
    if num_active_banks > 0 {
        // Concurrency only pays off across multiple forks; with a single
        // active slot, serial replay avoids the thread-pool overhead.
        let replay_result_vec = if num_active_banks > 1 && replay_slots_concurrently {
            Self::replay_active_banks_concurrently(
                blockstore,
                bank_forks,
                my_pubkey,
                vote_account,
                progress,
                transaction_status_sender,
                verify_recyclers,
                replay_vote_sender,
                replay_timing,
                log_messages_bytes_limit,
                &active_bank_slots,
                prioritization_fee_cache,
            )
        } else {
            active_bank_slots
                .iter()
                .map(|bank_slot| {
                    Self::replay_active_bank(
                        blockstore,
                        bank_forks,
                        my_pubkey,
                        vote_account,
                        progress,
                        transaction_status_sender,
                        verify_recyclers,
                        replay_vote_sender,
                        replay_timing,
                        log_messages_bytes_limit,
                        *bank_slot,
                        prioritization_fee_cache,
                    )
                })
                .collect()
        };
        Self::process_replay_results(
            blockstore,
            bank_forks,
            progress,
            transaction_status_sender,
            cache_block_meta_sender,
            heaviest_subtree_fork_choice,
            bank_notification_sender,
            rewards_recorder_sender,
            rpc_subscriptions,
            duplicate_slots_tracker,
            gossip_duplicate_confirmed_slots,
            epoch_slots_frozen_slots,
            unfrozen_gossip_verified_vote_hashes,
            latest_validator_votes_for_frozen_banks,
            cluster_slots_update_sender,
            cost_update_sender,
            duplicate_slots_to_repair,
            ancestor_hashes_replay_update_sender,
            block_metadata_notifier,
            &replay_result_vec,
            prioritization_fee_cache,
            purge_repair_slot_counter,
        )
    } else {
        false
    }
}
2020-06-11 12:16:04 -07:00
/// Computes consensus statistics for every frozen bank whose fork stats have
/// not yet been computed: stake-weighted vote lockouts, fork-choice weights,
/// propagation status, and tower threshold/lockout flags.
///
/// As a side effect, may adopt the on-chain vote state from a frozen bank as
/// this node's local tower state when the on-chain state is newer (direct
/// vote-state-update feature path).
///
/// Returns the list of slots whose fork stats were newly computed in this pass.
#[allow(clippy::too_many_arguments)]
pub fn compute_bank_stats(
    my_vote_pubkey: &Pubkey,
    ancestors: &HashMap<u64, HashSet<u64>>,
    frozen_banks: &mut Vec<Arc<Bank>>,
    tower: &mut Tower,
    progress: &mut ProgressMap,
    vote_tracker: &VoteTracker,
    cluster_slots: &ClusterSlots,
    bank_forks: &RwLock<BankForks>,
    heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
    latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
) -> Vec<Slot> {
    // Process ancestors before descendants so stats build up the fork.
    frozen_banks.sort_by_key(|bank| bank.slot());
    let mut new_stats = vec![];
    for bank in frozen_banks {
        let bank_slot = bank.slot();
        // Only time progress map should be missing a bank slot
        // is if this node was the leader for this slot as those banks
        // are not replayed in replay_active_banks()
        {
            let is_computed = progress
                .get_fork_stats_mut(bank_slot)
                .expect("All frozen banks must exist in the Progress map")
                .computed;
            if !is_computed {
                // Check if our tower is behind, if so (and the feature migration flag is in use)
                // overwrite with the newer bank.
                if let (true, Some(vote_account)) = (
                    Tower::is_direct_vote_state_update_enabled(bank),
                    bank.get_vote_account(my_vote_pubkey),
                ) {
                    if let Some(mut bank_vote_state) =
                        vote_account.vote_state().as_ref().ok().cloned()
                    {
                        if bank_vote_state.last_voted_slot()
                            > tower.vote_state.last_voted_slot()
                        {
                            info!(
                                "Frozen bank vote state slot {:?}
                                is newer than our local vote state slot {:?},
                                adopting the bank vote state as our own.
                                Bank votes: {:?}, root: {:?},
                                Local votes: {:?}, root: {:?}",
                                bank_vote_state.last_voted_slot(),
                                tower.vote_state.last_voted_slot(),
                                bank_vote_state.votes,
                                bank_vote_state.root_slot,
                                tower.vote_state.votes,
                                tower.vote_state.root_slot
                            );
                            if let Some(local_root) = tower.vote_state.root_slot {
                                if bank_vote_state
                                    .root_slot
                                    .map(|bank_root| local_root > bank_root)
                                    .unwrap_or(true)
                                {
                                    // If the local root is larger than this on chain vote state
                                    // root (possible due to supermajority roots being set on
                                    // startup), then we need to adjust the tower
                                    bank_vote_state.root_slot = Some(local_root);
                                    bank_vote_state
                                        .votes
                                        .retain(|lockout| lockout.slot > local_root);
                                    info!(
                                        "Local root is larger than on chain root,
                                        overwrote bank root {:?} and updated votes {:?}",
                                        bank_vote_state.root_slot, bank_vote_state.votes
                                    );
                                    // Sanity check: the oldest surviving vote must
                                    // descend from the adopted local root.
                                    if let Some(first_vote) = bank_vote_state.votes.front() {
                                        assert!(ancestors
                                            .get(&first_vote.slot)
                                            .expect(
                                                "Ancestors map must contain an
                                                entry for all slots on this fork
                                                greater than `local_root` and less
                                                than `bank_slot`"
                                            )
                                            .contains(&local_root));
                                    }
                                }
                            }
                            // Adopt the (possibly root-adjusted) bank vote state.
                            tower.vote_state.root_slot = bank_vote_state.root_slot;
                            tower.vote_state.votes = bank_vote_state.votes;
                        }
                    }
                }
                let computed_bank_state = Tower::collect_vote_lockouts(
                    my_vote_pubkey,
                    bank_slot,
                    &bank.vote_accounts(),
                    ancestors,
                    |slot| progress.get_hash(slot),
                    latest_validator_votes_for_frozen_banks,
                );
                // Notify any listeners of the votes found in this newly computed
                // bank
                heaviest_subtree_fork_choice.compute_bank_stats(
                    bank,
                    tower,
                    latest_validator_votes_for_frozen_banks,
                );
                let ComputedBankState {
                    voted_stakes,
                    total_stake,
                    lockout_intervals,
                    my_latest_landed_vote,
                    ..
                } = computed_bank_state;
                let stats = progress
                    .get_fork_stats_mut(bank_slot)
                    .expect("All frozen banks must exist in the Progress map");
                stats.total_stake = total_stake;
                stats.voted_stakes = voted_stakes;
                stats.lockout_intervals = lockout_intervals;
                stats.block_height = bank.block_height();
                stats.my_latest_landed_vote = my_latest_landed_vote;
                stats.computed = true;
                new_stats.push(bank_slot);
                datapoint_info!(
                    "bank_weight",
                    ("slot", bank_slot, i64),
                    // u128 too large for influx, convert to hex
                    ("weight", format!("{:X}", stats.weight), String),
                );
                info!(
                    "{} slot_weight: {} {} {} {}",
                    my_vote_pubkey,
                    bank_slot,
                    stats.weight,
                    stats.fork_weight,
                    bank.parent().map(|b| b.slot()).unwrap_or(0)
                );
            }
        }
        // Propagation status and tower-derived flags are refreshed on every
        // pass, even for previously computed slots.
        Self::update_propagation_status(
            progress,
            bank_slot,
            bank_forks,
            vote_tracker,
            cluster_slots,
        );
        let stats = progress
            .get_fork_stats_mut(bank_slot)
            .expect("All frozen banks must exist in the Progress map");
        stats.vote_threshold =
            tower.check_vote_stake_threshold(bank_slot, &stats.voted_stakes, stats.total_stake);
        stats.is_locked_out = tower.is_locked_out(
            bank_slot,
            ancestors
                .get(&bank_slot)
                .expect("Ancestors map should contain slot for is_locked_out() check"),
        );
        stats.has_voted = tower.has_voted(bank_slot);
        stats.is_recent = tower.is_recent(bank_slot);
    }
    new_stats
}
2020-03-26 19:57:27 -07:00
fn update_propagation_status (
progress : & mut ProgressMap ,
slot : Slot ,
bank_forks : & RwLock < BankForks > ,
vote_tracker : & VoteTracker ,
2020-03-30 19:57:11 -07:00
cluster_slots : & ClusterSlots ,
2020-03-26 19:57:27 -07:00
) {
// If propagation has already been confirmed, return
2022-01-04 00:24:16 -08:00
if progress . get_leader_propagation_slot_must_exist ( slot ) . 0 {
2020-03-26 19:57:27 -07:00
return ;
}
// Otherwise we have to check the votes for confirmation
2022-01-04 00:24:16 -08:00
let mut propagated_stats = progress
. get_propagated_stats_mut ( slot )
2022-12-06 06:30:06 -08:00
. unwrap_or_else ( | | panic! ( " slot= {slot} must exist in ProgressMap " ) ) ;
2020-03-26 19:57:27 -07:00
2022-01-04 00:24:16 -08:00
if propagated_stats . slot_vote_tracker . is_none ( ) {
propagated_stats . slot_vote_tracker = vote_tracker . get_slot_vote_tracker ( slot ) ;
2020-03-26 19:57:27 -07:00
}
2022-01-04 00:24:16 -08:00
let slot_vote_tracker = propagated_stats . slot_vote_tracker . clone ( ) ;
2020-03-26 19:57:27 -07:00
2022-01-04 00:24:16 -08:00
if propagated_stats . cluster_slot_pubkeys . is_none ( ) {
propagated_stats . cluster_slot_pubkeys = cluster_slots . lookup ( slot ) ;
2020-03-30 19:57:11 -07:00
}
2022-01-04 00:24:16 -08:00
let cluster_slot_pubkeys = propagated_stats . cluster_slot_pubkeys . clone ( ) ;
2020-03-30 19:57:11 -07:00
2020-03-26 19:57:27 -07:00
let newly_voted_pubkeys = slot_vote_tracker
. as_ref ( )
2021-04-10 17:34:45 -07:00
. and_then ( | slot_vote_tracker | {
slot_vote_tracker . write ( ) . unwrap ( ) . get_voted_slot_updates ( )
} )
2020-06-11 12:16:04 -07:00
. unwrap_or_default ( ) ;
2020-03-26 19:57:27 -07:00
2020-03-30 19:57:11 -07:00
let cluster_slot_pubkeys = cluster_slot_pubkeys
. map ( | v | v . read ( ) . unwrap ( ) . keys ( ) . cloned ( ) . collect ( ) )
2020-06-11 12:16:04 -07:00
. unwrap_or_default ( ) ;
2020-03-30 19:57:11 -07:00
2020-03-26 19:57:27 -07:00
Self ::update_fork_propagated_threshold_from_votes (
progress ,
newly_voted_pubkeys ,
2020-03-30 19:57:11 -07:00
cluster_slot_pubkeys ,
2020-03-26 19:57:27 -07:00
slot ,
2022-08-04 07:45:31 -07:00
& bank_forks . read ( ) . unwrap ( ) ,
2020-03-26 19:57:27 -07:00
) ;
}
2022-10-03 01:49:47 -07:00
/// Given a `heaviest_bank` and a `heaviest_bank_on_same_voted_fork`, return
/// a bank to vote on, a bank to reset to, and a list of switch failure
/// reasons.
///
/// If `heaviest_bank_on_same_voted_fork` is `None` due to that fork no
/// longer being valid to vote on, it's possible that a validator will not
/// be able to reset away from the invalid fork that they last voted on. To
/// resolve this scenario, validators need to wait until they can create a
/// switch proof for another fork or until the invalid fork is be marked
/// valid again if it was confirmed by the cluster.
/// Until this is resolved, leaders will build each of their
/// blocks from the last reset bank on the invalid fork.
2021-07-08 19:07:32 -07:00
pub fn select_vote_and_reset_forks (
2020-06-11 12:16:04 -07:00
heaviest_bank : & Arc < Bank > ,
2021-03-24 23:41:52 -07:00
// Should only be None if there was no previous vote
heaviest_bank_on_same_voted_fork : Option < & Arc < Bank > > ,
2020-03-02 12:43:43 -08:00
ancestors : & HashMap < u64 , HashSet < u64 > > ,
descendants : & HashMap < u64 , HashSet < u64 > > ,
progress : & ProgressMap ,
2020-10-15 02:30:33 -07:00
tower : & mut Tower ,
2021-05-04 00:51:42 -07:00
latest_validator_votes_for_frozen_banks : & LatestValidatorVotesForFrozenBanks ,
2021-06-11 03:09:57 -07:00
fork_choice : & HeaviestSubtreeForkChoice ,
2020-05-29 14:40:36 -07:00
) -> SelectVoteAndResetForkResult {
2020-03-02 12:43:43 -08:00
// Try to vote on the actual heaviest fork. If the heaviest bank is
// locked out or fails the threshold check, the validator will:
// 1) Not continue to vote on current fork, waiting for lockouts to expire/
// threshold check to pass
// 2) Will reset PoH to heaviest fork in order to make sure the heaviest
// fork is propagated
// This above behavior should ensure correct voting and resetting PoH
// behavior under all cases:
// 1) The best "selected" bank is on same fork
// 2) The best "selected" bank is on a different fork,
// switch_threshold fails
// 3) The best "selected" bank is on a different fork,
2020-06-17 20:54:52 -07:00
// switch_threshold succeeds
2020-03-02 12:43:43 -08:00
let mut failure_reasons = vec! [ ] ;
let selected_fork = {
2020-06-11 12:16:04 -07:00
let switch_fork_decision = tower . check_switch_threshold (
heaviest_bank . slot ( ) ,
2021-06-18 06:34:46 -07:00
ancestors ,
descendants ,
progress ,
2020-06-11 12:16:04 -07:00
heaviest_bank . total_epoch_stake ( ) ,
heaviest_bank
. epoch_vote_accounts ( heaviest_bank . epoch ( ) )
. expect ( " Bank epoch vote accounts must contain entry for the bank's own epoch " ) ,
2021-05-04 00:51:42 -07:00
latest_validator_votes_for_frozen_banks ,
2021-06-11 03:09:57 -07:00
fork_choice ,
2020-06-11 12:16:04 -07:00
) ;
2021-03-24 23:41:52 -07:00
match switch_fork_decision {
2022-10-03 18:45:55 -07:00
SwitchForkDecision ::FailedSwitchThreshold ( switch_proof_stake , total_stake ) = > {
2021-03-24 23:41:52 -07:00
let reset_bank = heaviest_bank_on_same_voted_fork ;
// If we can't switch and our last vote was on a non-duplicate/confirmed slot, then
// reset to the the next votable bank on the same fork as our last vote,
// but don't vote.
// We don't just reset to the heaviest fork when switch threshold fails because
// a situation like this can occur:
/* Figure 1:
slot 0
|
slot 1
/ \
slot 2 ( last vote ) |
| slot 8 ( 10 % )
slot 4 ( 9 % )
* /
// Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails
2022-03-23 11:36:55 -07:00
// the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier),
2021-03-24 23:41:52 -07:00
// then there will be no blocks to include the votes for slot 4, and the network halts
// because 90% of validators can't vote
info! (
2022-10-03 18:45:55 -07:00
" Waiting to switch vote to {},
resetting to slot { :? } for now ,
switch proof stake : { } ,
threshold stake : { } ,
total stake : { } " ,
2021-03-24 23:41:52 -07:00
heaviest_bank . slot ( ) ,
reset_bank . as_ref ( ) . map ( | b | b . slot ( ) ) ,
2022-10-03 18:45:55 -07:00
switch_proof_stake ,
total_stake as f64 * SWITCH_FORK_THRESHOLD ,
total_stake
2021-03-24 23:41:52 -07:00
) ;
failure_reasons . push ( HeaviestForkFailures ::FailedSwitchThreshold (
heaviest_bank . slot ( ) ,
) ) ;
reset_bank . map ( | b | ( b , switch_fork_decision ) )
}
SwitchForkDecision ::FailedSwitchDuplicateRollback ( latest_duplicate_ancestor ) = > {
// If we can't switch and our last vote was on an unconfirmed, duplicate slot,
// then we need to reset to the heaviest bank, even if the heaviest bank is not
// a descendant of the last vote (usually for switch threshold failures we reset
// to the heaviest descendant of the last vote, but in this case, the last vote
// was on a duplicate branch). This is because in the case of *unconfirmed* duplicate
// slots, somebody needs to generate an alternative branch to escape a situation
// like a 50-50 split where both partitions have voted on different versions of the
// same duplicate slot.
// Unlike the situation described in `Figure 1` above, this is safe. To see why,
// imagine the same situation described in Figure 1 above occurs, but slot 2 is
// a duplicate block. There are now a few cases:
//
// Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + DUPLICATE_LIVENESS_THRESHOLD = 1;
//
// 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. Because duplicate slots can be confirmed
// by gossip, unlike the situation described in `Figure 1`, we don't need those
// votes to land in a descendant to confirm slot 2. Once slot 2 is confirmed by
// gossip votes, that fork is added back to the fork choice set and falls back into
// normal fork choice, which is covered by the `FailedSwitchThreshold` case above
// (everyone will resume building on their last voted fork, slot 4, since slot 8
// doesn't have for switch threshold)
//
// 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, > SWITCH_FORK_THRESHOLD of the network voted
// on slot 8. Then everybody abandons the duplicate fork from fork choice and both builds
// on slot 8's fork. They can also vote on slot 8's fork because it has sufficient weight
// to pass the switching threshold
//
// 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, <= SWITCH_FORK_THRESHOLD of the network voted
// on slot 8. This means more than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot
// guarantee progress anyways
// Note the heaviest fork is never descended from a known unconfirmed duplicate slot
// because the fork choice rule ensures that (marks it as an invalid candidate),
// thus it's safe to use as the reset bank.
let reset_bank = Some ( heaviest_bank ) ;
info! (
" Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?} " ,
heaviest_bank . slot ( ) ,
reset_bank . as_ref ( ) . map ( | b | b . slot ( ) ) ,
latest_duplicate_ancestor ,
) ;
failure_reasons . push ( HeaviestForkFailures ::FailedSwitchThreshold (
heaviest_bank . slot ( ) ,
) ) ;
reset_bank . map ( | b | ( b , switch_fork_decision ) )
}
_ = > Some ( ( heaviest_bank , switch_fork_decision ) ) ,
2020-03-02 12:43:43 -08:00
}
} ;
2020-05-29 14:40:36 -07:00
if let Some ( ( bank , switch_fork_decision ) ) = selected_fork {
2020-03-26 19:57:27 -07:00
let ( is_locked_out , vote_threshold , is_leader_slot , fork_weight ) = {
let fork_stats = progress . get_fork_stats ( bank . slot ( ) ) . unwrap ( ) ;
let propagated_stats = & progress . get_propagated_stats ( bank . slot ( ) ) . unwrap ( ) ;
2020-03-02 12:43:43 -08:00
(
fork_stats . is_locked_out ,
fork_stats . vote_threshold ,
2020-03-26 19:57:27 -07:00
propagated_stats . is_leader_slot ,
2020-03-02 12:43:43 -08:00
fork_stats . weight ,
)
} ;
2020-03-26 19:57:27 -07:00
2022-01-04 00:24:16 -08:00
let propagation_confirmed = is_leader_slot
| | progress
. get_leader_propagation_slot_must_exist ( bank . slot ( ) )
. 0 ;
2020-03-26 19:57:27 -07:00
2020-03-02 12:43:43 -08:00
if is_locked_out {
failure_reasons . push ( HeaviestForkFailures ::LockedOut ( bank . slot ( ) ) ) ;
}
if ! vote_threshold {
failure_reasons . push ( HeaviestForkFailures ::FailedThreshold ( bank . slot ( ) ) ) ;
}
2020-03-26 19:57:27 -07:00
if ! propagation_confirmed {
failure_reasons . push ( HeaviestForkFailures ::NoPropagatedConfirmation ( bank . slot ( ) ) ) ;
}
2020-03-02 12:43:43 -08:00
2020-05-29 14:40:36 -07:00
if ! is_locked_out
& & vote_threshold
& & propagation_confirmed
2020-10-15 02:30:33 -07:00
& & switch_fork_decision . can_vote ( )
2020-05-29 14:40:36 -07:00
{
2020-03-02 12:43:43 -08:00
info! ( " voting: {} {} " , bank . slot ( ) , fork_weight ) ;
2020-05-29 14:40:36 -07:00
SelectVoteAndResetForkResult {
vote_bank : Some ( ( bank . clone ( ) , switch_fork_decision ) ) ,
reset_bank : Some ( bank . clone ( ) ) ,
heaviest_fork_failures : failure_reasons ,
}
2020-03-02 12:43:43 -08:00
} else {
2020-05-29 14:40:36 -07:00
SelectVoteAndResetForkResult {
vote_bank : None ,
reset_bank : Some ( bank . clone ( ) ) ,
heaviest_fork_failures : failure_reasons ,
}
2020-03-02 12:43:43 -08:00
}
} else {
2020-05-29 14:40:36 -07:00
SelectVoteAndResetForkResult {
vote_bank : None ,
reset_bank : None ,
heaviest_fork_failures : failure_reasons ,
}
2020-03-02 12:43:43 -08:00
}
2019-03-21 11:53:18 -07:00
}
2020-03-26 19:57:27 -07:00
fn update_fork_propagated_threshold_from_votes (
progress : & mut ProgressMap ,
2021-02-07 18:07:00 -08:00
mut newly_voted_pubkeys : Vec < Pubkey > ,
mut cluster_slot_pubkeys : Vec < Pubkey > ,
2020-03-26 19:57:27 -07:00
fork_tip : Slot ,
2022-08-04 07:45:31 -07:00
bank_forks : & BankForks ,
2020-03-26 19:57:27 -07:00
) {
2022-01-04 00:24:16 -08:00
let mut current_leader_slot = progress . get_latest_leader_slot_must_exist ( fork_tip ) ;
2020-03-26 19:57:27 -07:00
let mut did_newly_reach_threshold = false ;
2022-08-04 07:45:31 -07:00
let root = bank_forks . root ( ) ;
2020-03-26 19:57:27 -07:00
loop {
// These cases mean confirmation of propagation on any earlier
// leader blocks must have been reached
2022-09-07 13:04:46 -07:00
if current_leader_slot . is_none ( ) | | current_leader_slot . unwrap ( ) < root {
2020-03-26 19:57:27 -07:00
break ;
}
let leader_propagated_stats = progress
. get_propagated_stats_mut ( current_leader_slot . unwrap ( ) )
2020-09-11 02:03:11 -07:00
. expect ( " current_leader_slot >= root, so must exist in the progress map " ) ;
2020-03-26 19:57:27 -07:00
// If a descendant has reached propagation threshold, then
2020-03-30 19:57:11 -07:00
// all its ancestor banks have also reached propagation
2020-03-26 19:57:27 -07:00
// threshold as well (Validators can't have voted for a
// descendant without also getting the ancestor block)
2022-12-06 14:09:57 -08:00
if leader_propagated_stats . is_propagated | | {
2020-03-26 19:57:27 -07:00
// If there's no new validators to record, and there's no
// newly achieved threshold, then there's no further
// information to propagate backwards to past leader blocks
2022-12-06 14:09:57 -08:00
newly_voted_pubkeys . is_empty ( )
& & cluster_slot_pubkeys . is_empty ( )
& & ! did_newly_reach_threshold
} {
2020-03-26 19:57:27 -07:00
break ;
}
// We only iterate through the list of leader slots by traversing
// the linked list of 'prev_leader_slot`'s outlined in the
// `progress` map
assert! ( leader_propagated_stats . is_leader_slot ) ;
let leader_bank = bank_forks
. get ( current_leader_slot . unwrap ( ) )
. expect ( " Entry in progress map must exist in BankForks " )
. clone ( ) ;
did_newly_reach_threshold = Self ::update_slot_propagated_threshold_from_votes (
& mut newly_voted_pubkeys ,
2020-03-30 19:57:11 -07:00
& mut cluster_slot_pubkeys ,
2020-03-26 19:57:27 -07:00
& leader_bank ,
leader_propagated_stats ,
did_newly_reach_threshold ,
) | | did_newly_reach_threshold ;
// Now jump to process the previous leader slot
current_leader_slot = leader_propagated_stats . prev_leader_slot ;
}
}
fn update_slot_propagated_threshold_from_votes (
2021-02-07 18:07:00 -08:00
newly_voted_pubkeys : & mut Vec < Pubkey > ,
cluster_slot_pubkeys : & mut Vec < Pubkey > ,
2020-03-26 19:57:27 -07:00
leader_bank : & Bank ,
leader_propagated_stats : & mut PropagatedStats ,
did_child_reach_threshold : bool ,
) -> bool {
// Track whether this slot newly confirm propagation
// throughout the network (switched from is_propagated == false
// to is_propagated == true)
let mut did_newly_reach_threshold = false ;
// If a child of this slot confirmed propagation, then
// we can return early as this implies this slot must also
// be propagated
if did_child_reach_threshold {
if ! leader_propagated_stats . is_propagated {
leader_propagated_stats . is_propagated = true ;
return true ;
} else {
return false ;
}
}
if leader_propagated_stats . is_propagated {
return false ;
}
2020-03-30 19:57:11 -07:00
// Remove the vote/node pubkeys that we already know voted for this
// slot. These vote accounts/validator identities are safe to drop
2020-06-17 20:54:52 -07:00
// because they don't to be ported back any further because earlier
2020-03-30 19:57:11 -07:00
// parents must have:
2020-06-17 20:54:52 -07:00
// 1) Also recorded these pubkeys already, or
2020-03-26 19:57:27 -07:00
// 2) Already reached the propagation threshold, in which case
// they no longer need to track the set of propagated validators
2020-03-30 19:57:11 -07:00
newly_voted_pubkeys . retain ( | vote_pubkey | {
let exists = leader_propagated_stats
2020-03-26 19:57:27 -07:00
. propagated_validators
2021-02-03 15:02:11 -08:00
. contains ( vote_pubkey ) ;
2020-03-30 19:57:11 -07:00
leader_propagated_stats . add_vote_pubkey (
2021-02-07 18:07:00 -08:00
* vote_pubkey ,
2021-06-18 06:34:46 -07:00
leader_bank . epoch_vote_account_stake ( vote_pubkey ) ,
2020-03-30 19:57:11 -07:00
) ;
! exists
} ) ;
cluster_slot_pubkeys . retain ( | node_pubkey | {
let exists = leader_propagated_stats
. propagated_node_ids
2021-02-07 18:07:00 -08:00
. contains ( node_pubkey ) ;
2022-06-21 13:26:51 -07:00
leader_propagated_stats . add_node_pubkey ( node_pubkey , leader_bank ) ;
2020-03-30 19:57:11 -07:00
! exists
2020-03-26 19:57:27 -07:00
} ) ;
2020-03-30 19:57:11 -07:00
if leader_propagated_stats . total_epoch_stake = = 0
| | leader_propagated_stats . propagated_validators_stake as f64
/ leader_propagated_stats . total_epoch_stake as f64
> SUPERMINORITY_THRESHOLD
{
leader_propagated_stats . is_propagated = true ;
did_newly_reach_threshold = true
}
2020-03-26 19:57:27 -07:00
did_newly_reach_threshold
}
2022-10-27 20:06:06 -07:00
#[ allow(clippy::too_many_arguments) ]
2021-03-24 23:41:52 -07:00
fn mark_slots_confirmed (
2021-07-18 17:04:25 -07:00
confirmed_forks : & [ ( Slot , Hash ) ] ,
blockstore : & Blockstore ,
2021-03-24 23:41:52 -07:00
bank_forks : & RwLock < BankForks > ,
progress : & mut ProgressMap ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
2021-03-24 23:41:52 -07:00
fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair : & mut DuplicateSlotsToRepair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender : & AncestorHashesReplayUpdateSender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter : & mut PurgeRepairSlotCounter ,
2021-03-24 23:41:52 -07:00
) {
2021-07-18 17:04:25 -07:00
let root_slot = bank_forks . read ( ) . unwrap ( ) . root ( ) ;
for ( slot , frozen_hash ) in confirmed_forks . iter ( ) {
2021-03-24 23:41:52 -07:00
// This case should be guaranteed as false by confirm_forks()
if let Some ( false ) = progress . is_supermajority_confirmed ( * slot ) {
2021-06-11 03:09:57 -07:00
// Because supermajority confirmation will iterate through and update the
// subtree in fork choice, only incur this cost if the slot wasn't already
2021-03-24 23:41:52 -07:00
// confirmed
progress . set_supermajority_confirmed_slot ( * slot ) ;
2021-07-18 17:04:25 -07:00
// If the slot was confirmed, then it must be frozen. Otherwise, we couldn't
// have replayed any of its descendants and figured out it was confirmed.
assert! ( * frozen_hash ! = Hash ::default ( ) ) ;
let duplicate_confirmed_state = DuplicateConfirmedState ::new_from_state (
* frozen_hash ,
| | false ,
| | Some ( * frozen_hash ) ,
) ;
2021-03-24 23:41:52 -07:00
check_slot_agrees_with_cluster (
* slot ,
root_slot ,
2021-07-18 17:04:25 -07:00
blockstore ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots ,
2021-03-24 23:41:52 -07:00
fork_choice ,
2021-07-08 19:07:32 -07:00
duplicate_slots_to_repair ,
2021-07-26 20:59:00 -07:00
ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
purge_repair_slot_counter ,
2021-07-18 17:04:25 -07:00
SlotStateUpdate ::DuplicateConfirmed ( duplicate_confirmed_state ) ,
2021-03-24 23:41:52 -07:00
) ;
}
}
}
2019-03-27 04:30:26 -07:00
fn confirm_forks (
2019-06-24 13:41:23 -07:00
tower : & Tower ,
2020-06-22 18:30:09 -07:00
voted_stakes : & VotedStakes ,
total_stake : Stake ,
2020-03-02 12:43:43 -08:00
progress : & ProgressMap ,
2020-01-28 16:02:28 -08:00
bank_forks : & RwLock < BankForks > ,
2021-07-18 17:04:25 -07:00
) -> Vec < ( Slot , Hash ) > {
2020-02-03 16:48:24 -08:00
let mut confirmed_forks = vec! [ ] ;
for ( slot , prog ) in progress . iter ( ) {
2021-03-24 23:41:52 -07:00
if ! prog . fork_stats . is_supermajority_confirmed {
2020-02-03 16:48:24 -08:00
let bank = bank_forks
. read ( )
. unwrap ( )
. get ( * slot )
. expect ( " bank in progress must exist in BankForks " )
. clone ( ) ;
2022-07-28 11:33:19 -07:00
let duration = prog
. replay_stats
. read ( )
. unwrap ( )
. started
. elapsed ( )
. as_millis ( ) ;
2020-06-22 18:30:09 -07:00
if bank . is_frozen ( ) & & tower . is_slot_confirmed ( * slot , voted_stakes , total_stake ) {
2019-11-19 02:36:00 -08:00
info! ( " validator fork confirmed {} {}ms " , * slot , duration ) ;
2020-02-19 14:25:49 -08:00
datapoint_info! ( " validator-confirmation " , ( " duration_ms " , duration , i64 ) ) ;
2021-07-18 17:04:25 -07:00
confirmed_forks . push ( ( * slot , bank . hash ( ) ) ) ;
2019-11-19 02:36:00 -08:00
} else {
debug! (
" validator fork not confirmed {} {}ms {:?} " ,
* slot ,
duration ,
2020-06-22 18:30:09 -07:00
voted_stakes . get ( slot )
2019-11-19 02:36:00 -08:00
) ;
}
2019-03-27 04:30:26 -07:00
}
2019-11-19 02:36:00 -08:00
}
2020-02-03 16:48:24 -08:00
confirmed_forks
2019-03-27 04:30:26 -07:00
}
2021-04-10 17:34:45 -07:00
#[ allow(clippy::too_many_arguments) ]
2021-07-08 19:07:32 -07:00
pub fn handle_new_root (
2020-04-24 15:49:57 -07:00
new_root : Slot ,
2020-01-28 16:02:28 -08:00
bank_forks : & RwLock < BankForks > ,
2020-03-02 12:43:43 -08:00
progress : & mut ProgressMap ,
2021-02-18 23:42:09 -08:00
accounts_background_request_sender : & AbsRequestSender ,
2020-07-07 16:59:46 -07:00
highest_confirmed_root : Option < Slot > ,
2020-06-11 12:16:04 -07:00
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-06-10 22:28:23 -07:00
duplicate_slots_tracker : & mut DuplicateSlotsTracker ,
2021-03-24 23:41:52 -07:00
gossip_duplicate_confirmed_slots : & mut GossipDuplicateConfirmedSlots ,
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes : & mut UnfrozenGossipVerifiedVoteHashes ,
2021-03-25 18:54:51 -07:00
has_new_vote_been_rooted : & mut bool ,
voted_signatures : & mut Vec < Signature > ,
2021-08-13 14:21:52 -07:00
epoch_slots_frozen_slots : & mut EpochSlotsFrozenSlots ,
2022-03-15 13:28:40 -07:00
drop_bank_sender : & Sender < Vec < Arc < Bank > > > ,
2019-03-19 17:30:36 -07:00
) {
2021-11-15 15:28:18 -08:00
let removed_banks = bank_forks . write ( ) . unwrap ( ) . set_root (
2020-04-24 15:49:57 -07:00
new_root ,
2020-12-12 17:22:34 -08:00
accounts_background_request_sender ,
2020-07-07 16:59:46 -07:00
highest_confirmed_root ,
2020-04-24 15:49:57 -07:00
) ;
2022-04-04 14:44:21 -07:00
2022-03-15 13:28:40 -07:00
drop_bank_sender
2021-11-19 08:20:18 -08:00
. send ( removed_banks )
. unwrap_or_else ( | err | warn! ( " bank drop failed: {:?} " , err ) ) ;
2021-09-02 13:52:14 -07:00
// Dropping the bank_forks write lock and reacquiring as a read lock is
// safe because updates to bank_forks are only made by a single thread.
2019-03-19 17:30:36 -07:00
let r_bank_forks = bank_forks . read ( ) . unwrap ( ) ;
2021-03-25 18:54:51 -07:00
let new_root_bank = & r_bank_forks [ new_root ] ;
if ! * has_new_vote_been_rooted {
for signature in voted_signatures . iter ( ) {
if new_root_bank . get_signature_status ( signature ) . is_some ( ) {
* has_new_vote_been_rooted = true ;
break ;
}
}
if * has_new_vote_been_rooted {
std ::mem ::take ( voted_signatures ) ;
}
}
2020-03-26 19:57:27 -07:00
progress . handle_new_root ( & r_bank_forks ) ;
2022-11-15 18:38:07 -08:00
heaviest_subtree_fork_choice . set_tree_root ( ( new_root , r_bank_forks . root_bank ( ) . hash ( ) ) ) ;
2022-07-11 04:33:15 -07:00
* duplicate_slots_tracker = duplicate_slots_tracker . split_off ( & new_root ) ;
2021-06-10 22:28:23 -07:00
// duplicate_slots_tracker now only contains entries >= `new_root`
2022-07-11 04:33:15 -07:00
* gossip_duplicate_confirmed_slots = gossip_duplicate_confirmed_slots . split_off ( & new_root ) ;
2021-04-12 01:00:59 -07:00
// gossip_confirmed_slots now only contains entries >= `new_root`
2021-04-10 17:34:45 -07:00
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes . set_root ( new_root ) ;
2022-07-11 04:33:15 -07:00
* epoch_slots_frozen_slots = epoch_slots_frozen_slots . split_off ( & new_root ) ;
2021-08-13 14:21:52 -07:00
// epoch_slots_frozen_slots now only contains entries >= `new_root`
2019-03-19 17:30:36 -07:00
}
2019-04-19 02:39:44 -07:00
fn generate_new_bank_forks (
2020-01-13 13:13:52 -08:00
blockstore : & Blockstore ,
2020-03-26 19:57:27 -07:00
bank_forks : & RwLock < BankForks > ,
2019-04-19 02:39:44 -07:00
leader_schedule_cache : & Arc < LeaderScheduleCache > ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions : & Arc < RpcSubscriptions > ,
2020-03-26 19:57:27 -07:00
progress : & mut ProgressMap ,
2022-01-17 08:59:47 -08:00
replay_timing : & mut ReplayTiming ,
2019-04-19 02:39:44 -07:00
) {
2019-02-07 15:10:54 -08:00
// Find the next slot that chains to the old slot
2022-01-17 08:59:47 -08:00
let mut generate_new_bank_forks_read_lock =
Measure ::start ( " generate_new_bank_forks_read_lock " ) ;
2020-03-26 19:57:27 -07:00
let forks = bank_forks . read ( ) . unwrap ( ) ;
2022-01-17 08:59:47 -08:00
generate_new_bank_forks_read_lock . stop ( ) ;
2019-02-26 21:57:45 -08:00
let frozen_banks = forks . frozen_banks ( ) ;
2020-04-24 15:49:57 -07:00
let frozen_bank_slots : Vec < u64 > = frozen_banks
. keys ( )
. cloned ( )
. filter ( | s | * s > = forks . root ( ) )
. collect ( ) ;
2022-01-17 08:59:47 -08:00
let mut generate_new_bank_forks_get_slots_since =
Measure ::start ( " generate_new_bank_forks_get_slots_since " ) ;
2020-01-13 13:13:52 -08:00
let next_slots = blockstore
2019-03-04 16:40:28 -08:00
. get_slots_since ( & frozen_bank_slots )
2019-02-26 21:57:45 -08:00
. expect ( " Db error " ) ;
2022-01-17 08:59:47 -08:00
generate_new_bank_forks_get_slots_since . stop ( ) ;
2019-03-18 16:04:36 -07:00
// Filter out what we've already seen
2019-07-17 14:10:15 -07:00
trace! ( " generate new forks {:?} " , {
let mut next_slots = next_slots . iter ( ) . collect ::< Vec < _ > > ( ) ;
next_slots . sort ( ) ;
next_slots
} ) ;
2022-01-17 08:59:47 -08:00
let mut generate_new_bank_forks_loop = Measure ::start ( " generate_new_bank_forks_loop " ) ;
2019-12-13 17:20:31 -08:00
let mut new_banks = HashMap ::new ( ) ;
2019-11-08 11:30:25 -08:00
for ( parent_slot , children ) in next_slots {
2019-02-26 21:57:45 -08:00
let parent_bank = frozen_banks
2019-11-08 11:30:25 -08:00
. get ( & parent_slot )
2019-02-26 21:57:45 -08:00
. expect ( " missing parent in bank forks " )
. clone ( ) ;
2019-11-08 11:30:25 -08:00
for child_slot in children {
2019-12-13 17:20:31 -08:00
if forks . get ( child_slot ) . is_some ( ) | | new_banks . get ( & child_slot ) . is_some ( ) {
2019-11-08 11:30:25 -08:00
trace! ( " child already active or frozen {} " , child_slot ) ;
2019-03-03 16:44:06 -08:00
continue ;
2019-02-28 19:49:22 -08:00
}
2019-04-19 02:39:44 -07:00
let leader = leader_schedule_cache
2019-11-08 11:30:25 -08:00
. slot_leader_at ( child_slot , Some ( & parent_bank ) )
2019-04-19 02:39:44 -07:00
. unwrap ( ) ;
2019-11-26 00:42:54 -08:00
info! (
" new fork:{} parent:{} root:{} " ,
child_slot ,
parent_slot ,
forks . root ( )
) ;
2020-02-25 15:49:59 -08:00
let child_bank = Self ::new_bank_from_parent_with_notify (
& parent_bank ,
child_slot ,
forks . root ( ) ,
& leader ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions ,
2021-10-05 21:53:26 -07:00
NewBankOptions ::default ( ) ,
2020-02-25 15:49:59 -08:00
) ;
2021-02-07 18:07:00 -08:00
let empty : Vec < Pubkey > = vec! [ ] ;
2020-03-30 19:57:11 -07:00
Self ::update_fork_propagated_threshold_from_votes (
progress ,
empty ,
2021-02-07 18:07:00 -08:00
vec! [ leader ] ,
2020-03-30 19:57:11 -07:00
parent_bank . slot ( ) ,
2022-08-04 07:45:31 -07:00
& forks ,
2020-03-30 19:57:11 -07:00
) ;
2020-02-04 18:50:24 -08:00
new_banks . insert ( child_slot , child_bank ) ;
2019-02-26 21:57:45 -08:00
}
}
2019-12-13 17:20:31 -08:00
drop ( forks ) ;
2022-01-17 08:59:47 -08:00
generate_new_bank_forks_loop . stop ( ) ;
2019-12-13 17:20:31 -08:00
2022-01-17 08:59:47 -08:00
let mut generate_new_bank_forks_write_lock =
Measure ::start ( " generate_new_bank_forks_write_lock " ) ;
2020-03-26 19:57:27 -07:00
let mut forks = bank_forks . write ( ) . unwrap ( ) ;
2019-12-13 17:20:31 -08:00
for ( _ , bank ) in new_banks {
forks . insert ( bank ) ;
}
2022-01-17 08:59:47 -08:00
generate_new_bank_forks_write_lock . stop ( ) ;
saturating_add_assign! (
replay_timing . generate_new_bank_forks_read_lock_us ,
generate_new_bank_forks_read_lock . as_us ( )
) ;
saturating_add_assign! (
replay_timing . generate_new_bank_forks_get_slots_since_us ,
generate_new_bank_forks_get_slots_since . as_us ( )
) ;
saturating_add_assign! (
replay_timing . generate_new_bank_forks_loop_us ,
generate_new_bank_forks_loop . as_us ( )
) ;
saturating_add_assign! (
replay_timing . generate_new_bank_forks_write_lock_us ,
generate_new_bank_forks_write_lock . as_us ( )
) ;
2019-02-07 15:10:54 -08:00
}
2018-10-10 16:49:41 -07:00
2020-02-25 15:49:59 -08:00
fn new_bank_from_parent_with_notify (
parent : & Arc < Bank > ,
slot : u64 ,
root_slot : u64 ,
leader : & Pubkey ,
2021-06-16 10:57:52 -07:00
rpc_subscriptions : & Arc < RpcSubscriptions > ,
2021-10-05 21:53:26 -07:00
new_bank_options : NewBankOptions ,
2020-02-25 15:49:59 -08:00
) -> Bank {
2021-06-16 10:57:52 -07:00
rpc_subscriptions . notify_slot ( slot , parent . slot ( ) , root_slot ) ;
2021-10-05 21:53:26 -07:00
Bank ::new_from_parent_with_options ( parent , leader , slot , new_bank_options )
2020-02-25 15:49:59 -08:00
}
2020-02-11 17:01:49 -08:00
fn record_rewards ( bank : & Bank , rewards_recorder_sender : & Option < RewardsRecorderSender > ) {
if let Some ( rewards_recorder_sender ) = rewards_recorder_sender {
2020-10-09 12:55:35 -07:00
let rewards = bank . rewards . read ( ) . unwrap ( ) ;
if ! rewards . is_empty ( ) {
2020-02-11 17:01:49 -08:00
rewards_recorder_sender
2020-10-09 12:55:35 -07:00
. send ( ( bank . slot ( ) , rewards . clone ( ) ) )
2020-02-11 17:01:49 -08:00
. unwrap_or_else ( | err | warn! ( " rewards_recorder_sender failed: {:?} " , err ) ) ;
2020-02-04 18:50:24 -08:00
}
}
}
2020-09-08 07:55:09 -07:00
pub fn get_unlock_switch_vote_slot ( cluster_type : ClusterType ) -> Slot {
match cluster_type {
ClusterType ::Development = > 0 ,
ClusterType ::Devnet = > 0 ,
2020-06-16 02:55:36 -07:00
// Epoch 63
2020-09-08 07:55:09 -07:00
ClusterType ::Testnet = > 21_692_256 ,
// 400_000 slots into epoch 61
ClusterType ::MainnetBeta = > 26_752_000 ,
2020-05-30 00:03:19 -07:00
}
}
2019-11-13 10:12:09 -08:00
pub fn join ( self ) -> thread ::Result < ( ) > {
2019-11-04 15:44:27 -08:00
self . commitment_service . join ( ) ? ;
2019-02-26 21:57:45 -08:00
self . t_replay . join ( ) . map ( | _ | ( ) )
2018-10-10 16:49:41 -07:00
}
}
2018-09-13 14:00:17 -07:00
2018-10-10 16:49:41 -07:00
#[ cfg(test) ]
2022-05-02 16:33:53 -07:00
pub ( crate ) mod tests {
2021-12-03 09:00:31 -08:00
use {
super ::* ,
crate ::{
2022-01-04 00:24:16 -08:00
broadcast_stage ::RetransmitSlotsReceiver ,
2021-12-03 09:00:31 -08:00
consensus ::Tower ,
2021-12-17 05:44:40 -08:00
progress_map ::{ ValidatorStakeInfo , RETRANSMIT_BASE_DELAY_MS } ,
2021-12-03 09:00:31 -08:00
replay_stage ::ReplayStage ,
tree_diff ::TreeDiff ,
vote_simulator ::{ self , VoteSimulator } ,
2019-11-20 15:43:10 -08:00
} ,
2021-12-03 09:00:31 -08:00
crossbeam_channel ::unbounded ,
solana_entry ::entry ::{ self , Entry } ,
solana_gossip ::{ cluster_info ::Node , crds ::Cursor } ,
solana_ledger ::{
blockstore ::{ entries_to_test_shreds , make_slot_entries , BlockstoreError } ,
create_new_tmp_ledger ,
genesis_utils ::{ create_genesis_config , create_genesis_config_with_leader } ,
get_tmp_ledger_path ,
2022-05-30 05:51:19 -07:00
shred ::{ Shred , ShredFlags , LEGACY_SHRED_DATA_CAPACITY } ,
2021-12-03 09:00:31 -08:00
} ,
solana_rpc ::{
optimistically_confirmed_bank_tracker ::OptimisticallyConfirmedBank ,
2022-03-07 23:20:34 -08:00
rpc ::{ create_test_transaction_entries , populate_blockstore_for_tests } ,
2021-12-03 09:00:31 -08:00
} ,
solana_runtime ::{
accounts_background_service ::AbsRequestSender ,
commitment ::BlockCommitment ,
genesis_utils ::{ GenesisConfigInfo , ValidatorVoteKeypairs } ,
} ,
solana_sdk ::{
clock ::NUM_CONSECUTIVE_LEADER_SLOTS ,
genesis_config ,
hash ::{ hash , Hash } ,
instruction ::InstructionError ,
poh_config ::PohConfig ,
2022-09-12 07:59:41 -07:00
signature ::{ Keypair , Signer } ,
2021-12-03 09:00:31 -08:00
system_transaction ,
transaction ::TransactionError ,
} ,
solana_streamer ::socket ::SocketAddrSpace ,
2022-01-13 23:24:41 -08:00
solana_transaction_status ::VersionedTransactionWithStatusMeta ,
2021-12-03 09:00:31 -08:00
solana_vote_program ::{
2022-08-03 23:12:59 -07:00
vote_state ::{ self , VoteStateVersions } ,
2021-12-03 09:00:31 -08:00
vote_transaction ,
} ,
std ::{
fs ::remove_dir_all ,
iter ,
2022-01-11 02:44:46 -08:00
sync ::{ atomic ::AtomicU64 , Arc , RwLock } ,
2021-12-03 09:00:31 -08:00
} ,
trees ::{ tr , Tree } ,
2019-11-13 07:14:09 -08:00
} ;
2018-10-10 16:49:41 -07:00
2020-06-29 18:49:57 -07:00
#[test]
fn test_is_partition_detected() {
    let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1, None::<GenerateVotes>);
    let ancestors = bank_forks.read().unwrap().ancestors();
    // A partition is detected exactly when the last-voted slot is not an
    // ancestor (or equal) of the heaviest slot.
    // Cases: (last_vote, heaviest_slot, expect_partition)
    for (last_vote, heaviest_slot, expect_partition) in [
        // Last vote 1 is an ancestor of the heaviest slot 3: no partition
        (1, 3, false),
        // Last vote 3 *is* the heaviest slot: no partition
        (3, 3, false),
        // Last vote 2 is not an ancestor of the heaviest slot 3: partition!
        (2, 3, true),
        // Last vote 4 is not an ancestor of the heaviest slot 3: partition!
        (4, 3, true),
    ] {
        assert_eq!(
            ReplayStage::is_partition_detected(&ancestors, last_vote, heaviest_slot),
            expect_partition
        );
    }
}
2021-08-02 14:33:28 -07:00
/// Bundle of pre-wired validator components (ledger, gossip, PoH, tower, RPC)
/// used to drive `ReplayStage` unit tests; built by
/// `replay_blockstore_components`.
pub struct ReplayBlockstoreComponents {
    pub blockstore: Arc<Blockstore>,
    // Maps each validator's node (identity) pubkey to its vote account pubkey
    validator_node_to_vote_keys: HashMap<Pubkey, Pubkey>,
    // Identity pubkey of the first simulated validator ("our" node)
    my_pubkey: Pubkey,
    cluster_info: ClusterInfo,
    leader_schedule_cache: Arc<LeaderScheduleCache>,
    poh_recorder: RwLock<PohRecorder>,
    // Tower restored from the simulated bank forks for `my_pubkey`
    tower: Tower,
    rpc_subscriptions: Arc<RpcSubscriptions>,
    pub vote_simulator: VoteSimulator,
}
2021-08-02 14:33:28 -07:00
/// Builds a full set of `ReplayStage` test components from an optional fork
/// tree (defaults to the single slot 0), a validator count, and an optional
/// vote-generation callback passed through to the vote simulator.
pub fn replay_blockstore_components(
    forks: Option<Tree<Slot>>,
    num_validators: usize,
    generate_votes: Option<GenerateVotes>,
) -> ReplayBlockstoreComponents {
    // Setup blockstore
    let (vote_simulator, blockstore) = setup_forks_from_tree(
        forks.unwrap_or_else(|| tr(0)),
        num_validators,
        generate_votes,
    );

    let VoteSimulator {
        ref validator_keypairs,
        ref bank_forks,
        ..
    } = vote_simulator;

    let blockstore = Arc::new(blockstore);
    // Node identity pubkey -> vote account pubkey, for every simulated validator
    let validator_node_to_vote_keys: HashMap<Pubkey, Pubkey> = validator_keypairs
        .iter()
        .map(|(_, keypairs)| {
            (
                keypairs.node_keypair.pubkey(),
                keypairs.vote_keypair.pubkey(),
            )
        })
        .collect();

    // ClusterInfo: treat the first simulated validator as "our" node
    let my_keypairs = validator_keypairs.values().next().unwrap();
    let my_pubkey = my_keypairs.node_keypair.pubkey();
    let cluster_info = ClusterInfo::new(
        Node::new_localhost_with_pubkey(&my_pubkey).info,
        Arc::new(my_keypairs.node_keypair.insecure_clone()),
        SocketAddrSpace::Unspecified,
    );
    assert_eq!(my_pubkey, cluster_info.id());

    // Leader schedule cache, derived from the simulator's root bank
    let root_bank = bank_forks.read().unwrap().root_bank();
    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&root_bank));

    // PohRecorder, started from the current working bank
    let working_bank = bank_forks.read().unwrap().working_bank();
    let poh_recorder = RwLock::new(
        PohRecorder::new(
            working_bank.tick_height(),
            working_bank.last_blockhash(),
            working_bank.clone(),
            None,
            working_bank.ticks_per_slot(),
            &Pubkey::default(),
            &blockstore,
            &leader_schedule_cache,
            &Arc::new(PohConfig::default()),
            Arc::new(AtomicBool::new(false)),
        )
        .0,
    );

    // Tower, restored for our vote account from the simulated bank forks
    let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
    let tower = Tower::new_from_bankforks(
        &bank_forks.read().unwrap(),
        &cluster_info.id(),
        &my_vote_pubkey,
    );

    // RpcSubscriptions
    let optimistically_confirmed_bank =
        OptimisticallyConfirmedBank::locked_from_bank_forks_root(bank_forks);
    let exit = Arc::new(AtomicBool::new(false));
    let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
    let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
        &exit,
        max_complete_transaction_status_slot,
        bank_forks.clone(),
        Arc::new(RwLock::new(BlockCommitmentCache::default())),
        optimistically_confirmed_bank,
    ));

    ReplayBlockstoreComponents {
        blockstore,
        validator_node_to_vote_keys,
        my_pubkey,
        cluster_info,
        leader_schedule_cache,
        poh_recorder,
        tower,
        rpc_subscriptions,
        vote_simulator,
    }
}
#[test]
fn test_child_slots_of_same_parent() {
    // Exercises generate_new_bank_forks(): banks should be created for
    // blockstore slots chaining to frozen banks, and leader slot 1's
    // propagation stats should accumulate the leaders of its descendants.
    let ReplayBlockstoreComponents {
        blockstore,
        validator_node_to_vote_keys,
        vote_simulator,
        leader_schedule_cache,
        rpc_subscriptions,
        ..
    } = replay_blockstore_components(None, 1, None::<GenerateVotes>);

    let VoteSimulator {
        mut progress,
        bank_forks,
        ..
    } = vote_simulator;

    // Insert a non-root bank so that the propagation logic will update this
    // bank
    let bank1 = Bank::new_from_parent(
        &bank_forks.read().unwrap().get(0).unwrap(),
        &leader_schedule_cache.slot_leader_at(1, None).unwrap(),
        1,
    );
    progress.insert(
        1,
        ForkProgress::new_from_bank(
            &bank1,
            bank1.collector_id(),
            validator_node_to_vote_keys
                .get(bank1.collector_id())
                .unwrap(),
            Some(0),
            0,
            0,
        ),
    );
    assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
    bank1.freeze();
    bank_forks.write().unwrap().insert(bank1);

    // Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
    // chaining to slot 1
    let (shreds, _) = make_slot_entries(
        NUM_CONSECUTIVE_LEADER_SLOTS, // slot
        1,                            // parent_slot
        8,                            // num_entries
        true,                         // merkle_variant
    );
    blockstore.insert_shreds(shreds, None, false).unwrap();
    // No bank exists for the new slot until generate_new_bank_forks runs
    assert!(bank_forks
        .read()
        .unwrap()
        .get(NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_none());
    let mut replay_timing = ReplayTiming::default();
    ReplayStage::generate_new_bank_forks(
        &blockstore,
        &bank_forks,
        &leader_schedule_cache,
        &rpc_subscriptions,
        &mut progress,
        &mut replay_timing,
    );
    assert!(bank_forks
        .read()
        .unwrap()
        .get(NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_some());

    // Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
    // chaining to slot 1
    let (shreds, _) = make_slot_entries(
        2 * NUM_CONSECUTIVE_LEADER_SLOTS,
        1,
        8,
        true, // merkle_variant
    );
    blockstore.insert_shreds(shreds, None, false).unwrap();
    assert!(bank_forks
        .read()
        .unwrap()
        .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_none());
    ReplayStage::generate_new_bank_forks(
        &blockstore,
        &bank_forks,
        &leader_schedule_cache,
        &rpc_subscriptions,
        &mut progress,
        &mut replay_timing,
    );
    // Both descendant slots should now have banks
    assert!(bank_forks
        .read()
        .unwrap()
        .get(NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_some());
    assert!(bank_forks
        .read()
        .unwrap()
        .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
        .is_some());

    // There are 20 equally staked accounts, of which 3 have built
    // banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
    // we should see 3 validators in bank 1's propagated_validator set.
    let expected_leader_slots = vec![
        1,
        NUM_CONSECUTIVE_LEADER_SLOTS,
        2 * NUM_CONSECUTIVE_LEADER_SLOTS,
    ];
    for slot in expected_leader_slots {
        let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap();
        let vote_key = validator_node_to_vote_keys.get(&leader).unwrap();
        assert!(progress
            .get_propagated_stats(1)
            .unwrap()
            .propagated_validators
            .contains(vote_key));
    }
}
2019-03-19 17:30:36 -07:00
// Verifies that ReplayStage::handle_new_root() prunes all slot-keyed state:
// after rooting slot `root`, bank_forks' root advances to `root`, the
// progress map keeps only the root entry, and every auxiliary structure
// (DuplicateSlotsTracker, GossipDuplicateConfirmedSlots,
// UnfrozenGossipVerifiedVoteHashes, EpochSlotsFrozenSlots) drops slots
// below the root (root - 1) while retaining root and root + 1.
// NOTE: the bare timestamp lines interleaved below are git-blame artifacts
// from the export of this file, not code.
#[ test ]
fn test_handle_new_root ( ) {
2019-11-08 20:56:57 -08:00
let genesis_config = create_genesis_config ( 10_000 ) . genesis_config ;
2021-08-05 06:42:38 -07:00
let bank0 = Bank ::new_for_tests ( & genesis_config ) ;
2020-06-12 10:04:17 -07:00
let bank_forks = Arc ::new ( RwLock ::new ( BankForks ::new ( bank0 ) ) ) ;
2021-04-12 01:00:59 -07:00
2020-01-28 16:02:28 -08:00
// Build a bank at slot 3, descended from bank 0, to serve as the new root.
let root = 3 ;
let root_bank = Bank ::new_from_parent (
2022-04-28 11:51:00 -07:00
& bank_forks . read ( ) . unwrap ( ) . get ( 0 ) . unwrap ( ) ,
2020-01-28 16:02:28 -08:00
& Pubkey ::default ( ) ,
root ,
) ;
2021-04-12 01:00:59 -07:00
// Freeze before inserting so the (slot, hash) pair is stable for fork choice.
root_bank . freeze ( ) ;
let root_hash = root_bank . hash ( ) ;
2020-01-28 16:02:28 -08:00
bank_forks . write ( ) . unwrap ( ) . insert ( root_bank ) ;
2021-04-12 01:00:59 -07:00
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice ::new ( ( root , root_hash ) ) ;
2020-03-26 19:57:27 -07:00
let mut progress = ProgressMap ::default ( ) ;
2020-01-28 16:02:28 -08:00
// Populate progress for every slot 0..=root; handle_new_root should purge
// all but the root's entry.
for i in 0 ..= root {
2021-06-11 03:09:57 -07:00
progress . insert ( i , ForkProgress ::new ( Hash ::default ( ) , None , None , 0 , 0 ) ) ;
2020-01-28 16:02:28 -08:00
}
2021-06-10 22:28:23 -07:00
// Seed each slot-keyed structure with one slot below, at, and above the
// new root to observe which entries survive the root transition.
let mut duplicate_slots_tracker : DuplicateSlotsTracker =
vec! [ root - 1 , root , root + 1 ] . into_iter ( ) . collect ( ) ;
2021-03-24 23:41:52 -07:00
let mut gossip_duplicate_confirmed_slots : GossipDuplicateConfirmedSlots =
vec! [ root - 1 , root , root + 1 ]
. into_iter ( )
. map ( | s | ( s , Hash ::default ( ) ) )
. collect ( ) ;
2021-04-21 14:40:35 -07:00
let mut unfrozen_gossip_verified_vote_hashes : UnfrozenGossipVerifiedVoteHashes =
UnfrozenGossipVerifiedVoteHashes {
votes_per_slot : vec ! [ root - 1 , root , root + 1 ]
. into_iter ( )
. map ( | s | ( s , HashMap ::new ( ) ) )
. collect ( ) ,
} ;
2021-08-13 14:21:52 -07:00
let mut epoch_slots_frozen_slots : EpochSlotsFrozenSlots = vec! [ root - 1 , root , root + 1 ]
. into_iter ( )
. map ( | slot | ( slot , Hash ::default ( ) ) )
. collect ( ) ;
2022-03-15 13:28:40 -07:00
let ( drop_bank_sender , _drop_bank_receiver ) = unbounded ( ) ;
2020-04-24 15:49:57 -07:00
ReplayStage ::handle_new_root (
root ,
& bank_forks ,
& mut progress ,
2021-02-18 23:42:09 -08:00
& AbsRequestSender ::default ( ) ,
2020-04-24 15:49:57 -07:00
None ,
2020-06-11 12:16:04 -07:00
& mut heaviest_subtree_fork_choice ,
2021-06-10 22:28:23 -07:00
& mut duplicate_slots_tracker ,
2021-03-24 23:41:52 -07:00
& mut gossip_duplicate_confirmed_slots ,
2021-04-21 14:40:35 -07:00
& mut unfrozen_gossip_verified_vote_hashes ,
2021-03-25 18:54:51 -07:00
& mut true ,
& mut Vec ::new ( ) ,
2021-08-13 14:21:52 -07:00
& mut epoch_slots_frozen_slots ,
2022-03-15 13:28:40 -07:00
& drop_bank_sender ,
2020-04-24 15:49:57 -07:00
) ;
2020-01-28 16:02:28 -08:00
assert_eq! ( bank_forks . read ( ) . unwrap ( ) . root ( ) , root ) ;
// Only the root's progress entry should remain.
assert_eq! ( progress . len ( ) , 1 ) ;
assert! ( progress . get ( & root ) . is_some ( ) ) ;
2021-03-24 23:41:52 -07:00
// root - 1 is filtered out
2021-06-10 22:28:23 -07:00
assert_eq! (
duplicate_slots_tracker . into_iter ( ) . collect ::< Vec < Slot > > ( ) ,
vec! [ root , root + 1 ]
) ;
2021-03-24 23:41:52 -07:00
assert_eq! (
gossip_duplicate_confirmed_slots
. keys ( )
. cloned ( )
. collect ::< Vec < Slot > > ( ) ,
vec! [ root , root + 1 ]
) ;
2021-04-10 17:34:45 -07:00
assert_eq! (
2021-04-21 14:40:35 -07:00
unfrozen_gossip_verified_vote_hashes
. votes_per_slot
2021-04-10 17:34:45 -07:00
. keys ( )
. cloned ( )
. collect ::< Vec < Slot > > ( ) ,
vec! [ root , root + 1 ]
) ;
2021-08-13 14:21:52 -07:00
assert_eq! (
2022-09-23 13:57:27 -07:00
epoch_slots_frozen_slots . into_keys ( ) . collect ::< Vec < Slot > > ( ) ,
2021-08-13 14:21:52 -07:00
vec! [ root , root + 1 ]
) ;
2019-03-19 17:30:36 -07:00
}
2019-06-20 15:50:41 -07:00
2020-04-24 15:49:57 -07:00
// Verifies handle_new_root() when a highest-confirmed-root (slot 1) below the
// new root (slot 3) is supplied: the confirmed root and its chain to the new
// root must be retained in bank_forks and progress, while the competing fork
// at slot 2 (also a child of slot 1, but not an ancestor of the root) is
// pruned from both.
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
2020-07-07 16:59:46 -07:00
fn test_handle_new_root_ahead_of_highest_confirmed_root ( ) {
2020-04-24 15:49:57 -07:00
let genesis_config = create_genesis_config ( 10_000 ) . genesis_config ;
2021-08-05 06:42:38 -07:00
let bank0 = Bank ::new_for_tests ( & genesis_config ) ;
2020-06-12 10:04:17 -07:00
let bank_forks = Arc ::new ( RwLock ::new ( BankForks ::new ( bank0 ) ) ) ;
2020-04-24 15:49:57 -07:00
let confirmed_root = 1 ;
let fork = 2 ;
// Chain: 0 -> 1 (confirmed_root); 1 -> 2 (fork) and 1 -> 3 (root) compete.
let bank1 = Bank ::new_from_parent (
2022-04-28 11:51:00 -07:00
& bank_forks . read ( ) . unwrap ( ) . get ( 0 ) . unwrap ( ) ,
2020-04-24 15:49:57 -07:00
& Pubkey ::default ( ) ,
confirmed_root ,
) ;
bank_forks . write ( ) . unwrap ( ) . insert ( bank1 ) ;
let bank2 = Bank ::new_from_parent (
2022-04-28 11:51:00 -07:00
& bank_forks . read ( ) . unwrap ( ) . get ( confirmed_root ) . unwrap ( ) ,
2020-04-24 15:49:57 -07:00
& Pubkey ::default ( ) ,
fork ,
) ;
bank_forks . write ( ) . unwrap ( ) . insert ( bank2 ) ;
let root = 3 ;
let root_bank = Bank ::new_from_parent (
2022-04-28 11:51:00 -07:00
& bank_forks . read ( ) . unwrap ( ) . get ( confirmed_root ) . unwrap ( ) ,
2020-04-24 15:49:57 -07:00
& Pubkey ::default ( ) ,
root ,
) ;
2021-04-12 01:00:59 -07:00
root_bank . freeze ( ) ;
let root_hash = root_bank . hash ( ) ;
2020-04-24 15:49:57 -07:00
bank_forks . write ( ) . unwrap ( ) . insert ( root_bank ) ;
2021-04-12 01:00:59 -07:00
let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice ::new ( ( root , root_hash ) ) ;
2020-04-24 15:49:57 -07:00
let mut progress = ProgressMap ::default ( ) ;
for i in 0 ..= root {
2021-06-11 03:09:57 -07:00
progress . insert ( i , ForkProgress ::new ( Hash ::default ( ) , None , None , 0 , 0 ) ) ;
2020-04-24 15:49:57 -07:00
}
2022-03-15 13:28:40 -07:00
let ( drop_bank_sender , _drop_bank_receiver ) = unbounded ( ) ;
2020-04-24 15:49:57 -07:00
ReplayStage ::handle_new_root (
root ,
& bank_forks ,
& mut progress ,
2021-02-18 23:42:09 -08:00
& AbsRequestSender ::default ( ) ,
2020-04-24 15:49:57 -07:00
// Unlike test_handle_new_root, pass the confirmed root so pruning
// must preserve it even though it is below the new root.
Some ( confirmed_root ) ,
2020-06-11 12:16:04 -07:00
& mut heaviest_subtree_fork_choice ,
2021-06-10 22:28:23 -07:00
& mut DuplicateSlotsTracker ::default ( ) ,
2021-04-21 14:40:35 -07:00
& mut GossipDuplicateConfirmedSlots ::default ( ) ,
& mut UnfrozenGossipVerifiedVoteHashes ::default ( ) ,
2021-03-25 18:54:51 -07:00
& mut true ,
& mut Vec ::new ( ) ,
2021-08-13 14:21:52 -07:00
& mut EpochSlotsFrozenSlots ::default ( ) ,
2022-03-15 13:28:40 -07:00
& drop_bank_sender ,
2020-04-24 15:49:57 -07:00
) ;
assert_eq! ( bank_forks . read ( ) . unwrap ( ) . root ( ) , root ) ;
// The confirmed root survives; the non-ancestor fork at slot 2 is gone.
assert! ( bank_forks . read ( ) . unwrap ( ) . get ( confirmed_root ) . is_some ( ) ) ;
assert! ( bank_forks . read ( ) . unwrap ( ) . get ( fork ) . is_none ( ) ) ;
assert_eq! ( progress . len ( ) , 2 ) ;
assert! ( progress . get ( & root ) . is_some ( ) ) ;
assert! ( progress . get ( & confirmed_root ) . is_some ( ) ) ;
assert! ( progress . get ( & fork ) . is_none ( ) ) ;
}
2019-06-20 15:50:41 -07:00
// Verifies a fork is marked dead when a slot contains a transaction from a
// keypair with no account: replay must fail with
// InvalidTransaction(AccountNotFound). Uses the check_dead_fork harness
// defined below.
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
2019-07-07 14:37:12 -07:00
fn test_dead_fork_transaction_error ( ) {
let keypair1 = Keypair ::new ( ) ;
let keypair2 = Keypair ::new ( ) ;
let missing_keypair = Keypair ::new ( ) ;
let missing_keypair2 = Keypair ::new ( ) ;
2019-10-31 13:38:50 -07:00
let res = check_dead_fork ( | _keypair , bank | {
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
2019-08-20 17:16:06 -07:00
// One entry with two transfers: the second one is signed by an
// unfunded keypair and must fail account lookup during replay.
let entry = entry ::next_entry (
2019-10-31 13:38:50 -07:00
& blockhash ,
hashes_per_tick . saturating_sub ( 1 ) ,
2019-06-20 15:50:41 -07:00
vec! [
2019-10-31 13:38:50 -07:00
system_transaction ::transfer ( & keypair1 , & keypair2 . pubkey ( ) , 2 , blockhash ) , // should be fine,
2019-06-20 15:50:41 -07:00
system_transaction ::transfer (
& missing_keypair ,
2019-07-07 14:37:12 -07:00
& missing_keypair2 . pubkey ( ) ,
2019-06-20 15:50:41 -07:00
2 ,
2019-10-31 13:38:50 -07:00
blockhash ,
2019-06-20 15:50:41 -07:00
) , // should cause AccountNotFound error
] ,
2019-08-20 17:16:06 -07:00
) ;
2022-09-15 14:51:41 -07:00
entries_to_test_shreds (
& [ entry ] ,
slot ,
slot . saturating_sub ( 1 ) , // parent_slot
false , // is_full_slot
0 , // version
true , // merkle_variant
)
2019-07-07 14:37:12 -07:00
} ) ;
2019-06-20 15:50:41 -07:00
2019-07-07 14:37:12 -07:00
assert_matches! (
res ,
2020-01-14 17:15:26 -08:00
Err ( BlockstoreProcessorError ::InvalidTransaction (
TransactionError ::AccountNotFound
) )
2019-07-07 14:37:12 -07:00
) ;
}
2019-06-20 15:50:41 -07:00
2019-07-07 14:37:12 -07:00
// Verifies a fork is marked dead when an entry chains off the wrong hash:
// building the entry on `bad_hash` instead of the bank's last blockhash must
// fail replay with InvalidBlock(BlockError::InvalidEntryHash).
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
fn test_dead_fork_entry_verification_failure ( ) {
let keypair2 = Keypair ::new ( ) ;
2019-10-31 13:38:50 -07:00
let res = check_dead_fork ( | genesis_keypair , bank | {
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
2019-06-20 15:50:41 -07:00
let bad_hash = hash ( & [ 2 ; 30 ] ) ;
2019-10-31 13:38:50 -07:00
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
2019-08-20 17:16:06 -07:00
let entry = entry ::next_entry (
2019-10-23 12:11:04 -07:00
// Use wrong blockhash so that the entry causes an entry verification failure
2019-06-20 15:50:41 -07:00
& bad_hash ,
2019-10-31 13:38:50 -07:00
hashes_per_tick . saturating_sub ( 1 ) ,
2019-10-23 22:01:22 -07:00
// The transfer itself is valid (funded by the genesis keypair);
// only the entry hash chain is broken.
vec! [ system_transaction ::transfer (
2021-06-18 06:34:46 -07:00
genesis_keypair ,
2019-06-20 15:50:41 -07:00
& keypair2 . pubkey ( ) ,
2 ,
2019-10-31 13:38:50 -07:00
blockhash ,
2019-07-07 14:37:12 -07:00
) ] ,
2019-08-20 17:16:06 -07:00
) ;
2022-09-15 14:51:41 -07:00
entries_to_test_shreds (
& [ entry ] ,
slot ,
slot . saturating_sub ( 1 ) , // parent_slot
false , // is_full_slot
0 , // version
true , // merkle_variant
)
2019-07-07 14:37:12 -07:00
} ) ;
2020-01-14 17:15:26 -08:00
if let Err ( BlockstoreProcessorError ::InvalidBlock ( block_error ) ) = res {
2019-10-31 13:38:50 -07:00
assert_eq! ( block_error , BlockError ::InvalidEntryHash ) ;
} else {
2020-05-15 09:35:43 -07:00
panic! ( ) ;
2019-10-31 13:38:50 -07:00
}
}
// Verifies a fork is marked dead when a tick entry carries too few hashes:
// a tick built with hashes_per_tick - 1 must fail replay with
// InvalidBlock(BlockError::InvalidTickHashCount).
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
fn test_dead_fork_invalid_tick_hash_count ( ) {
let res = check_dead_fork ( | _keypair , bank | {
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
// Guard: the off-by-one below is only meaningful if ticks hash at all.
assert! ( hashes_per_tick > 0 ) ;
let too_few_hashes_tick = Entry ::new ( & blockhash , hashes_per_tick - 1 , vec! [ ] ) ;
entries_to_test_shreds (
2021-12-24 12:32:43 -08:00
& [ too_few_hashes_tick ] ,
2019-10-31 13:38:50 -07:00
slot ,
slot . saturating_sub ( 1 ) ,
false ,
2019-11-18 18:05:02 -08:00
0 ,
2022-09-15 14:51:41 -07:00
true , // merkle_variant
2019-10-31 13:38:50 -07:00
)
} ) ;
2020-01-14 17:15:26 -08:00
if let Err ( BlockstoreProcessorError ::InvalidBlock ( block_error ) ) = res {
2019-10-31 13:38:50 -07:00
assert_eq! ( block_error , BlockError ::InvalidTickHashCount ) ;
} else {
2020-05-15 09:35:43 -07:00
panic! ( ) ;
2019-10-31 13:38:50 -07:00
}
}
// Verifies a fork is marked dead when a slot carries the wrong number of
// ticks: ticks_per_slot + 1 must yield BlockError::TooManyTicks, and
// ticks_per_slot - 1 (with the slot marked full) BlockError::TooFewTicks.
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
fn test_dead_fork_invalid_slot_tick_count ( ) {
2021-03-01 14:57:37 -08:00
solana_logger ::setup ( ) ;
2019-11-08 17:21:54 -08:00
// Too many ticks per slot
2019-10-31 13:38:50 -07:00
let res = check_dead_fork ( | _keypair , bank | {
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
entries_to_test_shreds (
2021-12-24 12:32:43 -08:00
& entry ::create_ticks ( bank . ticks_per_slot ( ) + 1 , hashes_per_tick , blockhash ) ,
2019-10-31 13:38:50 -07:00
slot ,
slot . saturating_sub ( 1 ) ,
false ,
2019-11-18 18:05:02 -08:00
0 ,
2022-09-15 14:51:41 -07:00
true , // merkle_variant
2019-10-31 13:38:50 -07:00
)
} ) ;
2020-01-14 17:15:26 -08:00
if let Err ( BlockstoreProcessorError ::InvalidBlock ( block_error ) ) = res {
2021-03-01 14:57:37 -08:00
assert_eq! ( block_error , BlockError ::TooManyTicks ) ;
2019-10-31 13:38:50 -07:00
} else {
2020-05-15 09:35:43 -07:00
panic! ( ) ;
2019-10-31 13:38:50 -07:00
}
2019-11-08 17:21:54 -08:00
// Too few ticks per slot
let res = check_dead_fork ( | _keypair , bank | {
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
entries_to_test_shreds (
2021-12-24 12:32:43 -08:00
& entry ::create_ticks ( bank . ticks_per_slot ( ) - 1 , hashes_per_tick , blockhash ) ,
2019-11-08 17:21:54 -08:00
slot ,
slot . saturating_sub ( 1 ) ,
// is_full_slot = true: the slot claims to be complete despite
// being one tick short, which is what trips TooFewTicks.
true ,
2019-11-18 18:05:02 -08:00
0 ,
2022-09-15 14:51:41 -07:00
true , // merkle_variant
2019-11-08 17:21:54 -08:00
)
} ) ;
2020-01-14 17:15:26 -08:00
if let Err ( BlockstoreProcessorError ::InvalidBlock ( block_error ) ) = res {
2021-03-01 14:57:37 -08:00
assert_eq! ( block_error , BlockError ::TooFewTicks ) ;
2019-11-08 17:21:54 -08:00
} else {
2020-05-15 09:35:43 -07:00
panic! ( ) ;
2019-11-08 17:21:54 -08:00
}
}
// Verifies a fork is marked dead when a slot has the full tick count but is
// shredded with is_full_slot = false (no final-tick marker): replay must fail
// with InvalidBlock(BlockError::InvalidLastTick).
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
fn test_dead_fork_invalid_last_tick ( ) {
let res = check_dead_fork ( | _keypair , bank | {
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
entries_to_test_shreds (
2021-12-24 12:32:43 -08:00
& entry ::create_ticks ( bank . ticks_per_slot ( ) , hashes_per_tick , blockhash ) ,
2019-11-08 17:21:54 -08:00
slot ,
slot . saturating_sub ( 1 ) ,
// is_full_slot = false while the entries contain a full slot of ticks.
false ,
2019-11-18 18:05:02 -08:00
0 ,
2022-09-15 14:51:41 -07:00
true , // merkle_variant
2019-11-08 17:21:54 -08:00
)
} ) ;
2020-01-14 17:15:26 -08:00
if let Err ( BlockstoreProcessorError ::InvalidBlock ( block_error ) ) = res {
2019-11-08 17:21:54 -08:00
assert_eq! ( block_error , BlockError ::InvalidLastTick ) ;
} else {
2020-05-15 09:35:43 -07:00
panic! ( ) ;
2019-11-08 17:21:54 -08:00
}
2019-10-31 13:38:50 -07:00
}
// Verifies a fork is marked dead when a non-tick entry appears after the
// slot's final tick: replay must fail with
// InvalidBlock(BlockError::TrailingEntry).
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
fn test_dead_fork_trailing_entry ( ) {
let keypair = Keypair ::new ( ) ;
2021-07-18 17:04:25 -07:00
let res = check_dead_fork ( | funded_keypair , bank | {
2019-10-31 13:38:50 -07:00
let blockhash = bank . last_blockhash ( ) ;
let slot = bank . slot ( ) ;
let hashes_per_tick = bank . hashes_per_tick ( ) . unwrap_or ( 0 ) ;
// A complete slot's worth of ticks...
let mut entries =
2020-05-15 09:35:43 -07:00
entry ::create_ticks ( bank . ticks_per_slot ( ) , hashes_per_tick , blockhash ) ;
2019-10-31 13:38:50 -07:00
let last_entry_hash = entries . last ( ) . unwrap ( ) . hash ;
2021-07-18 17:04:25 -07:00
let tx = system_transaction ::transfer ( funded_keypair , & keypair . pubkey ( ) , 2 , blockhash ) ;
2019-10-31 13:38:50 -07:00
// ...followed by one extra transaction entry chained after the last tick.
let trailing_entry = entry ::next_entry ( & last_entry_hash , 1 , vec! [ tx ] ) ;
entries . push ( trailing_entry ) ;
2022-09-15 14:51:41 -07:00
entries_to_test_shreds (
& entries ,
slot ,
slot . saturating_sub ( 1 ) , // parent_slot
true , // is_full_slot
0 , // version
true , // merkle_variant
)
2019-10-31 13:38:50 -07:00
} ) ;
2020-01-14 17:15:26 -08:00
if let Err ( BlockstoreProcessorError ::InvalidBlock ( block_error ) ) = res {
2019-10-31 13:38:50 -07:00
assert_eq! ( block_error , BlockError ::TrailingEntry ) ;
} else {
2020-05-15 09:35:43 -07:00
panic! ( ) ;
2019-10-31 13:38:50 -07:00
}
2019-07-07 14:37:12 -07:00
}
// Verifies a fork is marked dead when a shred's payload is not deserializable
// into entries: a data shred filled with 0xa5 bytes must fail replay with
// FailedToLoadEntries(BlockstoreError::InvalidShredData).
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
2019-10-16 15:41:43 -07:00
fn test_dead_fork_entry_deserialize_failure ( ) {
// Insert entry that causes deserialization failure
2021-07-18 17:04:25 -07:00
let res = check_dead_fork ( | _ , bank | {
2022-05-30 05:51:19 -07:00
// Fill an entire legacy data-shred payload with a junk byte pattern.
let gibberish = [ 0xa5 u8 ; LEGACY_SHRED_DATA_CAPACITY ] ;
2022-04-25 05:43:22 -07:00
let parent_offset = bank . slot ( ) - bank . parent_slot ( ) ;
let shred = Shred ::new_from_data (
bank . slot ( ) ,
0 , // index,
parent_offset as u16 ,
2022-04-27 11:04:10 -07:00
& gibberish ,
2022-05-02 16:33:53 -07:00
ShredFlags ::DATA_COMPLETE_SHRED ,
0 , // reference_tick
0 , // version
0 , // fec_set_index
2019-08-20 17:16:06 -07:00
) ;
2019-10-16 15:41:43 -07:00
vec! [ shred ]
2019-07-07 14:37:12 -07:00
} ) ;
2019-06-20 15:50:41 -07:00
2019-07-07 14:37:12 -07:00
assert_matches! (
res ,
2021-02-18 23:29:31 -08:00
Err ( BlockstoreProcessorError ::FailedToLoadEntries (
BlockstoreError ::InvalidShredData ( _ )
) , )
2019-07-07 14:37:12 -07:00
) ;
}
2019-11-14 11:49:31 -08:00
// Given a closure producing shreds with some fatal defect, check that replaying
// those shreds causes the fork to be
2019-07-07 14:37:12 -07:00
// marked as dead. Returns the error for caller to verify.
//
// Harness flow: build bank1 (child of the simulator's frozen bank0), insert
// the closure's shreds for slot 1 into the blockstore, replay them via
// ReplayStage::replay_blockstore_into_bank, and on error run mark_dead_slot.
// Finally assert the slot is dead in both the progress map and the blockstore,
// and return the replay result (with the Ok payload discarded).
//
// The closure receives the first validator's node keypair and the Arc'd bank1.
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
2020-01-14 17:15:26 -08:00
fn check_dead_fork < F > ( shred_to_insert : F ) -> result ::Result < ( ) , BlockstoreProcessorError >
2019-07-07 14:37:12 -07:00
where
2019-10-31 13:38:50 -07:00
F : Fn ( & Keypair , Arc < Bank > ) -> Vec < Shred > ,
2019-07-07 14:37:12 -07:00
{
let ledger_path = get_tmp_ledger_path! ( ) ;
2020-08-07 11:21:35 -07:00
let ( replay_vote_sender , _replay_vote_receiver ) = unbounded ( ) ;
2019-07-07 14:37:12 -07:00
// Inner scope so blockstore handles are dropped before the ledger
// directory is removed below.
let res = {
2021-07-18 17:04:25 -07:00
let ReplayBlockstoreComponents {
blockstore ,
vote_simulator ,
..
} = replay_blockstore_components ( Some ( tr ( 0 ) ) , 1 , None ) ;
let VoteSimulator {
mut progress ,
bank_forks ,
mut heaviest_subtree_fork_choice ,
validator_keypairs ,
2019-10-23 12:11:04 -07:00
..
2021-07-18 17:04:25 -07:00
} = vote_simulator ;
2022-04-28 11:51:00 -07:00
let bank0 = bank_forks . read ( ) . unwrap ( ) . get ( 0 ) . unwrap ( ) ;
2021-07-18 17:04:25 -07:00
// Sanity: bank0 must be complete so bank1 starts from a clean parent.
assert! ( bank0 . is_frozen ( ) ) ;
assert_eq! ( bank0 . tick_height ( ) , bank0 . max_tick_height ( ) ) ;
let bank1 = Bank ::new_from_parent ( & bank0 , & Pubkey ::default ( ) , 1 ) ;
bank_forks . write ( ) . unwrap ( ) . insert ( bank1 ) ;
2022-04-28 11:51:00 -07:00
let bank1 = bank_forks . read ( ) . unwrap ( ) . get ( 1 ) . unwrap ( ) ;
2021-10-05 22:24:48 -07:00
let bank1_progress = progress
2021-07-18 17:04:25 -07:00
. entry ( bank1 . slot ( ) )
. or_insert_with ( | | ForkProgress ::new ( bank1 . last_blockhash ( ) , None , None , 0 , 0 ) ) ;
// Let the caller fabricate the defective shreds for slot 1.
let shreds = shred_to_insert (
& validator_keypairs . values ( ) . next ( ) . unwrap ( ) . node_keypair ,
bank1 . clone ( ) ,
) ;
2020-01-13 13:13:52 -08:00
blockstore . insert_shreds ( shreds , None , false ) . unwrap ( ) ;
2021-03-12 05:44:06 -08:00
let block_commitment_cache = Arc ::new ( RwLock ::new ( BlockCommitmentCache ::default ( ) ) ) ;
let exit = Arc ::new ( AtomicBool ::new ( false ) ) ;
2020-01-14 17:15:26 -08:00
let res = ReplayStage ::replay_blockstore_into_bank (
2021-07-18 17:04:25 -07:00
& bank1 ,
2020-01-13 13:13:52 -08:00
& blockstore ,
2022-07-28 11:33:19 -07:00
& bank1_progress . replay_stats ,
& bank1_progress . replay_progress ,
2019-11-20 15:43:10 -08:00
None ,
2020-08-07 11:21:35 -07:00
& replay_vote_sender ,
2021-06-18 06:34:46 -07:00
& VerifyRecyclers ::default ( ) ,
2022-07-11 08:53:18 -07:00
None ,
2022-08-31 06:00:55 -07:00
& PrioritizationFeeCache ::new ( 0 u64 ) ,
2019-11-20 15:43:10 -08:00
) ;
2021-12-17 15:03:09 -08:00
let max_complete_transaction_status_slot = Arc ::new ( AtomicU64 ::default ( ) ) ;
2021-09-17 12:40:14 -07:00
let rpc_subscriptions = Arc ::new ( RpcSubscriptions ::new_for_tests (
2021-03-24 23:41:52 -07:00
& exit ,
2021-12-17 15:03:09 -08:00
max_complete_transaction_status_slot ,
2021-03-24 23:41:52 -07:00
bank_forks . clone ( ) ,
block_commitment_cache ,
OptimisticallyConfirmedBank ::locked_from_bank_forks_root ( & bank_forks ) ,
) ) ;
2021-07-26 20:59:00 -07:00
let ( ancestor_hashes_replay_update_sender , _ancestor_hashes_replay_update_receiver ) =
unbounded ( ) ;
2021-03-24 23:41:52 -07:00
// Replay failed as expected: mark the slot dead the way replay_stage
// does in production.
if let Err ( err ) = & res {
ReplayStage ::mark_dead_slot (
& blockstore ,
2021-07-18 17:04:25 -07:00
& bank1 ,
2021-03-24 23:41:52 -07:00
0 ,
err ,
2021-06-16 10:57:52 -07:00
& rpc_subscriptions ,
2021-06-10 22:28:23 -07:00
& mut DuplicateSlotsTracker ::default ( ) ,
2021-08-13 14:21:52 -07:00
& GossipDuplicateConfirmedSlots ::new ( ) ,
& mut EpochSlotsFrozenSlots ::default ( ) ,
2021-03-24 23:41:52 -07:00
& mut progress ,
2021-07-18 17:04:25 -07:00
& mut heaviest_subtree_fork_choice ,
2021-07-08 19:07:32 -07:00
& mut DuplicateSlotsToRepair ::default ( ) ,
2021-07-26 20:59:00 -07:00
& ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
& mut PurgeRepairSlotCounter ::default ( ) ,
2021-03-24 23:41:52 -07:00
) ;
}
2019-06-20 15:50:41 -07:00
// Check that the erroring bank was marked as dead in the progress map
assert! ( progress
2021-07-18 17:04:25 -07:00
. get ( & bank1 . slot ( ) )
2019-06-20 15:50:41 -07:00
. map ( | b | b . is_dead )
. unwrap_or ( false ) ) ;
2020-01-13 13:13:52 -08:00
// Check that the erroring bank was marked as dead in blockstore
2021-07-18 17:04:25 -07:00
assert! ( blockstore . is_dead ( bank1 . slot ( ) ) ) ;
2020-01-14 17:15:26 -08:00
res . map ( | _ | ( ) )
2019-07-07 14:37:12 -07:00
} ;
2022-11-09 11:39:38 -08:00
// Best-effort cleanup of the temp ledger; failures are ignored.
let _ignored = remove_dir_all ( ledger_path ) ;
2019-07-07 14:37:12 -07:00
res
2019-06-20 15:50:41 -07:00
}
2019-07-26 10:27:57 -07:00
// Verifies that ReplayStage::update_commitment_cache feeds the
// AggregateCommitmentService: the leader votes on each of three successive
// banks, and after the service processes the updates, the commitment cache
// should report the leader's stake at confirmation depth 3 for slot 0,
// depth 2 for slot 1, and depth 1 for slot 2.
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
2019-11-04 15:44:27 -08:00
fn test_replay_commitment_cache ( ) {
2020-12-15 12:45:40 -08:00
// Apply a vote for `vote_slot` directly to the leader's vote account
// stored in `bank`, bypassing transaction processing.
fn leader_vote ( vote_slot : Slot , bank : & Arc < Bank > , pubkey : & Pubkey ) {
2021-06-18 06:34:46 -07:00
let mut leader_vote_account = bank . get_account ( pubkey ) . unwrap ( ) ;
2022-08-03 23:12:59 -07:00
let mut vote_state = vote_state ::from ( & leader_vote_account ) . unwrap ( ) ;
vote_state ::process_slot_vote_unchecked ( & mut vote_state , vote_slot ) ;
2020-12-21 15:19:04 -08:00
let versioned = VoteStateVersions ::new_current ( vote_state ) ;
2022-08-03 23:12:59 -07:00
vote_state ::to ( & versioned , & mut leader_vote_account ) . unwrap ( ) ;
2021-06-18 06:34:46 -07:00
bank . store_account ( pubkey , & leader_vote_account ) ;
2019-07-26 10:27:57 -07:00
}
2020-10-19 12:12:08 -07:00
let leader_pubkey = solana_sdk ::pubkey ::new_rand ( ) ;
2019-07-26 10:27:57 -07:00
let leader_lamports = 3 ;
2019-11-08 20:56:57 -08:00
let genesis_config_info =
create_genesis_config_with_leader ( 50 , & leader_pubkey , leader_lamports ) ;
let mut genesis_config = genesis_config_info . genesis_config ;
let leader_voting_pubkey = genesis_config_info . voting_keypair . pubkey ( ) ;
genesis_config . epoch_schedule . warmup = false ;
genesis_config . ticks_per_slot = 4 ;
2021-08-05 06:42:38 -07:00
let bank0 = Bank ::new_for_tests ( & genesis_config ) ;
2019-11-08 20:56:57 -08:00
// Tick bank0 to completion so it can be frozen.
for _ in 0 .. genesis_config . ticks_per_slot {
2019-07-26 10:27:57 -07:00
bank0 . register_tick ( & Hash ::default ( ) ) ;
}
bank0 . freeze ( ) ;
let arc_bank0 = Arc ::new ( bank0 ) ;
2020-12-15 12:45:40 -08:00
let bank_forks = Arc ::new ( RwLock ::new ( BankForks ::new_from_banks ( & [ arc_bank0 ] , 0 ) ) ) ;
2019-07-26 10:27:57 -07:00
2020-05-18 11:49:01 -07:00
let exit = Arc ::new ( AtomicBool ::new ( false ) ) ;
2020-06-25 21:06:58 -07:00
let block_commitment_cache = Arc ::new ( RwLock ::new ( BlockCommitmentCache ::default ( ) ) ) ;
2021-12-17 15:03:09 -08:00
let max_complete_transaction_status_slot = Arc ::new ( AtomicU64 ::default ( ) ) ;
2021-09-17 12:40:14 -07:00
let rpc_subscriptions = Arc ::new ( RpcSubscriptions ::new_for_tests (
2020-05-18 11:49:01 -07:00
& exit ,
2021-12-17 15:03:09 -08:00
max_complete_transaction_status_slot ,
2020-05-18 11:49:01 -07:00
bank_forks . clone ( ) ,
block_commitment_cache . clone ( ) ,
2020-09-28 19:43:05 -07:00
OptimisticallyConfirmedBank ::locked_from_bank_forks_root ( & bank_forks ) ,
2020-05-18 11:49:01 -07:00
) ) ;
2021-06-16 10:57:52 -07:00
// The service consumes CommitmentAggregationData sent on lockouts_sender
// and updates block_commitment_cache asynchronously.
let ( lockouts_sender , _ ) = AggregateCommitmentService ::new (
& exit ,
block_commitment_cache . clone ( ) ,
rpc_subscriptions ,
) ;
2020-05-18 11:49:01 -07:00
2019-11-04 15:44:27 -08:00
// Nothing has been aggregated yet.
assert! ( block_commitment_cache
2019-09-20 19:38:56 -07:00
. read ( )
. unwrap ( )
2019-11-04 15:44:27 -08:00
. get_block_commitment ( 0 )
2019-09-20 19:38:56 -07:00
. is_none ( ) ) ;
2019-11-04 15:44:27 -08:00
assert! ( block_commitment_cache
2019-09-04 23:10:25 -07:00
. read ( )
. unwrap ( )
2019-11-04 15:44:27 -08:00
. get_block_commitment ( 1 )
2019-09-04 23:10:25 -07:00
. is_none ( ) ) ;
2019-07-26 10:27:57 -07:00
2020-12-15 12:45:40 -08:00
// Build banks 1..=3; in each, vote for the previous slot and push a
// commitment update to the aggregation service.
for i in 1 ..= 3 {
2022-04-28 11:51:00 -07:00
let prev_bank = bank_forks . read ( ) . unwrap ( ) . get ( i - 1 ) . unwrap ( ) ;
2020-12-15 12:45:40 -08:00
let bank = Bank ::new_from_parent ( & prev_bank , & Pubkey ::default ( ) , prev_bank . slot ( ) + 1 ) ;
let _res = bank . transfer (
10 ,
& genesis_config_info . mint_keypair ,
& solana_sdk ::pubkey ::new_rand ( ) ,
) ;
for _ in 0 .. genesis_config . ticks_per_slot {
bank . register_tick ( & Hash ::default ( ) ) ;
}
bank_forks . write ( ) . unwrap ( ) . insert ( bank ) ;
2022-04-28 11:51:00 -07:00
let arc_bank = bank_forks . read ( ) . unwrap ( ) . get ( i ) . unwrap ( ) ;
2020-12-15 12:45:40 -08:00
leader_vote ( i - 1 , & arc_bank , & leader_voting_pubkey ) ;
ReplayStage ::update_commitment_cache (
arc_bank . clone ( ) ,
0 ,
leader_lamports ,
& lockouts_sender ,
) ;
arc_bank . freeze ( ) ;
2019-07-26 10:27:57 -07:00
}
2021-03-25 14:16:39 -07:00
// Poll (up to 10 x 200ms) for the asynchronous aggregation service to
// populate commitment for slots 0, 1, and 2.
for _ in 0 .. 10 {
let done = {
let bcc = block_commitment_cache . read ( ) . unwrap ( ) ;
bcc . get_block_commitment ( 0 ) . is_some ( )
& & bcc . get_block_commitment ( 1 ) . is_some ( )
& & bcc . get_block_commitment ( 2 ) . is_some ( )
} ;
if done {
break ;
} else {
thread ::sleep ( Duration ::from_millis ( 200 ) ) ;
}
}
2019-07-26 10:27:57 -07:00
2019-11-04 15:44:27 -08:00
// Slot 0 has three descendant votes stacked on it -> confirmation depth 3.
let mut expected0 = BlockCommitment ::default ( ) ;
2020-12-15 12:45:40 -08:00
expected0 . increase_confirmation_stake ( 3 , leader_lamports ) ;
2019-07-26 10:27:57 -07:00
assert_eq! (
2019-11-04 15:44:27 -08:00
block_commitment_cache
2019-09-04 23:10:25 -07:00
. read ( )
. unwrap ( )
2019-11-04 15:44:27 -08:00
. get_block_commitment ( 0 )
2019-09-04 23:10:25 -07:00
. unwrap ( ) ,
2019-09-20 19:38:56 -07:00
& expected0 ,
2019-07-26 10:27:57 -07:00
) ;
2019-11-04 15:44:27 -08:00
let mut expected1 = BlockCommitment ::default ( ) ;
2019-09-20 19:38:56 -07:00
expected1 . increase_confirmation_stake ( 2 , leader_lamports ) ;
2019-07-26 10:27:57 -07:00
assert_eq! (
2019-11-04 15:44:27 -08:00
block_commitment_cache
2019-09-04 23:10:25 -07:00
. read ( )
. unwrap ( )
2019-11-04 15:44:27 -08:00
. get_block_commitment ( 1 )
2019-09-04 23:10:25 -07:00
. unwrap ( ) ,
2019-09-20 19:38:56 -07:00
& expected1
2019-07-26 10:27:57 -07:00
) ;
2019-11-04 15:44:27 -08:00
let mut expected2 = BlockCommitment ::default ( ) ;
2019-09-20 19:38:56 -07:00
expected2 . increase_confirmation_stake ( 1 , leader_lamports ) ;
2019-07-26 10:27:57 -07:00
assert_eq! (
2019-11-04 15:44:27 -08:00
block_commitment_cache
2019-09-04 23:10:25 -07:00
. read ( )
. unwrap ( )
2019-11-04 15:44:27 -08:00
. get_block_commitment ( 2 )
2019-09-04 23:10:25 -07:00
. unwrap ( ) ,
2019-09-20 19:38:56 -07:00
& expected2
2019-07-26 10:27:57 -07:00
) ;
}
2019-11-19 17:55:42 -08:00
2019-11-20 15:43:10 -08:00
// Verifies transaction statuses are persisted to the blockstore: populate a
// rooted slot with test transaction entries, then read it back via
// get_rooted_block and check each transaction's (signature, status) pair —
// the first succeeds with Ok(()), the second fails with
// InstructionError::Custom(1) — with no extra transactions present.
// NOTE: bare timestamp lines below are git-blame artifacts, not code.
#[ test ]
fn test_write_persist_transaction_status ( ) {
let GenesisConfigInfo {
2022-01-11 10:32:25 -08:00
mut genesis_config ,
2019-11-20 15:43:10 -08:00
mint_keypair ,
..
2022-01-11 10:32:25 -08:00
} = create_genesis_config ( solana_sdk ::native_token ::sol_to_lamports ( 1000.0 ) ) ;
// Enable rent so rent-exemption minimums below are non-zero.
genesis_config . rent . lamports_per_byte_year = 50 ;
genesis_config . rent . exemption_threshold = 2.0 ;
2019-11-25 11:08:03 -08:00
let ( ledger_path , _ ) = create_new_tmp_ledger! ( & genesis_config ) ;
2019-11-20 15:43:10 -08:00
// Inner scope so the blockstore is dropped before Blockstore::destroy.
{
2020-01-13 13:13:52 -08:00
let blockstore = Blockstore ::open ( & ledger_path )
2019-11-20 15:43:10 -08:00
. expect ( " Expected to successfully open database ledger " ) ;
2020-01-13 13:13:52 -08:00
let blockstore = Arc ::new ( blockstore ) ;
2019-11-20 15:43:10 -08:00
let keypair1 = Keypair ::new ( ) ;
let keypair2 = Keypair ::new ( ) ;
let keypair3 = Keypair ::new ( ) ;
2021-08-05 06:42:38 -07:00
let bank0 = Arc ::new ( Bank ::new_for_tests ( & genesis_config ) ) ;
2019-11-20 15:43:10 -08:00
// Pre-fund keypair2 to rent exemption for the test transactions.
bank0
2022-01-11 10:32:25 -08:00
. transfer (
bank0 . get_minimum_balance_for_rent_exemption ( 0 ) ,
& mint_keypair ,
& keypair2 . pubkey ( ) ,
)
2019-11-20 15:43:10 -08:00
. unwrap ( ) ;
let bank1 = Arc ::new ( Bank ::new_from_parent ( & bank0 , & Pubkey ::default ( ) , 1 ) ) ;
let slot = bank1 . slot ( ) ;
2022-03-07 23:20:34 -08:00
let ( entries , test_signatures ) = create_test_transaction_entries (
2019-11-25 11:08:03 -08:00
vec! [ & mint_keypair , & keypair1 , & keypair2 , & keypair3 ] ,
2022-03-07 23:20:34 -08:00
bank1 . clone ( ) ,
) ;
// Write the entries (and their statuses) into the blockstore for slot 1.
populate_blockstore_for_tests (
entries ,
2019-11-25 11:08:03 -08:00
bank1 ,
2020-01-13 13:13:52 -08:00
blockstore . clone ( ) ,
2021-03-26 15:47:35 -07:00
Arc ::new ( AtomicU64 ::default ( ) ) ,
2022-03-07 23:20:34 -08:00
) ;
2019-11-20 15:43:10 -08:00
2022-03-07 23:20:34 -08:00
let mut test_signatures_iter = test_signatures . into_iter ( ) ;
2021-03-26 15:47:35 -07:00
let confirmed_block = blockstore . get_rooted_block ( slot , false ) . unwrap ( ) ;
2022-02-09 21:28:18 -08:00
let actual_tx_results : Vec < _ > = confirmed_block
. transactions
. into_iter ( )
. map ( | VersionedTransactionWithStatusMeta { transaction , meta } | {
( transaction . signatures [ 0 ] , meta . status )
} )
. collect ( ) ;
let expected_tx_results = vec! [
( test_signatures_iter . next ( ) . unwrap ( ) , Ok ( ( ) ) ) ,
(
test_signatures_iter . next ( ) . unwrap ( ) ,
Err ( TransactionError ::InstructionError (
0 ,
InstructionError ::Custom ( 1 ) ,
) ) ,
) ,
] ;
assert_eq! ( actual_tx_results , expected_tx_results ) ;
// All produced signatures must be accounted for.
assert! ( test_signatures_iter . next ( ) . is_none ( ) ) ;
2019-11-20 15:43:10 -08:00
}
2020-01-13 13:13:52 -08:00
Blockstore ::destroy ( & ledger_path ) . unwrap ( ) ;
2019-11-20 15:43:10 -08:00
}
2020-02-03 16:48:24 -08:00
2020-03-04 11:49:56 -08:00
#[test]
fn test_compute_bank_stats_confirmed() {
    // One validator, whose vote in bank 1 should end up confirming slot 0.
    let vote_keypairs = ValidatorVoteKeypairs::new_rand();
    let my_node_pubkey = vote_keypairs.node_keypair.pubkey();
    let my_vote_pubkey = vote_keypairs.vote_keypair.pubkey();
    let keypairs: HashMap<_, _> = vec![(my_node_pubkey, vote_keypairs)].into_iter().collect();

    let (bank_forks, mut progress, mut heaviest_subtree_fork_choice) =
        vote_simulator::initialize_state(&keypairs, 10_000);
    let mut latest_validator_votes_for_frozen_banks =
        LatestValidatorVotesForFrozenBanks::default();
    let bank0 = bank_forks.get(0).unwrap();
    let my_keypairs = keypairs.get(&my_node_pubkey).unwrap();
    // Build a vote for slot 0; it will be processed in bank 1 below.
    let vote_tx = vote_transaction::new_vote_transaction(
        vec![0],
        bank0.hash(),
        bank0.last_blockhash(),
        &my_keypairs.node_keypair,
        &my_keypairs.vote_keypair,
        &my_keypairs.vote_keypair,
        None,
    );

    let bank_forks = RwLock::new(bank_forks);
    let bank1 = Bank::new_from_parent(&bank0, &my_node_pubkey, 1);
    bank1.process_transaction(&vote_tx).unwrap();
    bank1.freeze();

    // Test confirmations
    let ancestors = bank_forks.read().unwrap().ancestors();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let mut tower = Tower::new_for_tests(0, 0.67);
    let newly_computed = ReplayStage::compute_bank_stats(
        &my_vote_pubkey,
        &ancestors,
        &mut frozen_banks,
        &mut tower,
        &mut progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        &mut heaviest_subtree_fork_choice,
        &mut latest_validator_votes_for_frozen_banks,
    );

    // bank 0 has no votes, should not send any votes on the channel
    assert_eq!(newly_computed, vec![0]);

    // The only vote is in bank 1, and bank_forks does not currently contain
    // bank 1, so no slot should be confirmed.
    {
        let fork_progress = progress.get(&0).unwrap();
        let confirmed_forks = ReplayStage::confirm_forks(
            &tower,
            &fork_progress.fork_stats.voted_stakes,
            fork_progress.fork_stats.total_stake,
            &progress,
            &bank_forks,
        );
        assert!(confirmed_forks.is_empty());
    }

    // Insert the bank that contains a vote for slot 0, which confirms slot 0
    bank_forks.write().unwrap().insert(bank1);
    progress.insert(
        1,
        ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
    );
    let ancestors = bank_forks.read().unwrap().ancestors();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let newly_computed = ReplayStage::compute_bank_stats(
        &my_vote_pubkey,
        &ancestors,
        &mut frozen_banks,
        &mut tower,
        &mut progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        &mut heaviest_subtree_fork_choice,
        &mut latest_validator_votes_for_frozen_banks,
    );

    // Bank 1 had one vote
    assert_eq!(newly_computed, vec![1]);
    {
        let fork_progress = progress.get(&1).unwrap();
        let confirmed_forks = ReplayStage::confirm_forks(
            &tower,
            &fork_progress.fork_stats.voted_stakes,
            fork_progress.fork_stats.total_stake,
            &progress,
            &bank_forks,
        );
        // The vote in bank 1 should now confirm slot 0
        assert_eq!(confirmed_forks, vec![(0, bank0.hash())]);
    }

    // Running the computation again over the same frozen banks should be a no-op
    let ancestors = bank_forks.read().unwrap().ancestors();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let newly_computed = ReplayStage::compute_bank_stats(
        &my_vote_pubkey,
        &ancestors,
        &mut frozen_banks,
        &mut tower,
        &mut progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &bank_forks,
        &mut heaviest_subtree_fork_choice,
        &mut latest_validator_votes_for_frozen_banks,
    );
    // No new stats should have been computed
    assert!(newly_computed.is_empty());
}
2020-04-10 23:52:37 -07:00
#[test]
fn test_same_weight_select_lower_slot() {
    // Init state
    let mut vote_simulator = VoteSimulator::new(1);
    let mut tower = Tower::default();

    // Two sibling children of the root: slot 0 forks into slots 1 and 2.
    let forks = tr(0) / (tr(1)) / (tr(2));
    vote_simulator.fill_bank_forks(forks, &HashMap::new(), true);

    let mut frozen_banks: Vec<_> = vote_simulator
        .bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
    let mut latest_validator_votes_for_frozen_banks =
        LatestValidatorVotesForFrozenBanks::default();
    let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
    let my_vote_pubkey = vote_simulator.vote_pubkeys[0];
    ReplayStage::compute_bank_stats(
        &my_vote_pubkey,
        &ancestors,
        &mut frozen_banks,
        &mut tower,
        &mut vote_simulator.progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &vote_simulator.bank_forks,
        heaviest_subtree_fork_choice,
        &mut latest_validator_votes_for_frozen_banks,
    );

    let bank1 = vote_simulator.bank_forks.read().unwrap().get(1).unwrap();
    let bank2 = vote_simulator.bank_forks.read().unwrap().get(2).unwrap();
    // With no votes, both subtrees carry identical stake.
    assert_eq!(
        heaviest_subtree_fork_choice
            .stake_voted_subtree(&(1, bank1.hash()))
            .unwrap(),
        heaviest_subtree_fork_choice
            .stake_voted_subtree(&(2, bank2.hash()))
            .unwrap()
    );

    let (heaviest_bank, _) = heaviest_subtree_fork_choice.select_forks(
        &frozen_banks,
        &tower,
        &vote_simulator.progress,
        &ancestors,
        &vote_simulator.bank_forks,
    );
    // Should pick the lower of the two equally weighted banks
    assert_eq!(heaviest_bank.slot(), 1);
}
2020-02-03 16:48:24 -08:00
#[test]
fn test_child_bank_heavier() {
    // Init state
    let mut vote_simulator = VoteSimulator::new(1);
    let my_node_pubkey = vote_simulator.node_pubkeys[0];
    let mut tower = Tower::default();

    // A single linear chain: 0 -> 1 -> 2 -> 3.
    let forks = tr(0) / (tr(1) / (tr(2) / (tr(3))));

    // Set the voting behavior: our lone validator votes on slot 2.
    let mut cluster_votes = HashMap::new();
    let votes = vec![2];
    cluster_votes.insert(my_node_pubkey, votes.clone());
    vote_simulator.fill_bank_forks(forks, &cluster_votes, true);

    // Fill banks with votes
    for vote in votes {
        assert!(vote_simulator
            .simulate_vote(vote, &my_node_pubkey, &mut tower)
            .is_empty());
    }

    let mut frozen_banks: Vec<_> = vote_simulator
        .bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();
    let my_vote_pubkey = vote_simulator.vote_pubkeys[0];
    ReplayStage::compute_bank_stats(
        &my_vote_pubkey,
        &vote_simulator.bank_forks.read().unwrap().ancestors(),
        &mut frozen_banks,
        &mut tower,
        &mut vote_simulator.progress,
        &VoteTracker::default(),
        &ClusterSlots::default(),
        &vote_simulator.bank_forks,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );

    // Along the chain, each child must be at least as heavy as its parent.
    frozen_banks.sort_by_key(|bank| bank.slot());
    for pair in frozen_banks.windows(2) {
        let parent_weight = vote_simulator
            .progress
            .get_fork_stats(pair[0].slot())
            .unwrap()
            .fork_weight;
        let child_weight = vote_simulator
            .progress
            .get_fork_stats(pair[1].slot())
            .unwrap()
            .fork_weight;
        assert!(child_weight >= parent_weight);
    }

    for bank in frozen_banks {
        // The only leaf should always be chosen over parents
        assert_eq!(
            vote_simulator
                .heaviest_subtree_fork_choice
                .best_slot(&(bank.slot(), bank.hash()))
                .unwrap()
                .0,
            3
        );
    }
}
2020-03-26 19:57:27 -07:00
2020-03-26 23:33:28 -07:00
#[test]
fn test_should_retransmit() {
    let poh_slot = 4;
    let mut last_retransmit_slot = 4;
    // We retransmitted already at slot 4, shouldn't retransmit until
    // >= 4 + NUM_CONSECUTIVE_LEADER_SLOTS, or if we reset to < 4
    assert!(!ReplayStage::should_retransmit(
        poh_slot,
        &mut last_retransmit_slot
    ));
    assert_eq!(last_retransmit_slot, 4);

    // Every slot inside the consecutive-leader window is still a no-op.
    for poh_slot in 4..4 + NUM_CONSECUTIVE_LEADER_SLOTS {
        assert!(!ReplayStage::should_retransmit(
            poh_slot,
            &mut last_retransmit_slot
        ));
        assert_eq!(last_retransmit_slot, 4);
    }

    // First slot past the window: retransmit, and record the new slot.
    let poh_slot = 4 + NUM_CONSECUTIVE_LEADER_SLOTS;
    last_retransmit_slot = 4;
    assert!(ReplayStage::should_retransmit(
        poh_slot,
        &mut last_retransmit_slot
    ));
    assert_eq!(last_retransmit_slot, poh_slot);

    // A reset to a slot below the last retransmit also triggers retransmission.
    let poh_slot = 3;
    last_retransmit_slot = 4;
    assert!(ReplayStage::should_retransmit(
        poh_slot,
        &mut last_retransmit_slot
    ));
    assert_eq!(last_retransmit_slot, poh_slot);
}
2020-03-26 19:57:27 -07:00
#[test]
fn test_update_slot_propagated_threshold_from_votes() {
    // Ten validators, equal stake each (set inside the helper).
    let keypairs: HashMap<_, _> = iter::repeat_with(|| {
        let vote_keypairs = ValidatorVoteKeypairs::new_rand();
        (vote_keypairs.node_keypair.pubkey(), vote_keypairs)
    })
    .take(10)
    .collect();

    let new_vote_pubkeys: Vec<_> = keypairs
        .values()
        .map(|keys| keys.vote_keypair.pubkey())
        .collect();
    let new_node_pubkeys: Vec<_> = keypairs
        .values()
        .map(|keys| keys.node_keypair.pubkey())
        .collect();

    // Once 4/10 validators have voted, we have hit threshold
    run_test_update_slot_propagated_threshold_from_votes(&keypairs, &new_vote_pubkeys, &[], 4);

    // Adding the same node pubkeys instead of the corresponding
    // vote pubkeys should be equivalent
    run_test_update_slot_propagated_threshold_from_votes(&keypairs, &[], &new_node_pubkeys, 4);

    // Adding the same node pubkeys in the same order as their
    // corresponding vote accounts is redundant, so we don't
    // reach the threshold any sooner.
    run_test_update_slot_propagated_threshold_from_votes(
        &keypairs,
        &new_vote_pubkeys,
        &new_node_pubkeys,
        4,
    );

    // However, if we add different node pubkeys than the
    // vote accounts, we should hit threshold much faster
    // because now we are getting 2 new pubkeys on each
    // iteration instead of 1, so by the 2nd iteration
    // we should have 4/10 validators voting
    run_test_update_slot_propagated_threshold_from_votes(
        &keypairs,
        &new_vote_pubkeys[0..5],
        &new_node_pubkeys[5..],
        2,
    );
}
2020-03-26 19:57:27 -07:00
2020-03-30 19:57:11 -07:00
fn run_test_update_slot_propagated_threshold_from_votes (
all_keypairs : & HashMap < Pubkey , ValidatorVoteKeypairs > ,
new_vote_pubkeys : & [ Pubkey ] ,
new_node_pubkeys : & [ Pubkey ] ,
success_index : usize ,
) {
2020-03-26 19:57:27 -07:00
let stake = 10_000 ;
2021-07-08 19:07:32 -07:00
let ( bank_forks , _ , _ ) = vote_simulator ::initialize_state ( all_keypairs , stake ) ;
2020-12-27 05:28:05 -08:00
let root_bank = bank_forks . root_bank ( ) ;
2020-03-26 19:57:27 -07:00
let mut propagated_stats = PropagatedStats {
2020-03-30 19:57:11 -07:00
total_epoch_stake : stake * all_keypairs . len ( ) as u64 ,
2020-03-26 19:57:27 -07:00
.. PropagatedStats ::default ( )
} ;
2020-03-30 19:57:11 -07:00
let child_reached_threshold = false ;
for i in 0 .. std ::cmp ::max ( new_vote_pubkeys . len ( ) , new_node_pubkeys . len ( ) ) {
2020-03-26 19:57:27 -07:00
propagated_stats . is_propagated = false ;
2020-03-30 19:57:11 -07:00
let len = std ::cmp ::min ( i , new_vote_pubkeys . len ( ) ) ;
2022-01-21 16:01:22 -08:00
let mut voted_pubkeys = new_vote_pubkeys [ .. len ] . to_vec ( ) ;
2020-03-30 19:57:11 -07:00
let len = std ::cmp ::min ( i , new_node_pubkeys . len ( ) ) ;
2022-01-21 16:01:22 -08:00
let mut node_pubkeys = new_node_pubkeys [ .. len ] . to_vec ( ) ;
2020-03-26 19:57:27 -07:00
let did_newly_reach_threshold =
ReplayStage ::update_slot_propagated_threshold_from_votes (
2020-03-30 19:57:11 -07:00
& mut voted_pubkeys ,
& mut node_pubkeys ,
2020-03-26 19:57:27 -07:00
& root_bank ,
& mut propagated_stats ,
child_reached_threshold ,
) ;
// Only the i'th voted pubkey should be new (everything else was
// inserted in previous iteration of the loop), so those redundant
2020-03-30 19:57:11 -07:00
// pubkeys should have been filtered out
let remaining_vote_pubkeys = {
if i = = 0 | | i > = new_vote_pubkeys . len ( ) {
2020-03-26 19:57:27 -07:00
vec! [ ]
} else {
2021-02-07 18:07:00 -08:00
vec! [ new_vote_pubkeys [ i - 1 ] ]
2020-03-26 19:57:27 -07:00
}
} ;
2020-03-30 19:57:11 -07:00
let remaining_node_pubkeys = {
if i = = 0 | | i > = new_node_pubkeys . len ( ) {
vec! [ ]
} else {
2021-02-07 18:07:00 -08:00
vec! [ new_node_pubkeys [ i - 1 ] ]
2020-03-30 19:57:11 -07:00
}
} ;
assert_eq! ( voted_pubkeys , remaining_vote_pubkeys ) ;
assert_eq! ( node_pubkeys , remaining_node_pubkeys ) ;
2020-03-26 19:57:27 -07:00
// If we crossed the superminority threshold, then
// `did_newly_reach_threshold == true`, otherwise the
// threshold has not been reached
2020-03-30 19:57:11 -07:00
if i > = success_index {
2020-03-26 19:57:27 -07:00
assert! ( propagated_stats . is_propagated ) ;
assert! ( did_newly_reach_threshold ) ;
} else {
assert! ( ! propagated_stats . is_propagated ) ;
assert! ( ! did_newly_reach_threshold ) ;
}
}
2020-03-30 19:57:11 -07:00
}
2020-03-26 19:57:27 -07:00
2020-03-30 19:57:11 -07:00
#[test]
fn test_update_slot_propagated_threshold_from_votes2() {
    let mut empty: Vec<Pubkey> = vec![];
    let genesis_config = create_genesis_config(100_000_000).genesis_config;
    let root_bank = Bank::new_for_tests(&genesis_config);
    let stake = 10_000;

    // Simulate a child slot seeing threshold (`child_reached_threshold` = true),
    // then the parent should also be marked as having reached threshold,
    // even if there are no new pubkeys to add (`newly_voted_pubkeys.is_empty()`)
    // NOTE: the redundant `propagated_stats.total_epoch_stake = stake * 10;`
    // re-assignment that followed this struct literal was removed — the field
    // is already set to the same value in the literal itself.
    let mut propagated_stats = PropagatedStats {
        total_epoch_stake: stake * 10,
        ..PropagatedStats::default()
    };
    let child_reached_threshold = true;
    let mut newly_voted_pubkeys: Vec<Pubkey> = vec![];
    assert!(ReplayStage::update_slot_propagated_threshold_from_votes(
        &mut newly_voted_pubkeys,
        &mut empty,
        &root_bank,
        &mut propagated_stats,
        child_reached_threshold,
    ));

    // If propagation already happened (propagated_stats.is_propagated = true),
    // always returns false
    propagated_stats = PropagatedStats {
        total_epoch_stake: stake * 10,
        ..PropagatedStats::default()
    };
    propagated_stats.is_propagated = true;
    newly_voted_pubkeys = vec![];
    assert!(!ReplayStage::update_slot_propagated_threshold_from_votes(
        &mut newly_voted_pubkeys,
        &mut empty,
        &root_bank,
        &mut propagated_stats,
        child_reached_threshold,
    ));

    // Without the child having reached threshold and with no new votes,
    // the parent cannot newly reach threshold either.
    let child_reached_threshold = false;
    assert!(!ReplayStage::update_slot_propagated_threshold_from_votes(
        &mut newly_voted_pubkeys,
        &mut empty,
        &root_bank,
        &mut propagated_stats,
        child_reached_threshold,
    ));
}
#[test]
fn test_update_propagation_status() {
    // Create genesis stakers
    let vote_keypairs = ValidatorVoteKeypairs::new_rand();
    let node_pubkey = vote_keypairs.node_keypair.pubkey();
    let vote_pubkey = vote_keypairs.vote_keypair.pubkey();
    let keypairs: HashMap<_, _> = vec![(node_pubkey, vote_keypairs)].into_iter().collect();
    let stake = 10_000;
    let (mut bank_forks, mut progress_map, _) =
        vote_simulator::initialize_state(&keypairs, stake);

    // Chain 0 -> 9 -> 10 and root at 9.
    let bank0 = bank_forks.get(0).unwrap();
    bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 9));
    let bank9 = bank_forks.get(9).unwrap();
    bank_forks.insert(Bank::new_from_parent(&bank9, &Pubkey::default(), 10));
    bank_forks.set_root(9, &AbsRequestSender::default(), None);
    let total_epoch_stake = bank0.total_epoch_stake();

    // Insert new ForkProgress for slot 10 and its
    // previous leader slot 9
    progress_map.insert(
        10,
        ForkProgress::new(
            Hash::default(),
            Some(9),
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            }),
            0,
            0,
        ),
    );
    progress_map.insert(
        9,
        ForkProgress::new(
            Hash::default(),
            Some(8),
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            }),
            0,
            0,
        ),
    );

    // Make sure is_propagated == false so that the propagation logic
    // runs in `update_propagation_status`
    assert!(!progress_map.get_leader_propagation_slot_must_exist(10).0);

    let vote_tracker = VoteTracker::default();
    vote_tracker.insert_vote(10, vote_pubkey);
    ReplayStage::update_propagation_status(
        &mut progress_map,
        10,
        &RwLock::new(bank_forks),
        &vote_tracker,
        &ClusterSlots::default(),
    );

    let propagated_stats = &progress_map.get(&10).unwrap().propagated_stats;
    // There should now be a cached reference to the VoteTracker for
    // slot 10
    assert!(propagated_stats.slot_vote_tracker.is_some());
    // Updates should have been consumed
    assert!(propagated_stats
        .slot_vote_tracker
        .as_ref()
        .unwrap()
        .write()
        .unwrap()
        .get_voted_slot_updates()
        .is_none());
    // The voter should be recorded
    assert!(propagated_stats
        .propagated_validators
        .contains(&vote_pubkey));
    assert_eq!(propagated_stats.propagated_validators_stake, stake);
}
#[test]
fn test_chain_update_propagation_status() {
    let keypairs: HashMap<_, _> = iter::repeat_with(|| {
        let vote_keypairs = ValidatorVoteKeypairs::new_rand();
        (vote_keypairs.node_keypair.pubkey(), vote_keypairs)
    })
    .take(10)
    .collect();
    let vote_pubkeys: Vec<_> = keypairs
        .values()
        .map(|keys| keys.vote_keypair.pubkey())
        .collect();

    let stake_per_validator = 10_000;
    let (mut bank_forks, mut progress_map, _) =
        vote_simulator::initialize_state(&keypairs, stake_per_validator);
    progress_map
        .get_propagated_stats_mut(0)
        .unwrap()
        .is_leader_slot = true;
    bank_forks.set_root(0, &AbsRequestSender::default(), None);

    let total_epoch_stake = bank_forks.root_bank().total_epoch_stake();

    // Insert new ForkProgress representing a slot for all slots 1..=num_banks. Only
    // make even numbered ones leader slots
    for i in 1..=10 {
        let parent_bank = bank_forks.get(i - 1).unwrap().clone();
        let prev_leader_slot = ((i - 1) / 2) * 2;
        bank_forks.insert(Bank::new_from_parent(&parent_bank, &Pubkey::default(), i));
        let leader_stake_info = if i % 2 == 0 {
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            })
        } else {
            None
        };
        progress_map.insert(
            i,
            ForkProgress::new(
                Hash::default(),
                Some(prev_leader_slot),
                leader_stake_info,
                0,
                0,
            ),
        );
    }

    let vote_tracker = VoteTracker::default();
    for vote_pubkey in &vote_pubkeys {
        // Insert a vote for the last bank for each voter
        vote_tracker.insert_vote(10, *vote_pubkey);
    }

    // The last bank should reach propagation threshold, and propagate it all
    // the way back through earlier leader banks
    ReplayStage::update_propagation_status(
        &mut progress_map,
        10,
        &RwLock::new(bank_forks),
        &vote_tracker,
        &ClusterSlots::default(),
    );

    // Only the even numbered ones were leader banks, so only
    // those should have been updated
    for i in 1..=10 {
        let propagated_stats = &progress_map.get(&i).unwrap().propagated_stats;
        assert_eq!(propagated_stats.is_propagated, i % 2 == 0);
    }
}
#[test]
fn test_chain_update_propagation_status2() {
    let num_validators = 6;
    let keypairs: HashMap<_, _> = iter::repeat_with(|| {
        let vote_keypairs = ValidatorVoteKeypairs::new_rand();
        (vote_keypairs.node_keypair.pubkey(), vote_keypairs)
    })
    .take(num_validators)
    .collect();
    let vote_pubkeys: Vec<_> = keypairs
        .values()
        .map(|keys| keys.vote_keypair.pubkey())
        .collect();

    let stake_per_validator = 10_000;
    let (mut bank_forks, mut progress_map, _) =
        vote_simulator::initialize_state(&keypairs, stake_per_validator);
    progress_map
        .get_propagated_stats_mut(0)
        .unwrap()
        .is_leader_slot = true;
    bank_forks.set_root(0, &AbsRequestSender::default(), None);

    let total_epoch_stake = num_validators as u64 * stake_per_validator;

    // Insert a ForkProgress for every slot 1..=10; each slot's previous
    // leader slot is its direct parent.
    for i in 1..=10 {
        let parent_bank = bank_forks.get(i - 1).unwrap().clone();
        let prev_leader_slot = i - 1;
        bank_forks.insert(Bank::new_from_parent(&parent_bank, &Pubkey::default(), i));
        let mut fork_progress = ForkProgress::new(
            Hash::default(),
            Some(prev_leader_slot),
            Some(ValidatorStakeInfo {
                total_epoch_stake,
                ..ValidatorStakeInfo::default()
            }),
            0,
            0,
        );
        // The earlier slots are one pubkey away from reaching confirmation;
        // the later slots are two pubkeys away.
        let end_range = if i < 5 { 2 } else { 1 };
        fork_progress.propagated_stats.propagated_validators =
            vote_pubkeys[0..end_range].iter().copied().collect();
        fork_progress.propagated_stats.propagated_validators_stake =
            end_range as u64 * stake_per_validator;
        progress_map.insert(i, fork_progress);
    }

    let vote_tracker = VoteTracker::default();
    // Insert a new vote
    vote_tracker.insert_vote(10, vote_pubkeys[2]);

    // The last bank should reach propagation threshold, and propagate it all
    // the way back through earlier leader banks
    ReplayStage::update_propagation_status(
        &mut progress_map,
        10,
        &RwLock::new(bank_forks),
        &vote_tracker,
        &ClusterSlots::default(),
    );

    // Only the first 5 banks should have reached the threshold
    for i in 1..=10 {
        let propagated_stats = &progress_map.get(&i).unwrap().propagated_stats;
        assert_eq!(propagated_stats.is_propagated, i < 5);
    }
}
#[test]
fn test_check_propagation_for_start_leader() {
    let mut progress_map = ProgressMap::default();
    let poh_slot = 5;
    let parent_slot = poh_slot - NUM_CONSECUTIVE_LEADER_SLOTS;

    // If there is no previous leader slot (previous leader slot is None),
    // should succeed
    progress_map.insert(
        parent_slot,
        ForkProgress::new(Hash::default(), None, None, 0, 0),
    );
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Now if we make the parent itself the leader, then it requires the propagation
    // confirmation check because the parent is at least NUM_CONSECUTIVE_LEADER_SLOTS
    // slots from the `poh_slot`
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );
    assert!(!ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Marking the parent propagated satisfies the check.
    progress_map
        .get_mut(&parent_slot)
        .unwrap()
        .propagated_stats
        .is_propagated = true;
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Now, set up the progress map to show that the `previous_leader_slot` of 5 is
    // `parent_slot - 1` (not equal to the actual parent!), so `parent_slot - 1` needs
    // to see propagation confirmation before we can start a leader for block 5
    let previous_leader_slot = parent_slot - 1;
    progress_map.insert(
        parent_slot,
        ForkProgress::new(Hash::default(), Some(previous_leader_slot), None, 0, 0),
    );
    progress_map.insert(
        previous_leader_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );

    // `previous_leader_slot` has not seen propagation threshold, so should fail
    assert!(!ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // If we set the is_propagated = true for the `previous_leader_slot`, should
    // allow the block to be generated
    progress_map
        .get_mut(&previous_leader_slot)
        .unwrap()
        .propagated_stats
        .is_propagated = true;
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // If the root is now set to `parent_slot`, this filters out `previous_leader_slot` from the
    // progress map, which implies confirmation
    let bank0 = Bank::new_for_tests(&genesis_config::create_genesis_config(10000).0);
    let parent_slot_bank =
        Bank::new_from_parent(&Arc::new(bank0), &Pubkey::default(), parent_slot);
    let mut bank_forks = BankForks::new(parent_slot_bank);
    let bank5 =
        Bank::new_from_parent(&bank_forks.get(parent_slot).unwrap(), &Pubkey::default(), 5);
    bank_forks.insert(bank5);

    // Should purge only `previous_leader_slot` from the progress map
    progress_map.handle_new_root(&bank_forks);

    // Should succeed
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));
}
#[test]
fn test_check_propagation_skip_propagation_check() {
    let mut progress_map = ProgressMap::default();
    let poh_slot = 4;
    let mut parent_slot = poh_slot - 1;

    // Set up the progress map to show that the last leader slot of 4 is 3,
    // which means 3 and 4 are consecutive leader slots
    progress_map.insert(
        3,
        ForkProgress::new(
            Hash::default(),
            None,
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );

    // If the previous leader slot has not seen propagation threshold, but
    // was the direct parent (implying consecutive leader slots), create
    // the block regardless
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // If propagation threshold was achieved on parent, block should
    // also be created
    progress_map
        .get_mut(&3)
        .unwrap()
        .propagated_stats
        .is_propagated = true;
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Now insert another parent slot 2 for which this validator is also the leader
    parent_slot = poh_slot - NUM_CONSECUTIVE_LEADER_SLOTS + 1;
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );

    // Even though `parent_slot` and `poh_slot` are separated by another block,
    // because they're within `NUM_CONSECUTIVE` blocks of each other, the propagation
    // check is still skipped
    assert!(ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));

    // Once the distance becomes >= NUM_CONSECUTIVE_LEADER_SLOTS, then we need to
    // enforce the propagation check
    parent_slot = poh_slot - NUM_CONSECUTIVE_LEADER_SLOTS;
    progress_map.insert(
        parent_slot,
        ForkProgress::new(
            Hash::default(),
            None,
            Some(ValidatorStakeInfo::default()),
            0,
            0,
        ),
    );
    assert!(!ReplayStage::check_propagation_for_start_leader(
        poh_slot,
        parent_slot,
        &progress_map,
    ));
}
#[test]
fn test_purge_unconfirmed_duplicate_slot() {
    // Default forks: 0 -> 1 -> (2, 3 -> 4 -> 5 -> 6); two voting validators.
    let (vote_simulator, blockstore) = setup_default_forks(2, None::<GenerateVotes>);
    let VoteSimulator {
        bank_forks,
        node_pubkeys,
        mut progress,
        validator_keypairs,
        ..
    } = vote_simulator;

    // Create bank 7 on top of the current fork tip (slot 6)
    let root_bank = bank_forks.read().unwrap().root_bank();
    let bank7 = Bank::new_from_parent(
        &bank_forks.read().unwrap().get(6).unwrap(),
        &Pubkey::default(),
        7,
    );
    bank_forks.write().unwrap().insert(bank7);
    blockstore.add_tree(tr(6) / tr(7), false, false, 3, Hash::default());
    let bank7 = bank_forks.read().unwrap().get(7).unwrap();
    let mut descendants = bank_forks.read().unwrap().descendants();
    let mut ancestors = bank_forks.read().unwrap().ancestors();

    // Process a transfer on bank 7
    let sender = node_pubkeys[0];
    let receiver = node_pubkeys[1];
    let old_balance = bank7.get_balance(&sender);
    let transfer_amount = old_balance / 2;
    let transfer_sig = bank7
        .transfer(
            transfer_amount,
            &validator_keypairs.get(&sender).unwrap().node_keypair,
            &receiver,
        )
        .unwrap();

    // Process a vote for slot 0 in bank 7
    let validator0_keypairs = &validator_keypairs.get(&sender).unwrap();
    let bank0 = bank_forks.read().unwrap().get(0).unwrap();
    let vote_tx = vote_transaction::new_vote_transaction(
        vec![0],
        bank0.hash(),
        bank0.last_blockhash(),
        &validator0_keypairs.node_keypair,
        &validator0_keypairs.vote_keypair,
        &validator0_keypairs.vote_keypair,
        None,
    );
    bank7.process_transaction(&vote_tx).unwrap();

    // Both signatures should exist in status cache
    assert!(bank7.get_signature_status(&vote_tx.signatures[0]).is_some());
    assert!(bank7.get_signature_status(&transfer_sig).is_some());

    // Mark slot 7 dead
    blockstore
        .set_dead_slot(7)
        .expect("Failed to mark slot as dead in blockstore");

    // Purging slot 5 should purge only slots 5 and its descendants 6,7
    ReplayStage::purge_unconfirmed_duplicate_slot(
        5,
        &mut ancestors,
        &mut descendants,
        &mut progress,
        &root_bank,
        &bank_forks,
        &blockstore,
    );
    for i in 5..=7 {
        assert!(bank_forks.read().unwrap().get(i).is_none());
        assert!(progress.get(&i).is_none());
    }
    for i in 0..=4 {
        assert!(bank_forks.read().unwrap().get(i).is_some());
        assert!(progress.get(&i).is_some());
    }

    // Blockstore should have been cleared
    for slot in &[5, 6, 7] {
        assert!(!blockstore.is_full(*slot));
        // Slot 7 was marked dead before, should no longer be marked
        assert!(!blockstore.is_dead(*slot));
        assert!(blockstore.get_slot_entries(*slot, 0).unwrap().is_empty());
    }

    // Should not be able to find signatures in the purged bank 7 for the
    // previously processed transactions
    assert!(bank7.get_signature_status(&vote_tx.signatures[0]).is_none());
    assert!(bank7.get_signature_status(&transfer_sig).is_none());
    // Getting balance should return the old balance (accounts were cleared)
    assert_eq!(bank7.get_balance(&sender), old_balance);

    // Purging slot 4 should purge only slot 4
    let mut descendants = bank_forks.read().unwrap().descendants();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    ReplayStage::purge_unconfirmed_duplicate_slot(
        4,
        &mut ancestors,
        &mut descendants,
        &mut progress,
        &root_bank,
        &bank_forks,
        &blockstore,
    );
    for i in 4..=7 {
        assert!(bank_forks.read().unwrap().get(i).is_none());
        assert!(progress.get(&i).is_none());
        assert!(blockstore.get_slot_entries(i, 0).unwrap().is_empty());
    }
    for i in 0..=3 {
        assert!(bank_forks.read().unwrap().get(i).is_some());
        assert!(progress.get(&i).is_some());
        assert!(!blockstore.get_slot_entries(i, 0).unwrap().is_empty());
    }

    // Purging slot 1 should purge both forks 2 and 3
    let mut descendants = bank_forks.read().unwrap().descendants();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    ReplayStage::purge_unconfirmed_duplicate_slot(
        1,
        &mut ancestors,
        &mut descendants,
        &mut progress,
        &root_bank,
        &bank_forks,
        &blockstore,
    );
    for i in 1..=7 {
        assert!(bank_forks.read().unwrap().get(i).is_none());
        assert!(progress.get(&i).is_none());
        assert!(blockstore.get_slot_entries(i, 0).unwrap().is_empty());
    }
    assert!(bank_forks.read().unwrap().get(0).is_some());
    assert!(progress.get(&0).is_some());
}
#[test]
fn test_purge_ancestors_descendants() {
    let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1, None::<GenerateVotes>);

    // Purge the branch rooted at slot 2
    let mut descendants = bank_forks.read().unwrap().descendants();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    let descendants_of_2 = descendants.get(&2).unwrap().clone();
    ReplayStage::purge_ancestors_descendants(
        2,
        &descendants_of_2,
        &mut ancestors,
        &mut descendants,
    );

    // The result should be equivalent to removing the same slots from
    // BankForks and regenerating the `ancestors`/`descendants` maps.
    for purged in descendants_of_2 {
        bank_forks.write().unwrap().remove(purged);
    }
    bank_forks.write().unwrap().remove(2);
    assert!(check_map_eq(
        &ancestors,
        &bank_forks.read().unwrap().ancestors()
    ));
    assert!(check_map_eq(
        &descendants,
        &bank_forks.read().unwrap().descendants()
    ));

    // Try to purge the root
    bank_forks
        .write()
        .unwrap()
        .set_root(3, &AbsRequestSender::default(), None);
    let mut descendants = bank_forks.read().unwrap().descendants();
    let mut ancestors = bank_forks.read().unwrap().ancestors();
    let descendants_of_3 = descendants.get(&3).unwrap().clone();
    ReplayStage::purge_ancestors_descendants(
        3,
        &descendants_of_3,
        &mut ancestors,
        &mut descendants,
    );

    // Purging at the root empties `ancestors` entirely; the only remaining
    // descendant keys should be slots below the root.
    assert!(ancestors.is_empty());
    for slot in descendants.keys() {
        assert!(*slot < 3);
    }
}
#[test]
fn test_leader_snapshot_restart_propagation() {
    let ReplayBlockstoreComponents {
        validator_node_to_vote_keys,
        leader_schedule_cache,
        vote_simulator,
        ..
    } = replay_blockstore_components(None, 1, None::<GenerateVotes>);
    let VoteSimulator {
        mut progress,
        bank_forks,
        ..
    } = vote_simulator;

    let root_bank = bank_forks.read().unwrap().root_bank();
    let my_pubkey = leader_schedule_cache
        .slot_leader_at(root_bank.slot(), Some(&root_bank))
        .unwrap();

    // Check that we are the leader of the root bank
    assert!(
        progress
            .get_propagated_stats(root_bank.slot())
            .unwrap()
            .is_leader_slot
    );
    let ancestors = bank_forks.read().unwrap().ancestors();

    // Freeze bank so it shows up in frozen banks
    root_bank.freeze();
    let mut frozen_banks: Vec<_> = bank_forks
        .read()
        .unwrap()
        .frozen_banks()
        .values()
        .cloned()
        .collect();

    // Register a vote for the root slot from every validator so that
    // `compute_bank_stats` can propagate confirmation back to the root bank.
    let vote_tracker = VoteTracker::default();
    for vote_key in validator_node_to_vote_keys.values() {
        vote_tracker.insert_vote(root_bank.slot(), *vote_key);
    }

    // Before computing stats, the propagation status is still false
    assert!(
        !progress
            .get_leader_propagation_slot_must_exist(root_bank.slot())
            .0
    );

    // Update propagation status
    let mut tower = Tower::new_for_tests(0, 0.67);
    ReplayStage::compute_bank_stats(
        &validator_node_to_vote_keys[&my_pubkey],
        &ancestors,
        &mut frozen_banks,
        &mut tower,
        &mut progress,
        &vote_tracker,
        &ClusterSlots::default(),
        &bank_forks,
        &mut HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks.read().unwrap()),
        &mut LatestValidatorVotesForFrozenBanks::default(),
    );

    // Propagation status should now be true
    assert!(
        progress
            .get_leader_propagation_slot_must_exist(root_bank.slot())
            .0
    );
}
#[test]
fn test_unconfirmed_duplicate_slots_and_lockouts_for_non_heaviest_fork() {
    /*
        Build fork structure:
             slot 0
               |
             slot 1
             /    \
        slot 2    |
           |      |
        slot 3    |
           |      |
        slot 4    |
                slot 5
    */
    let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4)))) / tr(5));

    let mut vote_simulator = VoteSimulator::new(1);
    vote_simulator.fill_bank_forks(forks, &HashMap::<Pubkey, Vec<u64>>::new(), true);
    let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress);
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Arc::new(
        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
    );
    let mut tower = Tower::new_for_tests(8, 2.0 / 3.0);

    // All forks have same weight so heaviest bank to vote/reset on should be the tip of
    // the fork with the lower slot
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    assert_eq!(vote_fork.unwrap(), 4);
    assert_eq!(reset_fork.unwrap(), 4);

    // Record the vote for 5 which is not on the heaviest fork.
    tower.record_bank_vote(
        &bank_forks.read().unwrap().get(5).unwrap(),
        &Pubkey::default(),
    );

    // 4 should be the heaviest slot, but should not be votable
    // because of lockout. 5 is the heaviest slot on the same fork as the last vote.
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork, Some(5));

    // Mark 5 as duplicate
    blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap();
    let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
    let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default();
    let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
    let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default();
    let bank5_hash = bank_forks.read().unwrap().bank_hash(5).unwrap();
    assert_ne!(bank5_hash, Hash::default());
    let duplicate_state = DuplicateState::new_from_state(
        5,
        &gossip_duplicate_confirmed_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        || progress.is_dead(5).unwrap_or(false),
        || Some(bank5_hash),
    );
    let (ancestor_hashes_replay_update_sender, _ancestor_hashes_replay_update_receiver) =
        unbounded();
    check_slot_agrees_with_cluster(
        5,
        bank_forks.read().unwrap().root(),
        &blockstore,
        &mut duplicate_slots_tracker,
        &mut epoch_slots_frozen_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut DuplicateSlotsToRepair::default(),
        &ancestor_hashes_replay_update_sender,
        &mut purge_repair_slot_counter,
        SlotStateUpdate::Duplicate(duplicate_state),
    );

    // 4 should be the heaviest slot, but should not be votable
    // because of lockout. 5 is no longer valid due to it being a duplicate.
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    assert!(vote_fork.is_none());
    assert!(reset_fork.is_none());

    // If slot 5 is marked as confirmed, it becomes the heaviest bank on same slot again
    let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default();
    gossip_duplicate_confirmed_slots.insert(5, bank5_hash);
    let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state(
        bank5_hash,
        || progress.is_dead(5).unwrap_or(false),
        || Some(bank5_hash),
    );
    check_slot_agrees_with_cluster(
        5,
        bank_forks.read().unwrap().root(),
        &blockstore,
        &mut duplicate_slots_tracker,
        &mut epoch_slots_frozen_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut duplicate_slots_to_repair,
        &ancestor_hashes_replay_update_sender,
        &mut purge_repair_slot_counter,
        SlotStateUpdate::DuplicateConfirmed(duplicate_confirmed_state),
    );

    // The confirmed hash is detected in `progress`, which means
    // it's confirmation on the replayed block. This means we have
    // the right version of the block, so `duplicate_slots_to_repair`
    // should be empty
    assert!(duplicate_slots_to_repair.is_empty());
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    // Should now pick 5 as the heaviest fork from last vote again.
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork.unwrap(), 5);
}
#[test]
fn test_unconfirmed_duplicate_slots_and_lockouts() {
    /*
        Build fork structure:
             slot 0
               |
             slot 1
             /    \
        slot 2    |
           |      |
        slot 3    |
           |      |
        slot 4    |
                slot 5
                  |
                slot 6
    */
    let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4)))) / (tr(5) / (tr(6))));

    // Make enough validators for vote switch threshold later
    let mut vote_simulator = VoteSimulator::new(2);
    let validator_votes: HashMap<Pubkey, Vec<u64>> = vec![
        (vote_simulator.node_pubkeys[0], vec![5]),
        (vote_simulator.node_pubkeys[1], vec![2]),
    ]
    .into_iter()
    .collect();
    vote_simulator.fill_bank_forks(forks, &validator_votes, true);
    let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress);
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Arc::new(
        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
    );
    let mut tower = Tower::new_for_tests(8, 0.67);

    // All forks have same weight so heaviest bank to vote/reset on should be the tip of
    // the fork with the lower slot
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    assert_eq!(vote_fork.unwrap(), 4);
    assert_eq!(reset_fork.unwrap(), 4);

    // Record the vote for 4
    tower.record_bank_vote(
        &bank_forks.read().unwrap().get(4).unwrap(),
        &Pubkey::default(),
    );

    // Mark 4 as duplicate, 3 should be the heaviest slot, but should not be votable
    // because of lockout
    blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap();
    let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
    let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
    let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default();
    let bank4_hash = bank_forks.read().unwrap().bank_hash(4).unwrap();
    assert_ne!(bank4_hash, Hash::default());
    let duplicate_state = DuplicateState::new_from_state(
        4,
        &gossip_duplicate_confirmed_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        || progress.is_dead(4).unwrap_or(false),
        || Some(bank4_hash),
    );
    let (ancestor_hashes_replay_update_sender, _ancestor_hashes_replay_update_receiver) =
        unbounded();
    check_slot_agrees_with_cluster(
        4,
        bank_forks.read().unwrap().root(),
        &blockstore,
        &mut duplicate_slots_tracker,
        &mut epoch_slots_frozen_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut DuplicateSlotsToRepair::default(),
        &ancestor_hashes_replay_update_sender,
        &mut PurgeRepairSlotCounter::default(),
        SlotStateUpdate::Duplicate(duplicate_state),
    );
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork, Some(3));

    // Now mark 2, an ancestor of 4, as duplicate
    blockstore.store_duplicate_slot(2, vec![], vec![]).unwrap();
    let bank2_hash = bank_forks.read().unwrap().bank_hash(2).unwrap();
    assert_ne!(bank2_hash, Hash::default());
    let duplicate_state = DuplicateState::new_from_state(
        2,
        &gossip_duplicate_confirmed_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        || progress.is_dead(2).unwrap_or(false),
        || Some(bank2_hash),
    );
    check_slot_agrees_with_cluster(
        2,
        bank_forks.read().unwrap().root(),
        &blockstore,
        &mut duplicate_slots_tracker,
        &mut epoch_slots_frozen_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut DuplicateSlotsToRepair::default(),
        &ancestor_hashes_replay_update_sender,
        &mut PurgeRepairSlotCounter::default(),
        SlotStateUpdate::Duplicate(duplicate_state),
    );
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    // Should now pick the next heaviest fork that is not a descendant of 2, which is 6.
    // However the lockout from vote 4 should still apply, so 6 should not be votable
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork.unwrap(), 6);

    // If slot 4 is marked as confirmed, then this confirms slot 2 and 4, and
    // then slot 4 is now the heaviest bank again
    let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default();
    gossip_duplicate_confirmed_slots.insert(4, bank4_hash);
    let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state(
        bank4_hash,
        || progress.is_dead(4).unwrap_or(false),
        || Some(bank4_hash),
    );
    check_slot_agrees_with_cluster(
        4,
        bank_forks.read().unwrap().root(),
        &blockstore,
        &mut duplicate_slots_tracker,
        &mut epoch_slots_frozen_slots,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut duplicate_slots_to_repair,
        &ancestor_hashes_replay_update_sender,
        &mut PurgeRepairSlotCounter::default(),
        SlotStateUpdate::DuplicateConfirmed(duplicate_confirmed_state),
    );
    // The confirmed hash is detected in `progress`, which means
    // it's confirmation on the replayed block. This means we have
    // the right version of the block, so `duplicate_slots_to_repair`
    // should be empty
    assert!(duplicate_slots_to_repair.is_empty());
    let (vote_fork, reset_fork) = run_compute_and_select_forks(
        &bank_forks,
        &mut progress,
        &mut tower,
        &mut vote_simulator.heaviest_subtree_fork_choice,
        &mut vote_simulator.latest_validator_votes_for_frozen_banks,
    );
    // Should now pick the heaviest fork 4 again, but lockouts apply so fork 4
    // is not votable, which avoids voting for 4 again.
    assert!(vote_fork.is_none());
    assert_eq!(reset_fork.unwrap(), 4);
}
#[test]
fn test_dump_then_repair_correct_slots() {
    // Create the tree of banks in a BankForks object
    let forks = tr(0) / (tr(1)) / (tr(2));
    let ReplayBlockstoreComponents {
        ref mut vote_simulator,
        ref blockstore,
        ..
    } = replay_blockstore_components(Some(forks), 1, None);
    let VoteSimulator {
        ref mut progress,
        ref bank_forks,
        ..
    } = vote_simulator;

    let (mut ancestors, mut descendants) = {
        let r_bank_forks = bank_forks.read().unwrap();
        (r_bank_forks.ancestors(), r_bank_forks.descendants())
    };

    // Insert different versions of both 1 and 2. Both slots 1 and 2 should
    // then be purged
    let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default();
    duplicate_slots_to_repair.insert(1, Hash::new_unique());
    duplicate_slots_to_repair.insert(2, Hash::new_unique());
    let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default();

    ReplayStage::dump_then_repair_correct_slots(
        &mut duplicate_slots_to_repair,
        &mut ancestors,
        &mut descendants,
        progress,
        bank_forks,
        blockstore,
        None,
        &mut purge_repair_slot_counter,
    );

    // Only the root (slot 0) survives; slots 1 and 2 are gone from the
    // bank forks and from both relationship maps.
    let r_bank_forks = bank_forks.read().unwrap();
    for slot in 0..=2 {
        let should_remain = slot == 0;
        assert_eq!(r_bank_forks.get(slot).is_some(), should_remain);
        assert_eq!(ancestors.get(&slot).is_some(), should_remain);
        assert_eq!(descendants.get(&slot).is_some(), should_remain);
    }

    // Each purged slot was counted exactly once
    assert_eq!(2, purge_repair_slot_counter.len());
    assert_eq!(1, *purge_repair_slot_counter.get(&1).unwrap());
    assert_eq!(1, *purge_repair_slot_counter.get(&2).unwrap());
}
fn setup_vote_then_rollback (
first_vote : Slot ,
num_validators : usize ,
generate_votes : Option < GenerateVotes > ,
) -> ReplayBlockstoreComponents {
/*
Build fork structure :
slot 0
|
slot 1
/ \
slot 2 |
| |
slot 3 |
| |
slot 4 |
| |
slot 5 |
slot 6
|
slot 7
* /
let forks = tr ( 0 ) / ( tr ( 1 ) / ( tr ( 2 ) / ( tr ( 3 ) / ( tr ( 4 ) / ( tr ( 5 ) ) ) ) ) / ( tr ( 6 ) / ( tr ( 7 ) ) ) ) ;
let mut replay_components =
replay_blockstore_components ( Some ( forks ) , num_validators , generate_votes ) ;
let ReplayBlockstoreComponents {
ref mut tower ,
ref blockstore ,
ref mut vote_simulator ,
..
} = replay_components ;
let VoteSimulator {
ref mut progress ,
ref bank_forks ,
ref mut heaviest_subtree_fork_choice ,
..
} = vote_simulator ;
tower . record_bank_vote (
2022-04-28 11:51:00 -07:00
& bank_forks . read ( ) . unwrap ( ) . get ( first_vote ) . unwrap ( ) ,
2021-07-08 19:07:32 -07:00
& Pubkey ::default ( ) ,
) ;
// Simulate another version of slot 2 was duplicate confirmed
let our_bank2_hash = bank_forks . read ( ) . unwrap ( ) . bank_hash ( 2 ) . unwrap ( ) ;
let duplicate_confirmed_bank2_hash = Hash ::new_unique ( ) ;
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots ::default ( ) ;
gossip_duplicate_confirmed_slots . insert ( 2 , duplicate_confirmed_bank2_hash ) ;
let mut duplicate_slots_tracker = DuplicateSlotsTracker ::default ( ) ;
let mut duplicate_slots_to_repair = DuplicateSlotsToRepair ::default ( ) ;
2021-08-13 14:21:52 -07:00
let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots ::default ( ) ;
2021-07-08 19:07:32 -07:00
// Mark fork choice branch as invalid so select forks below doesn't panic
// on a nonexistent `heaviest_bank_on_same_fork` after we dump the duplciate fork.
2021-07-18 17:04:25 -07:00
let duplicate_confirmed_state = DuplicateConfirmedState ::new_from_state (
duplicate_confirmed_bank2_hash ,
| | progress . is_dead ( 2 ) . unwrap_or ( false ) ,
| | Some ( our_bank2_hash ) ,
) ;
2021-07-26 20:59:00 -07:00
let ( ancestor_hashes_replay_update_sender , _ancestor_hashes_replay_update_receiver ) =
unbounded ( ) ;
2021-07-08 19:07:32 -07:00
check_slot_agrees_with_cluster (
2 ,
bank_forks . read ( ) . unwrap ( ) . root ( ) ,
2021-07-18 17:04:25 -07:00
blockstore ,
2021-07-08 19:07:32 -07:00
& mut duplicate_slots_tracker ,
2021-08-13 14:21:52 -07:00
& mut epoch_slots_frozen_slots ,
2021-07-08 19:07:32 -07:00
heaviest_subtree_fork_choice ,
& mut duplicate_slots_to_repair ,
2021-07-26 20:59:00 -07:00
& ancestor_hashes_replay_update_sender ,
2022-10-27 20:06:06 -07:00
& mut PurgeRepairSlotCounter ::default ( ) ,
2021-07-18 17:04:25 -07:00
SlotStateUpdate ::DuplicateConfirmed ( duplicate_confirmed_state ) ,
2021-07-08 19:07:32 -07:00
) ;
2021-08-13 14:21:52 -07:00
assert_eq! (
* duplicate_slots_to_repair . get ( & 2 ) . unwrap ( ) ,
duplicate_confirmed_bank2_hash
) ;
2021-07-08 19:07:32 -07:00
let mut ancestors = bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ;
2022-04-28 11:51:00 -07:00
let mut descendants = bank_forks . read ( ) . unwrap ( ) . descendants ( ) ;
2021-07-08 19:07:32 -07:00
let old_descendants_of_2 = descendants . get ( & 2 ) . unwrap ( ) . clone ( ) ;
ReplayStage ::dump_then_repair_correct_slots (
& mut duplicate_slots_to_repair ,
& mut ancestors ,
& mut descendants ,
progress ,
bank_forks ,
blockstore ,
None ,
2022-10-27 20:06:06 -07:00
& mut PurgeRepairSlotCounter ::default ( ) ,
2021-07-08 19:07:32 -07:00
) ;
// Check everything was purged properly
for purged_slot in std ::iter ::once ( & 2 ) . chain ( old_descendants_of_2 . iter ( ) ) {
assert! ( ! ancestors . contains_key ( purged_slot ) ) ;
assert! ( ! descendants . contains_key ( purged_slot ) ) ;
}
replay_components
}
fn run_test_duplicate_rollback_then_vote ( first_vote : Slot ) -> SelectVoteAndResetForkResult {
let replay_components = setup_vote_then_rollback (
first_vote ,
2 ,
Some ( Box ::new ( | node_keys | {
// Simulate everyone else voting on 6, so we have enough to
// make a switch to the other fork
node_keys . into_iter ( ) . map ( | k | ( k , vec! [ 6 ] ) ) . collect ( )
} ) ) ,
) ;
let ReplayBlockstoreComponents {
mut tower ,
vote_simulator ,
..
} = replay_components ;
let VoteSimulator {
mut progress ,
bank_forks ,
mut heaviest_subtree_fork_choice ,
mut latest_validator_votes_for_frozen_banks ,
..
} = vote_simulator ;
let mut frozen_banks : Vec < _ > = bank_forks
. read ( )
. unwrap ( )
. frozen_banks ( )
. values ( )
. cloned ( )
. collect ( ) ;
let ancestors = bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ;
2022-04-28 11:51:00 -07:00
let descendants = bank_forks . read ( ) . unwrap ( ) . descendants ( ) ;
2021-07-08 19:07:32 -07:00
ReplayStage ::compute_bank_stats (
& Pubkey ::new_unique ( ) ,
& ancestors ,
& mut frozen_banks ,
2022-02-07 14:06:19 -08:00
& mut tower ,
2021-07-08 19:07:32 -07:00
& mut progress ,
& VoteTracker ::default ( ) ,
& ClusterSlots ::default ( ) ,
& bank_forks ,
& mut heaviest_subtree_fork_choice ,
& mut latest_validator_votes_for_frozen_banks ,
) ;
// Try to switch to vote to the heaviest slot 6, then return the vote results
let ( heaviest_bank , heaviest_bank_on_same_fork ) = heaviest_subtree_fork_choice
. select_forks ( & frozen_banks , & tower , & progress , & ancestors , & bank_forks ) ;
assert_eq! ( heaviest_bank . slot ( ) , 7 ) ;
assert! ( heaviest_bank_on_same_fork . is_none ( ) ) ;
ReplayStage ::select_vote_and_reset_forks (
& heaviest_bank ,
heaviest_bank_on_same_fork . as_ref ( ) ,
& ancestors ,
& descendants ,
& progress ,
& mut tower ,
& latest_validator_votes_for_frozen_banks ,
& heaviest_subtree_fork_choice ,
)
}
#[test]
fn test_duplicate_rollback_then_vote_locked_out() {
    // If we vote on 5 first then try to vote on 7, we should be locked out,
    // despite the rollback
    let SelectVoteAndResetForkResult {
        vote_bank,
        reset_bank,
        heaviest_fork_failures,
    } = run_test_duplicate_rollback_then_vote(5);

    assert!(vote_bank.is_none());
    assert_eq!(reset_bank.unwrap().slot(), 7);
    assert_eq!(
        heaviest_fork_failures,
        vec![HeaviestForkFailures::LockedOut(7)]
    );
}
#[test]
fn test_duplicate_rollback_then_vote_success() {
    let SelectVoteAndResetForkResult {
        vote_bank,
        reset_bank,
        heaviest_fork_failures,
    } = run_test_duplicate_rollback_then_vote(4);

    // Voting on 4 first leaves us free to vote on 7 afterwards: the vote on 7
    // goes through with a switch proof and no fork failures are reported.
    assert_matches!(
        vote_bank
            .map(|(bank, switch_decision)| (bank.slot(), switch_decision))
            .unwrap(),
        (7, SwitchForkDecision::SwitchProof(_))
    );
    assert_eq!(reset_bank.unwrap().slot(), 7);
    assert!(heaviest_fork_failures.is_empty());
}
fn run_test_duplicate_rollback_then_vote_on_other_duplicate (
first_vote : Slot ,
) -> SelectVoteAndResetForkResult {
let replay_components = setup_vote_then_rollback ( first_vote , 10 , None ::< GenerateVotes > ) ;
let ReplayBlockstoreComponents {
mut tower ,
mut vote_simulator ,
..
} = replay_components ;
// Simulate repairing an alternate version of slot 2, 3 and 4 that we just dumped. Because
// we're including votes this time for slot 1, it should generate a different
// version of 2.
let cluster_votes : HashMap < Pubkey , Vec < Slot > > = vote_simulator
. node_pubkeys
. iter ( )
. map ( | k | ( * k , vec! [ 1 , 2 ] ) )
. collect ( ) ;
// Create new versions of slots 2, 3, 4, 5, with parent slot 1
vote_simulator . create_and_vote_new_branch (
1 ,
5 ,
& cluster_votes ,
& HashSet ::new ( ) ,
& Pubkey ::new_unique ( ) ,
& mut tower ,
) ;
let VoteSimulator {
mut progress ,
bank_forks ,
mut heaviest_subtree_fork_choice ,
mut latest_validator_votes_for_frozen_banks ,
..
} = vote_simulator ;
// Check that the new branch with slot 2 is different than the original version.
let bank_1_hash = bank_forks . read ( ) . unwrap ( ) . bank_hash ( 1 ) . unwrap ( ) ;
2022-11-30 09:26:13 -08:00
let children_of_1 = ( & heaviest_subtree_fork_choice )
2021-07-08 19:07:32 -07:00
. children ( & ( 1 , bank_1_hash ) )
. unwrap ( ) ;
2022-11-30 09:26:13 -08:00
let duplicate_versions_of_2 = children_of_1 . filter ( | ( slot , _hash ) | * slot = = 2 ) . count ( ) ;
2021-07-08 19:07:32 -07:00
assert_eq! ( duplicate_versions_of_2 , 2 ) ;
let mut frozen_banks : Vec < _ > = bank_forks
. read ( )
. unwrap ( )
. frozen_banks ( )
. values ( )
. cloned ( )
. collect ( ) ;
let ancestors = bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ;
2022-04-28 11:51:00 -07:00
let descendants = bank_forks . read ( ) . unwrap ( ) . descendants ( ) ;
2021-07-08 19:07:32 -07:00
ReplayStage ::compute_bank_stats (
& Pubkey ::new_unique ( ) ,
& ancestors ,
& mut frozen_banks ,
2022-02-07 14:06:19 -08:00
& mut tower ,
2021-07-08 19:07:32 -07:00
& mut progress ,
& VoteTracker ::default ( ) ,
& ClusterSlots ::default ( ) ,
& bank_forks ,
& mut heaviest_subtree_fork_choice ,
& mut latest_validator_votes_for_frozen_banks ,
) ;
// Try to switch to vote to the heaviest slot 5, then return the vote results
let ( heaviest_bank , heaviest_bank_on_same_fork ) = heaviest_subtree_fork_choice
. select_forks ( & frozen_banks , & tower , & progress , & ancestors , & bank_forks ) ;
assert_eq! ( heaviest_bank . slot ( ) , 5 ) ;
assert! ( heaviest_bank_on_same_fork . is_none ( ) ) ;
ReplayStage ::select_vote_and_reset_forks (
& heaviest_bank ,
heaviest_bank_on_same_fork . as_ref ( ) ,
& ancestors ,
& descendants ,
& progress ,
& mut tower ,
& latest_validator_votes_for_frozen_banks ,
& heaviest_subtree_fork_choice ,
)
}
#[test]
fn test_duplicate_rollback_then_vote_on_other_duplicate_success() {
    let SelectVoteAndResetForkResult {
        vote_bank,
        reset_bank,
        heaviest_fork_failures,
    } = run_test_duplicate_rollback_then_vote_on_other_duplicate(3);

    // Voting on 2 first, then 5, should succeed with a switch proof.
    assert_matches!(
        vote_bank
            .map(|(bank, switch_decision)| (bank.slot(), switch_decision))
            .unwrap(),
        (5, SwitchForkDecision::SwitchProof(_))
    );
    assert_eq!(reset_bank.unwrap().slot(), 5);
    assert!(heaviest_fork_failures.is_empty());
}
#[test]
fn test_duplicate_rollback_then_vote_on_other_duplicate_same_slot_locked_out() {
    let SelectVoteAndResetForkResult {
        vote_bank,
        reset_bank,
        heaviest_fork_failures,
    } = run_test_duplicate_rollback_then_vote_on_other_duplicate(5);

    // Voting on 5 first and then on another version of 5 must fail lockout.
    assert!(vote_bank.is_none());
    assert_eq!(reset_bank.unwrap().slot(), 5);
    assert_eq!(
        heaviest_fork_failures,
        vec![HeaviestForkFailures::LockedOut(5)]
    );
}
#[test]
#[ignore]
fn test_duplicate_rollback_then_vote_on_other_duplicate_different_slot_locked_out() {
    let SelectVoteAndResetForkResult {
        vote_bank,
        reset_bank,
        heaviest_fork_failures,
    } = run_test_duplicate_rollback_then_vote_on_other_duplicate(4);

    // Voting on 4 first, then on a 5 descended from a different version of 4,
    // must fail lockout.
    assert!(vote_bank.is_none());
    assert_eq!(reset_bank.unwrap().slot(), 5);
    assert_eq!(
        heaviest_fork_failures,
        vec![HeaviestForkFailures::LockedOut(5)]
    );
}
#[test]
fn test_gossip_vote_doesnt_affect_fork_choice() {
    let (
        VoteSimulator {
            bank_forks,
            mut heaviest_subtree_fork_choice,
            mut latest_validator_votes_for_frozen_banks,
            vote_pubkeys,
            ..
        },
        _,
    ) = setup_default_forks(1, None::<GenerateVotes>);

    let vote_pubkey = vote_pubkeys[0];
    let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default();
    let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded();

    // Before any gossip votes arrive, the best slot is 4.
    assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4);

    // Deliver a gossip vote for slot 3 on the other fork.
    let vote_slot = 3;
    let vote_bank = bank_forks.read().unwrap().get(vote_slot).unwrap();
    gossip_verified_vote_hash_sender
        .send((vote_pubkey, vote_slot, vote_bank.hash()))
        .expect("Send should succeed");
    ReplayStage::process_gossip_verified_vote_hashes(
        &gossip_verified_vote_hash_receiver,
        &mut unfrozen_gossip_verified_vote_hashes,
        &heaviest_subtree_fork_choice,
        &mut latest_validator_votes_for_frozen_banks,
    );

    // Recompute fork choice; gossip votes must not influence it.
    heaviest_subtree_fork_choice.compute_bank_stats(
        &vote_bank,
        &Tower::default(),
        &mut latest_validator_votes_for_frozen_banks,
    );

    // Best slot is still 4.
    assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4);
}
#[test]
fn test_replay_stage_refresh_last_vote() {
    let ReplayBlockstoreComponents {
        cluster_info,
        poh_recorder,
        mut tower,
        my_pubkey,
        vote_simulator,
        ..
    } = replay_blockstore_components(None, 10, None::<GenerateVotes>);
    let tower_storage = crate::tower_storage::NullTowerStorage::default();
    let VoteSimulator {
        mut validator_keypairs,
        bank_forks,
        ..
    } = vote_simulator;

    let mut last_vote_refresh_time = LastVoteRefreshTime {
        last_refresh_time: Instant::now(),
        last_print_time: Instant::now(),
    };
    let has_new_vote_been_rooted = false;
    let mut voted_signatures = vec![];

    let identity_keypair = cluster_info.keypair().clone();
    let my_vote_keypair = vec![Arc::new(
        validator_keypairs.remove(&my_pubkey).unwrap().vote_keypair,
    )];
    let my_vote_pubkey = my_vote_keypair[0].pubkey();
    let bank0 = bank_forks.read().unwrap().get(0).unwrap();
    bank0.set_initial_accounts_hash_verification_completed();

    let (voting_sender, voting_receiver) = unbounded();
    // Drain one queued vote op and hand it to the voting service, exactly as
    // the real voting thread would.
    let process_pending_vote = || {
        let vote_info = voting_receiver
            .recv_timeout(Duration::from_secs(1))
            .unwrap();
        crate::voting_service::VotingService::handle_vote(
            &cluster_info,
            &poh_recorder,
            &tower_storage,
            vote_info,
            false,
        );
    };

    // Simulate a vote for slot 0 landing in slot 1.
    let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
    bank1.fill_bank_with_ticks_for_tests();
    tower.record_bank_vote(&bank0, &my_vote_pubkey);
    ReplayStage::push_vote(
        &bank0,
        &my_vote_pubkey,
        &identity_keypair,
        &my_vote_keypair,
        &mut tower,
        &SwitchForkDecision::SameFork,
        &mut voted_signatures,
        has_new_vote_been_rooted,
        &mut ReplayTiming::default(),
        &voting_sender,
        None,
    );
    process_pending_vote();

    let mut cursor = Cursor::default();
    let votes = cluster_info.get_votes(&mut cursor);
    assert_eq!(votes.len(), 1);
    let vote_tx = &votes[0];
    assert_eq!(vote_tx.message.recent_blockhash, bank0.last_blockhash());
    assert_eq!(tower.last_vote_tx_blockhash(), bank0.last_blockhash());
    assert_eq!(tower.last_voted_slot().unwrap(), 0);
    bank1.process_transaction(vote_tx).unwrap();
    bank1.freeze();

    // Refreshing the vote for bank 0 in bank 1 or bank 2 is a no-op: that
    // vote already landed.
    let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 2));
    bank2.fill_bank_with_ticks_for_tests();
    bank2.freeze();
    for refresh_bank in &[&bank1, &bank2] {
        ReplayStage::refresh_last_vote(
            &mut tower,
            refresh_bank,
            Tower::last_voted_slot_in_bank(refresh_bank, &my_vote_pubkey).unwrap(),
            &my_vote_pubkey,
            &identity_keypair,
            &my_vote_keypair,
            &mut voted_signatures,
            has_new_vote_been_rooted,
            &mut last_vote_refresh_time,
            &voting_sender,
            None,
        );
        // Nothing new should have reached gossip...
        let votes = cluster_info.get_votes(&mut cursor);
        assert!(votes.is_empty());
        // ...and the tower's latest vote tx blockhash is untouched.
        assert_eq!(tower.last_vote_tx_blockhash(), bank0.last_blockhash());
        assert_eq!(tower.last_voted_slot().unwrap(), 0);
    }

    // Submit a new vote for bank 1 to the network, but the vote does not land.
    tower.record_bank_vote(&bank1, &my_vote_pubkey);
    ReplayStage::push_vote(
        &bank1,
        &my_vote_pubkey,
        &identity_keypair,
        &my_vote_keypair,
        &mut tower,
        &SwitchForkDecision::SameFork,
        &mut voted_signatures,
        has_new_vote_been_rooted,
        &mut ReplayTiming::default(),
        &voting_sender,
        None,
    );
    process_pending_vote();
    let votes = cluster_info.get_votes(&mut cursor);
    assert_eq!(votes.len(), 1);
    let vote_tx = &votes[0];
    assert_eq!(vote_tx.message.recent_blockhash, bank1.last_blockhash());
    assert_eq!(tower.last_vote_tx_blockhash(), bank1.last_blockhash());
    assert_eq!(tower.last_voted_slot().unwrap(), 1);

    // Refreshing the vote for bank 1 in bank 2 fails: the last vote's
    // blockhash has not expired yet.
    ReplayStage::refresh_last_vote(
        &mut tower,
        &bank2,
        Tower::last_voted_slot_in_bank(&bank2, &my_vote_pubkey).unwrap(),
        &my_vote_pubkey,
        &identity_keypair,
        &my_vote_keypair,
        &mut voted_signatures,
        has_new_vote_been_rooted,
        &mut last_vote_refresh_time,
        &voting_sender,
        None,
    );
    let votes = cluster_info.get_votes(&mut cursor);
    assert!(votes.is_empty());
    assert_eq!(tower.last_vote_tx_blockhash(), bank1.last_blockhash());
    assert_eq!(tower.last_voted_slot().unwrap(), 1);

    // Build a chain long enough that the last vote transaction's recent
    // blockhash expires.
    let expired_bank = {
        let mut parent_bank = bank2.clone();
        for _ in 0..MAX_PROCESSING_AGE {
            parent_bank = Arc::new(Bank::new_from_parent(
                &parent_bank,
                &Pubkey::default(),
                parent_bank.slot() + 1,
            ));
            parent_bank.fill_bank_with_ticks_for_tests();
            parent_bank.freeze();
        }
        parent_bank
    };

    // Backdate the throttle so the refresh interval has elapsed. Now the
    // refresh of the slot-1 vote succeeds because its blockhash is expired.
    last_vote_refresh_time.last_refresh_time = last_vote_refresh_time
        .last_refresh_time
        .checked_sub(Duration::from_millis(
            MAX_VOTE_REFRESH_INTERVAL_MILLIS as u64 + 1,
        ))
        .unwrap();
    let clone_refresh_time = last_vote_refresh_time.last_refresh_time;
    ReplayStage::refresh_last_vote(
        &mut tower,
        &expired_bank,
        Tower::last_voted_slot_in_bank(&expired_bank, &my_vote_pubkey).unwrap(),
        &my_vote_pubkey,
        &identity_keypair,
        &my_vote_keypair,
        &mut voted_signatures,
        has_new_vote_been_rooted,
        &mut last_vote_refresh_time,
        &voting_sender,
        None,
    );
    process_pending_vote();
    assert!(last_vote_refresh_time.last_refresh_time > clone_refresh_time);
    let votes = cluster_info.get_votes(&mut cursor);
    assert_eq!(votes.len(), 1);
    let vote_tx = &votes[0];
    assert_eq!(
        vote_tx.message.recent_blockhash,
        expired_bank.last_blockhash()
    );
    assert_eq!(
        tower.last_vote_tx_blockhash(),
        expired_bank.last_blockhash()
    );
    assert_eq!(tower.last_voted_slot().unwrap(), 1);

    // The refreshed vote transaction must still process cleanly.
    let expired_bank_child = Arc::new(Bank::new_from_parent(
        &expired_bank,
        &Pubkey::default(),
        expired_bank.slot() + 1,
    ));
    expired_bank_child.process_transaction(vote_tx).unwrap();
    let vote_account = expired_bank_child
        .get_vote_account(&my_vote_pubkey)
        .unwrap();
    assert_eq!(
        vote_account.vote_state().as_ref().unwrap().tower(),
        vec![0, 1]
    );
    expired_bank_child.fill_bank_with_ticks_for_tests();
    expired_bank_child.freeze();

    // Refresh on a sibling bank where:
    // 1) the vote for slot 1 hasn't landed, and
    // 2) the latest refresh's recent blockhash (the sibling's) doesn't exist.
    // Still throttled, because MAX_VOTE_REFRESH_INTERVAL_MILLIS has not
    // elapsed since the refresh we just performed.
    let expired_bank_sibling = Arc::new(Bank::new_from_parent(
        &bank2,
        &Pubkey::default(),
        expired_bank_child.slot() + 1,
    ));
    expired_bank_sibling.fill_bank_with_ticks_for_tests();
    expired_bank_sibling.freeze();
    last_vote_refresh_time.last_refresh_time = Instant::now();
    ReplayStage::refresh_last_vote(
        &mut tower,
        &expired_bank_sibling,
        Tower::last_voted_slot_in_bank(&expired_bank_sibling, &my_vote_pubkey).unwrap(),
        &my_vote_pubkey,
        &identity_keypair,
        &my_vote_keypair,
        &mut voted_signatures,
        has_new_vote_been_rooted,
        &mut last_vote_refresh_time,
        &voting_sender,
        None,
    );
    let votes = cluster_info.get_votes(&mut cursor);
    assert!(votes.is_empty());
    assert_eq!(
        vote_tx.message.recent_blockhash,
        expired_bank.last_blockhash()
    );
    assert_eq!(
        tower.last_vote_tx_blockhash(),
        expired_bank.last_blockhash()
    );
    assert_eq!(tower.last_voted_slot().unwrap(), 1);
}
#[test]
fn test_retransmit_latest_unpropagated_leader_slot() {
    let ReplayBlockstoreComponents {
        validator_node_to_vote_keys,
        leader_schedule_cache,
        poh_recorder,
        vote_simulator,
        ..
    } = replay_blockstore_components(None, 10, None::<GenerateVotes>);
    let VoteSimulator {
        mut progress,
        ref bank_forks,
        ..
    } = vote_simulator;
    let poh_recorder = Arc::new(poh_recorder);
    let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();

    // Insert a frozen leader bank for slot 1 so slot 0 becomes the latest
    // unpropagated leader slot.
    let bank1 = Bank::new_from_parent(
        &bank_forks.read().unwrap().get(0).unwrap(),
        &leader_schedule_cache.slot_leader_at(1, None).unwrap(),
        1,
    );
    progress.insert(
        1,
        ForkProgress::new_from_bank(
            &bank1,
            bank1.collector_id(),
            validator_node_to_vote_keys
                .get(bank1.collector_id())
                .unwrap(),
            Some(0),
            0,
            0,
        ),
    );
    assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
    bank1.freeze();
    bank_forks.write().unwrap().insert(bank1);

    // Pretend slot 0's last retransmit happened `millis_ago` ms in the past.
    let backdate_retry_time = |progress: &mut ProgressMap, millis_ago: u64| {
        progress.get_retransmit_info_mut(0).unwrap().retry_time = Some(
            Instant::now()
                .checked_sub(Duration::from_millis(millis_ago))
                .unwrap(),
        );
    };
    let retransmit = |progress: &mut ProgressMap| {
        ReplayStage::retransmit_latest_unpropagated_leader_slot(
            &poh_recorder,
            &retransmit_slots_sender,
            progress,
        );
    };

    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(res.is_ok(), "retry_iteration=0, retry_time=None");
    assert_eq!(
        progress.get_retransmit_info(0).unwrap().retry_iteration,
        0,
        "retransmit should not advance retry_iteration before time has been set"
    );

    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_err(),
        "retry_iteration=0, elapsed < 2^0 * RETRANSMIT_BASE_DELAY_MS"
    );

    backdate_retry_time(&mut progress, RETRANSMIT_BASE_DELAY_MS + 1);
    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_ok(),
        "retry_iteration=0, elapsed > RETRANSMIT_BASE_DELAY_MS"
    );
    assert_eq!(
        progress.get_retransmit_info(0).unwrap().retry_iteration,
        1,
        "retransmit should advance retry_iteration"
    );

    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_err(),
        "retry_iteration=1, elapsed < 2^1 * RETRY_BASE_DELAY_MS"
    );

    backdate_retry_time(&mut progress, RETRANSMIT_BASE_DELAY_MS + 1);
    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_err(),
        "retry_iteration=1, elapsed < 2^1 * RETRANSMIT_BASE_DELAY_MS"
    );

    backdate_retry_time(&mut progress, 2 * RETRANSMIT_BASE_DELAY_MS + 1);
    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_ok(),
        "retry_iteration=1, elapsed > 2^1 * RETRANSMIT_BASE_DELAY_MS"
    );
    assert_eq!(
        progress.get_retransmit_info(0).unwrap().retry_iteration,
        2,
        "retransmit should advance retry_iteration"
    );

    // Bump straight to retry iteration 3.
    progress
        .get_retransmit_info_mut(0)
        .unwrap()
        .increment_retry_iteration();

    backdate_retry_time(&mut progress, 2 * RETRANSMIT_BASE_DELAY_MS + 1);
    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_err(),
        "retry_iteration=3, elapsed < 2^3 * RETRANSMIT_BASE_DELAY_MS"
    );

    backdate_retry_time(&mut progress, 8 * RETRANSMIT_BASE_DELAY_MS + 1);
    retransmit(&mut progress);
    let res = retransmit_slots_receiver.recv_timeout(Duration::from_millis(10));
    assert!(
        res.is_ok(),
        "retry_iteration=3, elapsed > 2^3 * RETRANSMIT_BASE_DELAY"
    );
    assert_eq!(
        progress.get_retransmit_info(0).unwrap().retry_iteration,
        4,
        "retransmit should advance retry_iteration"
    );
}
2022-01-04 00:24:16 -08:00
fn receive_slots ( retransmit_slots_receiver : & RetransmitSlotsReceiver ) -> Vec < Slot > {
let mut slots = Vec ::default ( ) ;
while let Ok ( slot ) = retransmit_slots_receiver . recv_timeout ( Duration ::from_millis ( 10 ) ) {
slots . push ( slot ) ;
}
slots
}
#[test]
fn test_maybe_retransmit_unpropagated_slots() {
    let ReplayBlockstoreComponents {
        validator_node_to_vote_keys,
        leader_schedule_cache,
        vote_simulator,
        ..
    } = replay_blockstore_components(None, 10, None::<GenerateVotes>);
    let VoteSimulator {
        mut progress,
        ref bank_forks,
        ..
    } = vote_simulator;
    let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();

    // Build frozen leader banks for slots 1..=9 and 11..=14; skipping slot 10
    // creates a discontinuity in the consecutive leader range.
    let mut parent_slot = 0;
    for slot in (1..10).chain(11..15) {
        let bank = Bank::new_from_parent(
            &bank_forks.read().unwrap().get(parent_slot).unwrap(),
            &leader_schedule_cache.slot_leader_at(slot, None).unwrap(),
            slot,
        );
        progress.insert(
            slot,
            ForkProgress::new_from_bank(
                &bank,
                bank.collector_id(),
                validator_node_to_vote_keys
                    .get(bank.collector_id())
                    .unwrap(),
                Some(0),
                0,
                0,
            ),
        );
        assert!(progress.get_propagated_stats(slot).unwrap().is_leader_slot);
        bank.freeze();
        bank_forks.write().unwrap().insert(bank);
        parent_slot = slot;
    }

    // Expect a single slot when latest_leader_slot starts a consecutive range.
    let latest_leader_slot = 0;
    ReplayStage::maybe_retransmit_unpropagated_slots(
        "test",
        &retransmit_slots_sender,
        &mut progress,
        latest_leader_slot,
    );
    let received_slots = receive_slots(&retransmit_slots_receiver);
    assert_eq!(received_slots, vec![0]);

    // Expect a range of slots back to the start of the consecutive run.
    let latest_leader_slot = 6;
    ReplayStage::maybe_retransmit_unpropagated_slots(
        "test",
        &retransmit_slots_sender,
        &mut progress,
        latest_leader_slot,
    );
    let received_slots = receive_slots(&retransmit_slots_receiver);
    assert_eq!(received_slots, vec![4, 5, 6]);

    // Expect the range to skip over the discontinuity at slot 10.
    let latest_leader_slot = 11;
    ReplayStage::maybe_retransmit_unpropagated_slots(
        "test",
        &retransmit_slots_sender,
        &mut progress,
        latest_leader_slot,
    );
    let received_slots = receive_slots(&retransmit_slots_receiver);
    assert_eq!(received_slots, vec![8, 9, 11]);
}
2021-03-24 23:41:52 -07:00
fn run_compute_and_select_forks (
bank_forks : & RwLock < BankForks > ,
progress : & mut ProgressMap ,
tower : & mut Tower ,
heaviest_subtree_fork_choice : & mut HeaviestSubtreeForkChoice ,
2021-04-21 14:40:35 -07:00
latest_validator_votes_for_frozen_banks : & mut LatestValidatorVotesForFrozenBanks ,
2021-03-24 23:41:52 -07:00
) -> ( Option < Slot > , Option < Slot > ) {
let mut frozen_banks : Vec < _ > = bank_forks
. read ( )
. unwrap ( )
. frozen_banks ( )
. values ( )
. cloned ( )
. collect ( ) ;
let ancestors = & bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ;
2022-04-28 11:51:00 -07:00
let descendants = & bank_forks . read ( ) . unwrap ( ) . descendants ( ) ;
2021-03-24 23:41:52 -07:00
ReplayStage ::compute_bank_stats (
& Pubkey ::default ( ) ,
& bank_forks . read ( ) . unwrap ( ) . ancestors ( ) ,
& mut frozen_banks ,
tower ,
progress ,
& VoteTracker ::default ( ) ,
& ClusterSlots ::default ( ) ,
2021-06-18 06:34:46 -07:00
bank_forks ,
2021-03-24 23:41:52 -07:00
heaviest_subtree_fork_choice ,
2021-04-21 14:40:35 -07:00
latest_validator_votes_for_frozen_banks ,
2021-03-24 23:41:52 -07:00
) ;
let ( heaviest_bank , heaviest_bank_on_same_fork ) = heaviest_subtree_fork_choice
2021-06-18 06:34:46 -07:00
. select_forks ( & frozen_banks , tower , progress , ancestors , bank_forks ) ;
2021-03-24 23:41:52 -07:00
let SelectVoteAndResetForkResult {
vote_bank ,
reset_bank ,
..
} = ReplayStage ::select_vote_and_reset_forks (
& heaviest_bank ,
heaviest_bank_on_same_fork . as_ref ( ) ,
2021-06-18 06:34:46 -07:00
ancestors ,
descendants ,
2021-03-24 23:41:52 -07:00
progress ,
tower ,
2021-05-04 00:51:42 -07:00
latest_validator_votes_for_frozen_banks ,
2021-06-11 03:09:57 -07:00
heaviest_subtree_fork_choice ,
2021-03-24 23:41:52 -07:00
) ;
(
vote_bank . map ( | ( b , _ ) | b . slot ( ) ) ,
reset_bank . map ( | b | b . slot ( ) ) ,
)
}
2021-07-08 19:07:32 -07:00
type GenerateVotes = Box < dyn Fn ( Vec < Pubkey > ) -> HashMap < Pubkey , Vec < Slot > > > ;
2021-07-18 17:04:25 -07:00
pub fn setup_forks_from_tree (
2021-07-08 19:07:32 -07:00
tree : Tree < Slot > ,
num_keys : usize ,
generate_votes : Option < GenerateVotes > ,
) -> ( VoteSimulator , Blockstore ) {
2021-06-28 20:19:42 -07:00
let mut vote_simulator = VoteSimulator ::new ( num_keys ) ;
2021-07-08 19:07:32 -07:00
let pubkeys : Vec < Pubkey > = vote_simulator
. validator_keypairs
. values ( )
. map ( | k | k . node_keypair . pubkey ( ) )
. collect ( ) ;
let cluster_votes = generate_votes
. map ( | generate_votes | generate_votes ( pubkeys ) )
. unwrap_or_default ( ) ;
2021-08-02 14:33:28 -07:00
vote_simulator . fill_bank_forks ( tree . clone ( ) , & cluster_votes , true ) ;
2021-06-28 20:19:42 -07:00
let ledger_path = get_tmp_ledger_path! ( ) ;
let blockstore = Blockstore ::open ( & ledger_path ) . unwrap ( ) ;
blockstore . add_tree ( tree , false , true , 2 , Hash ::default ( ) ) ;
( vote_simulator , blockstore )
}
2021-07-08 19:07:32 -07:00
fn setup_default_forks (
num_keys : usize ,
generate_votes : Option < GenerateVotes > ,
) -> ( VoteSimulator , Blockstore ) {
2020-05-05 14:07:21 -07:00
/*
Build fork structure :
2021-03-24 23:41:52 -07:00
2020-05-05 14:07:21 -07:00
slot 0
|
slot 1
/ \
slot 2 |
| slot 3
slot 4 |
slot 5
|
slot 6
* /
2021-06-28 20:19:42 -07:00
let tree = tr ( 0 ) / ( tr ( 1 ) / ( tr ( 2 ) / ( tr ( 4 ) ) ) / ( tr ( 3 ) / ( tr ( 5 ) / ( tr ( 6 ) ) ) ) ) ;
2021-07-08 19:07:32 -07:00
setup_forks_from_tree ( tree , num_keys , generate_votes )
2020-05-05 14:07:21 -07:00
}
/// Returns `true` iff `map1` and `map2` contain exactly the same key/value
/// pairs.
///
/// Fix: the previous implementation called `map2.get(k).unwrap()`, which
/// panics when the maps have equal length but different key sets — the length
/// check alone does not guarantee identical keys. Comparing against
/// `Some(v)` returns `false` in that case instead of panicking.
fn check_map_eq<K: Eq + std::hash::Hash + std::fmt::Debug, T: PartialEq + std::fmt::Debug>(
    map1: &HashMap<K, T>,
    map2: &HashMap<K, T>,
) -> bool {
    map1.len() == map2.len() && map1.iter().all(|(k, v)| map2.get(k) == Some(v))
}
#[test]
fn test_check_for_vote_only_mode() {
    let in_vote_only_mode = AtomicBool::new(false);
    let genesis = create_genesis_config(10_000).genesis_config;
    let bank_forks = RwLock::new(BankForks::new(Bank::new_for_tests(&genesis)));

    // Far behind the cluster: the flag flips on.
    ReplayStage::check_for_vote_only_mode(1000, 0, &in_vote_only_mode, &bank_forks);
    assert!(in_vote_only_mode.load(Ordering::Relaxed));

    // Close to the cluster again: the flag clears.
    ReplayStage::check_for_vote_only_mode(10, 0, &in_vote_only_mode, &bank_forks);
    assert!(!in_vote_only_mode.load(Ordering::Relaxed));
}
2018-07-03 21:14:08 -07:00
}