Move block-time caching earlier (#17109)

* Require only that the blockstore block-time slot be a recognized slot, instead of a root

* Move cache_block_time to after Bank freeze

* Single use statement

* Pass transaction_status_sender by reference

* Remove unnecessary slot-existence check before caching block time altogether

* Move block-time existence check into Blockstore::cache_block_time, Blockstore no longer needed in blockstore_processor helper
This commit is contained in:
Tyera Eulberg 2021-05-10 13:14:56 -06:00 committed by GitHub
parent f39dda00e0
commit 6e9deaf1bd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 116 additions and 97 deletions

View File

@ -1,18 +1,20 @@
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
pub use solana_ledger::blockstore_processor::CacheBlockTimeSender;
use {
crossbeam_channel::{Receiver, RecvTimeoutError},
solana_ledger::blockstore::Blockstore,
solana_measure::measure::Measure,
solana_runtime::bank::Bank,
std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub type CacheBlockTimeReceiver = Receiver<Arc<Bank>>;
pub type CacheBlockTimeSender = Sender<Arc<Bank>>;
pub struct CacheBlockTimeService {
thread_hdl: JoinHandle<()>,

View File

@ -372,7 +372,8 @@ impl ReplayStage {
&my_pubkey,
&vote_account,
&mut progress,
transaction_status_sender.clone(),
transaction_status_sender.as_ref(),
cache_block_time_sender.as_ref(),
&verify_recyclers,
&mut heaviest_subtree_fork_choice,
&replay_vote_sender,
@ -565,7 +566,6 @@ impl ReplayStage {
&subscriptions,
&block_commitment_cache,
&mut heaviest_subtree_fork_choice,
&cache_block_time_sender,
&bank_notification_sender,
&mut gossip_duplicate_confirmed_slots,
&mut unfrozen_gossip_verified_vote_hashes,
@ -1187,7 +1187,7 @@ impl ReplayStage {
bank: &Arc<Bank>,
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: &ReplayVoteSender,
verify_recyclers: &VerifyRecyclers,
) -> result::Result<usize, BlockstoreProcessorError> {
@ -1294,7 +1294,6 @@ impl ReplayStage {
subscriptions: &Arc<RpcSubscriptions>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
cache_block_time_sender: &Option<CacheBlockTimeSender>,
bank_notification_sender: &Option<BankNotificationSender>,
gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots,
unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes,
@ -1331,12 +1330,6 @@ impl ReplayStage {
blockstore
.set_roots(&rooted_slots)
.expect("Ledger set roots failed");
Self::cache_block_times(
blockstore,
bank_forks,
&rooted_slots,
cache_block_time_sender,
);
let highest_confirmed_root = Some(
block_commitment_cache
.read()
@ -1630,7 +1623,8 @@ impl ReplayStage {
my_pubkey: &Pubkey,
vote_account: &Pubkey,
progress: &mut ProgressMap,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
verify_recyclers: &VerifyRecyclers,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
replay_vote_sender: &ReplayVoteSender,
@ -1694,7 +1688,7 @@ impl ReplayStage {
&bank,
&blockstore,
bank_progress,
transaction_status_sender.clone(),
transaction_status_sender,
replay_vote_sender,
verify_recyclers,
);
@ -1729,7 +1723,7 @@ impl ReplayStage {
);
did_complete_bank = true;
info!("bank frozen: {}", bank.slot());
if let Some(transaction_status_sender) = transaction_status_sender.clone() {
if let Some(transaction_status_sender) = transaction_status_sender {
transaction_status_sender.send_transaction_status_freeze_message(&bank);
}
bank.freeze();
@ -1755,6 +1749,7 @@ impl ReplayStage {
.send(BankNotification::Frozen(bank.clone()))
.unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
}
blockstore_processor::cache_block_time(&bank, cache_block_time_sender);
let bank_hash = bank.hash();
if let Some(new_frozen_voters) =
@ -2455,36 +2450,6 @@ impl ReplayStage {
}
}
fn cache_block_times(
blockstore: &Arc<Blockstore>,
bank_forks: &Arc<RwLock<BankForks>>,
rooted_slots: &[Slot],
cache_block_time_sender: &Option<CacheBlockTimeSender>,
) {
if let Some(cache_block_time_sender) = cache_block_time_sender {
for slot in rooted_slots {
if blockstore
.get_block_time(*slot)
.unwrap_or_default()
.is_none()
{
if let Some(rooted_bank) = bank_forks.read().unwrap().get(*slot) {
cache_block_time_sender
.send(rooted_bank.clone())
.unwrap_or_else(|err| {
warn!("cache_block_time_sender failed: {:?}", err)
});
} else {
error!(
"rooted_bank {:?} not available in BankForks; block time not cached",
slot
);
}
}
}
}
}
pub fn get_unlock_switch_vote_slot(cluster_type: ClusterType) -> Slot {
match cluster_type {
ClusterType::Development => 0,
@ -3381,7 +3346,7 @@ pub(crate) mod tests {
&bank,
&mut entries,
true,
Some(TransactionStatusSender {
Some(&TransactionStatusSender {
sender: transaction_status_sender,
enable_cpi_and_log_storage: false,
}),

View File

@ -693,7 +693,7 @@ mod tests {
..ProcessOptions::default()
};
let (bank_forks, cached_leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks));

View File

@ -1119,7 +1119,10 @@ fn new_banks_from_ledger(
process_options,
transaction_history_services
.transaction_status_sender
.clone(),
.as_ref(),
transaction_history_services
.cache_block_time_sender
.as_ref(),
)
.unwrap_or_else(|err| {
error!("Failed to load ledger: {:?}", err);

View File

@ -703,6 +703,7 @@ fn load_bank_forks(
snapshot_config.as_ref(),
process_options,
None,
None,
)
}

View File

@ -1,8 +1,8 @@
use crate::{
blockstore::Blockstore,
blockstore_processor::{
self, BlockstoreProcessorError, BlockstoreProcessorResult, ProcessOptions,
TransactionStatusSender,
self, BlockstoreProcessorError, BlockstoreProcessorResult, CacheBlockTimeSender,
ProcessOptions, TransactionStatusSender,
},
entry::VerifyRecyclers,
leader_schedule_cache::LeaderScheduleCache,
@ -36,7 +36,8 @@ pub fn load(
shrink_paths: Option<Vec<PathBuf>>,
snapshot_config: Option<&SnapshotConfig>,
process_options: ProcessOptions,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
) -> LoadResult {
if let Some(snapshot_config) = snapshot_config.as_ref() {
info!(
@ -96,6 +97,7 @@ pub fn load(
&process_options,
&VerifyRecyclers::default(),
transaction_status_sender,
cache_block_time_sender,
),
Some(deserialized_snapshot_hash),
);
@ -113,6 +115,7 @@ pub fn load(
&blockstore,
account_paths,
process_options,
cache_block_time_sender,
),
None,
)

View File

@ -1755,10 +1755,11 @@ impl Blockstore {
}
pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> {
if !self.is_root(slot) {
return Err(BlockstoreError::SlotNotRooted);
if self.get_block_time(slot).unwrap_or_default().is_none() {
self.blocktime_cf.put(slot, &timestamp)
} else {
Ok(())
}
self.blocktime_cf.put(slot, &timestamp)
}
pub fn get_first_available_block(&self) -> Result<Slot> {

View File

@ -101,7 +101,7 @@ fn get_first_error(
fn execute_batch(
batch: &TransactionBatch,
bank: &Arc<Bank>,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
timings: &mut ExecuteTimings,
) -> Result<()> {
@ -163,7 +163,7 @@ fn execute_batches(
bank: &Arc<Bank>,
batches: &[TransactionBatch],
entry_callback: Option<&ProcessCallback>,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
timings: &mut ExecuteTimings,
) -> Result<()> {
@ -173,12 +173,12 @@ fn execute_batches(
thread_pool.borrow().install(|| {
batches
.into_par_iter()
.map_with(transaction_status_sender, |sender, batch| {
.map(|batch| {
let mut timings = ExecuteTimings::default();
let result = execute_batch(
batch,
bank,
sender.clone(),
transaction_status_sender,
replay_vote_sender,
&mut timings,
);
@ -207,7 +207,7 @@ pub fn process_entries(
bank: &Arc<Bank>,
entries: &mut [Entry],
randomize: bool,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
) -> Result<()> {
let mut timings = ExecuteTimings::default();
@ -232,7 +232,7 @@ fn process_entries_with_callback(
entries: &mut [EntryType],
randomize: bool,
entry_callback: Option<&ProcessCallback>,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
timings: &mut ExecuteTimings,
) -> Result<()> {
@ -253,7 +253,7 @@ fn process_entries_with_callback(
bank,
&batches,
entry_callback,
transaction_status_sender.clone(),
transaction_status_sender,
replay_vote_sender,
timings,
)?;
@ -304,7 +304,7 @@ fn process_entries_with_callback(
bank,
&batches,
entry_callback,
transaction_status_sender.clone(),
transaction_status_sender,
replay_vote_sender,
timings,
)?;
@ -376,6 +376,7 @@ pub fn process_blockstore(
blockstore: &Blockstore,
account_paths: Vec<PathBuf>,
opts: ProcessOptions,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
) -> BlockstoreProcessorResult {
if let Some(num_threads) = opts.override_num_threads {
PAR_THREAD_POOL.with(|pool| {
@ -399,8 +400,21 @@ pub fn process_blockstore(
let bank0 = Arc::new(bank0);
info!("processing ledger for slot 0...");
let recyclers = VerifyRecyclers::default();
process_bank_0(&bank0, blockstore, &opts, &recyclers);
do_process_blockstore_from_root(blockstore, bank0, &opts, &recyclers, None)
process_bank_0(
&bank0,
blockstore,
&opts,
&recyclers,
cache_block_time_sender,
);
do_process_blockstore_from_root(
blockstore,
bank0,
&opts,
&recyclers,
None,
cache_block_time_sender,
)
}
// Process blockstore from a known root bank
@ -409,7 +423,8 @@ pub(crate) fn process_blockstore_from_root(
bank: Bank,
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
) -> BlockstoreProcessorResult {
do_process_blockstore_from_root(
blockstore,
@ -417,6 +432,7 @@ pub(crate) fn process_blockstore_from_root(
opts,
recyclers,
transaction_status_sender,
cache_block_time_sender,
)
}
@ -425,7 +441,8 @@ fn do_process_blockstore_from_root(
bank: Arc<Bank>,
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
) -> BlockstoreProcessorResult {
info!("processing ledger from slot {}...", bank.slot());
@ -486,6 +503,7 @@ fn do_process_blockstore_from_root(
opts,
recyclers,
transaction_status_sender,
cache_block_time_sender,
&mut timing,
)?;
initial_forks.sort_by_key(|bank| bank.slot());
@ -586,7 +604,7 @@ fn confirm_full_slot(
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
progress: &mut ConfirmationProgress,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
@ -665,7 +683,7 @@ pub fn confirm_slot(
timing: &mut ConfirmationTiming,
progress: &mut ConfirmationProgress,
skip_verification: bool,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
entry_callback: Option<&ProcessCallback>,
recyclers: &VerifyRecyclers,
@ -785,6 +803,7 @@ fn process_bank_0(
blockstore: &Blockstore,
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
) {
assert_eq!(bank0.slot(), 0);
let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
@ -800,6 +819,7 @@ fn process_bank_0(
)
.expect("processing for bank 0 must succeed");
bank0.freeze();
cache_block_time(bank0, cache_block_time_sender);
}
// Given a bank, add its children to the pending slots queue if those children slots are
@ -857,6 +877,7 @@ fn process_next_slots(
// Iterate through blockstore processing slots starting from the root slot pointed to by the
// given `meta` and return a vector of frozen bank forks
#[allow(clippy::too_many_arguments)]
fn load_frozen_forks(
root_bank: &Arc<Bank>,
root_meta: &SlotMeta,
@ -865,7 +886,8 @@ fn load_frozen_forks(
root: &mut Slot,
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
timing: &mut ExecuteTimings,
) -> result::Result<Vec<Arc<Bank>>, BlockstoreProcessorError> {
let mut initial_forks = HashMap::new();
@ -919,7 +941,8 @@ fn load_frozen_forks(
opts,
recyclers,
&mut progress,
transaction_status_sender.clone(),
transaction_status_sender,
cache_block_time_sender,
None,
timing,
)
@ -1077,7 +1100,8 @@ fn process_single_slot(
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
progress: &mut ConfirmationProgress,
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_time_sender: Option<&CacheBlockTimeSender>,
replay_vote_sender: Option<&ReplayVoteSender>,
timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
@ -1097,6 +1121,7 @@ fn process_single_slot(
})?;
bank.freeze(); // all banks handled by this routine are created from complete slots
cache_block_time(bank, cache_block_time_sender);
Ok(())
}
@ -1171,6 +1196,16 @@ impl TransactionStatusSender {
}
}
pub type CacheBlockTimeSender = Sender<Arc<Bank>>;
/// Best-effort hand-off of a frozen `bank` to the block-time caching service.
///
/// When `cache_block_time_sender` is `Some`, a clone of the bank `Arc` is sent
/// over the channel; a failed send is logged as a warning and otherwise
/// ignored. When the sender is `None` this is a no-op.
pub fn cache_block_time(bank: &Arc<Bank>, cache_block_time_sender: Option<&CacheBlockTimeSender>) {
    let sender = match cache_block_time_sender {
        Some(sender) => sender,
        None => return,
    };
    if let Err(err) = sender.send(bank.clone()) {
        warn!("cache_block_time_sender failed: {:?}", err);
    }
}
// used for tests only
pub fn fill_blockstore_slot_with_ticks(
blockstore: &Blockstore,
@ -1276,6 +1311,7 @@ pub mod tests {
poh_verify: true,
..ProcessOptions::default()
},
None,
)
.unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
@ -1320,6 +1356,7 @@ pub mod tests {
poh_verify: true,
..ProcessOptions::default()
},
None,
)
.unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
@ -1336,6 +1373,7 @@ pub mod tests {
poh_verify: true,
..ProcessOptions::default()
},
None,
)
.unwrap();
@ -1391,7 +1429,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
}
@ -1456,7 +1494,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); // slot 1 isn't "full", we stop at slot zero
@ -1475,7 +1513,7 @@ pub mod tests {
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
// Slot 0 should not show up in the ending bank_forks_info
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
// slot 1 isn't "full", we stop at slot zero
assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 3]);
@ -1542,7 +1580,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
// One fork, other one is ignored b/c not a descendant of the root
assert_eq!(frozen_bank_slots(&bank_forks), vec![4]);
@ -1621,7 +1659,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]);
assert_eq!(bank_forks.working_bank().slot(), 4);
@ -1681,6 +1719,7 @@ pub mod tests {
&blockstore,
Vec::new(),
ProcessOptions::default(),
None,
)
.unwrap();
@ -1730,6 +1769,7 @@ pub mod tests {
&blockstore,
Vec::new(),
ProcessOptions::default(),
None,
)
.unwrap();
@ -1782,6 +1822,7 @@ pub mod tests {
&blockstore,
Vec::new(),
ProcessOptions::default(),
None,
)
.unwrap();
@ -1832,7 +1873,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
// There is one fork, head is last_slot + 1
assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]);
@ -1976,7 +2017,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]);
assert_eq!(bank_forks.root(), 0);
@ -2005,7 +2046,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
let bank = bank_forks[0].clone();
@ -2022,7 +2063,7 @@ pub mod tests {
override_num_threads: Some(1),
..ProcessOptions::default()
};
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
PAR_THREAD_POOL.with(|pool| {
assert_eq!(pool.borrow().current_num_threads(), 1);
});
@ -2039,7 +2080,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (_bank_forks, leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(leader_schedule.max_schedules(), std::usize::MAX);
}
@ -2099,7 +2140,7 @@ pub mod tests {
entry_callback: Some(entry_callback),
..ProcessOptions::default()
};
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(*callback_counter.write().unwrap(), 2);
}
@ -2753,7 +2794,7 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
// Should be able to fetch slot 0 because we specified halting at slot 0, even
// if there is a greater root at slot 1.
@ -2803,7 +2844,7 @@ pub mod tests {
..ProcessOptions::default()
};
let recyclers = VerifyRecyclers::default();
process_bank_0(&bank0, &blockstore, &opts, &recyclers);
process_bank_0(&bank0, &blockstore, &opts, &recyclers, None);
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
confirm_full_slot(
&blockstore,
@ -2820,7 +2861,8 @@ pub mod tests {
// Test process_blockstore_from_root() from slot 1 onwards
let (bank_forks, _leader_schedule) =
do_process_blockstore_from_root(&blockstore, bank1, &opts, &recyclers, None).unwrap();
do_process_blockstore_from_root(&blockstore, bank1, &opts, &recyclers, None, None)
.unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
assert_eq!(bank_forks.working_bank().slot(), 6);
@ -3247,7 +3289,8 @@ pub mod tests {
..ProcessOptions::default()
};
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts.clone()).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts.clone(), None)
.unwrap();
// prepare to add votes
let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash();
@ -3279,7 +3322,8 @@ pub mod tests {
);
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts.clone()).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts.clone(), None)
.unwrap();
assert_eq!(bank_forks.root(), expected_root_slot);
assert_eq!(
@ -3334,7 +3378,7 @@ pub mod tests {
);
let (bank_forks, _leader_schedule) =
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
assert_eq!(bank_forks.root(), really_expected_root_slot);
}