Blockstore should drop signals before validator exit (#24025)

* timeout for validator exits

* clippy

* print backtrace on panic

* add backtrace package

* increase timeout to 30s

* debug logging

* make rpc complete service non-blocking

* reduce log level

* remove logging

* recv_timeout

* remove backtrace

* remove sleep

* wip

* remove unused variable

* add comments

* Update core/src/validator.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Update core/src/validator.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* whitespace

* more whitespace

* fix build

* clean up import

* add mutex for signal senders in blockstore

* remove mut

* refactor: extract add signal functions

* make blockstore signal private

* let compiler infer mutex type

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
HaoranYi 2022-04-04 11:38:05 -05:00 committed by GitHub
parent f8f3edac3c
commit 6ba4e870c4
2 changed files with 38 additions and 12 deletions
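
The change is easier to follow with the shutdown mechanism in mind: the blockstore's signal senders are crossbeam channels, and a thread blocked on the receiving end only observes a disconnect once every Sender has been dropped. Below is a minimal, self-contained sketch of that behavior; it is illustrative only (the names `signals` and `worker` are not validator code), and it assumes the crossbeam-channel crate these senders come from.

use crossbeam_channel::{bounded, RecvTimeoutError, Sender};
use std::{sync::Mutex, thread, time::Duration};

fn main() {
    let (sender, receiver) = bounded::<bool>(1);
    // Stand-in for the blockstore's sender list kept behind a Mutex.
    let signals: Mutex<Vec<Sender<bool>>> = Mutex::new(vec![sender]);

    let worker = thread::spawn(move || loop {
        match receiver.recv_timeout(Duration::from_millis(200)) {
            Ok(_) => { /* a "new shreds" signal arrived; keep working */ }
            Err(RecvTimeoutError::Timeout) => continue, // nothing yet, poll again
            Err(RecvTimeoutError::Disconnected) => break, // all senders dropped: shut down
        }
    });

    // Equivalent in spirit to Blockstore::drop_signal() called from Validator::exit():
    signals.lock().unwrap().clear();
    worker.join().unwrap();
}

Clearing the sender vectors on exit is what lets services parked on recv_timeout (see the "recv_timeout" commit above) return promptly instead of waiting out the full validator-exit timeout.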

core/src/validator.rs

@@ -337,6 +337,7 @@ pub struct Validator {
     ip_echo_server: Option<solana_net_utils::IpEchoServer>,
     pub cluster_info: Arc<ClusterInfo>,
     pub bank_forks: Arc<RwLock<BankForks>>,
+    pub blockstore: Arc<Blockstore>,
     accountsdb_repl_service: Option<AccountsDbReplService>,
     geyser_plugin_service: Option<GeyserPluginService>,
 }
@@ -656,7 +657,7 @@ impl Validator {
     bank.ticks_per_slot(),
     &id,
     &blockstore,
-    blockstore.new_shreds_signals.first().cloned(),
+    blockstore.get_new_shred_signal(0),
     &leader_schedule_cache,
     &poh_config,
     Some(poh_timing_point_sender),
@@ -856,7 +857,7 @@ impl Validator {
     record_receiver,
 );
 assert_eq!(
-    blockstore.new_shreds_signals.len(),
+    blockstore.get_new_shred_signals_len(),
     1,
     "New shred signal for the TVU should be the same as the clear bank signal."
 );
@@ -994,6 +995,7 @@ impl Validator {
     validator_exit: config.validator_exit.clone(),
     cluster_info,
     bank_forks,
+    blockstore: blockstore.clone(),
     accountsdb_repl_service,
     geyser_plugin_service,
 }
@@ -1002,6 +1004,9 @@ impl Validator {
 // Used for notifying many nodes in parallel to exit
 pub fn exit(&mut self) {
     self.validator_exit.write().unwrap().exit();
+
+    // drop all signals in blockstore
+    self.blockstore.drop_signal();
 }

 pub fn close(mut self) {

ledger/src/blockstore.rs

@@ -174,8 +174,8 @@ pub struct Blockstore {
     bank_hash_cf: LedgerColumn<cf::BankHash>,
     last_root: RwLock<Slot>,
     insert_shreds_lock: Mutex<()>,
-    pub new_shreds_signals: Vec<Sender<bool>>,
-    pub completed_slots_senders: Vec<CompletedSlotsSender>,
+    new_shreds_signals: Mutex<Vec<Sender<bool>>>,
+    completed_slots_senders: Mutex<Vec<CompletedSlotsSender>>,
     pub shred_timing_point_sender: Option<PohTimingSender>,
     pub lowest_cleanup_slot: RwLock<Slot>,
     no_compaction: bool,
@@ -444,8 +444,8 @@ impl Blockstore {
     block_height_cf,
     program_costs_cf,
     bank_hash_cf,
-    new_shreds_signals: vec![],
-    completed_slots_senders: vec![],
+    new_shreds_signals: Mutex::default(),
+    completed_slots_senders: Mutex::default(),
     shred_timing_point_sender: None,
     insert_shreds_lock: Mutex::<()>::default(),
     last_root,
@@ -463,13 +463,13 @@ impl Blockstore {
     ledger_path: &Path,
     options: BlockstoreOptions,
 ) -> Result<BlockstoreSignals> {
-    let mut blockstore = Self::open_with_options(ledger_path, options)?;
+    let blockstore = Self::open_with_options(ledger_path, options)?;
     let (ledger_signal_sender, ledger_signal_receiver) = bounded(1);
     let (completed_slots_sender, completed_slots_receiver) =
         bounded(MAX_COMPLETED_SLOTS_IN_CHANNEL);

-    blockstore.new_shreds_signals = vec![ledger_signal_sender];
-    blockstore.completed_slots_senders = vec![completed_slots_sender];
+    blockstore.add_new_shred_signal(ledger_signal_sender);
+    blockstore.add_completed_slots_signal(completed_slots_sender);

     Ok(BlockstoreSignals {
         blockstore,
@@ -1027,7 +1027,7 @@ impl Blockstore {
     let mut start = Measure::start("Commit Working Sets");
     let (should_signal, newly_completed_slots) = commit_slot_meta_working_set(
         &slot_meta_working_set,
-        &self.completed_slots_senders,
+        &self.completed_slots_senders.lock().unwrap(),
         &mut write_batch,
     )?;
@@ -1049,8 +1049,8 @@ impl Blockstore {
     metrics.write_batch_elapsed += start.as_us();

     send_signals(
-        &self.new_shreds_signals,
-        &self.completed_slots_senders,
+        &self.new_shreds_signals.lock().unwrap(),
+        &self.completed_slots_senders.lock().unwrap(),
         should_signal,
         newly_completed_slots,
     );
@@ -1063,6 +1063,27 @@ impl Blockstore {
     Ok((newly_completed_data_sets, inserted_indices))
 }

+pub fn add_new_shred_signal(&self, s: Sender<bool>) {
+    self.new_shreds_signals.lock().unwrap().push(s);
+}
+
+pub fn add_completed_slots_signal(&self, s: CompletedSlotsSender) {
+    self.completed_slots_senders.lock().unwrap().push(s);
+}
+
+pub fn get_new_shred_signals_len(&self) -> usize {
+    self.new_shreds_signals.lock().unwrap().len()
+}
+
+pub fn get_new_shred_signal(&self, index: usize) -> Option<Sender<bool>> {
+    self.new_shreds_signals.lock().unwrap().get(index).cloned()
+}
+
+pub fn drop_signal(&self) {
+    self.new_shreds_signals.lock().unwrap().clear();
+    self.completed_slots_senders.lock().unwrap().clear();
+}
+
 /// Range-delete all entries which prefix matches the specified `slot` and
 /// clear all the related `SlotMeta` except its next_slots.
 ///
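
A note on the design choice in the blockstore half of the diff: wrapping the two sender lists in a Mutex gives interior mutability, so the new helpers (add_new_shred_signal, drop_signal, and friends) can take &self instead of &mut self. That is why open_with_signal no longer needs a `mut` binding and why Validator::exit can clear the senders through its shared Arc<Blockstore>. A small sketch of the pattern, with hypothetical names rather than the real Blockstore type:

use crossbeam_channel::{bounded, Sender};
use std::sync::{Arc, Mutex};

// Hypothetical stand-in for the Blockstore fields touched by this commit.
struct Signals {
    new_shreds_signals: Mutex<Vec<Sender<bool>>>,
}

impl Signals {
    // `&self` is enough: the Mutex supplies the mutability.
    fn add_new_shred_signal(&self, s: Sender<bool>) {
        self.new_shreds_signals.lock().unwrap().push(s);
    }

    fn drop_signal(&self) {
        self.new_shreds_signals.lock().unwrap().clear();
    }
}

fn main() {
    // No `mut` binding required, mirroring the open_with_signal change.
    let signals = Arc::new(Signals {
        new_shreds_signals: Mutex::new(vec![]),
    });

    let (sender, _receiver) = bounded::<bool>(1);
    signals.add_new_shred_signal(sender);

    // Callable through a shared reference, e.g. from Validator::exit().
    let shared = Arc::clone(&signals);
    shared.drop_signal();
}

The cost is a lock acquisition wherever the senders are read, which the diff pays by locking the vectors at the send_signals and commit_slot_meta_working_set call sites.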