add ActiveStats to track when long-running, expensive background processes are running (#22608)
parent 38b02bbcc0
commit 4abf7a7f88
@@ -33,6 +33,7 @@ use {
         ACCOUNTS_INDEX_CONFIG_FOR_TESTING,
     },
     accounts_update_notifier_interface::AccountsUpdateNotifier,
+    active_stats::{ActiveStatItem, ActiveStats},
     ancestors::Ancestors,
     append_vec::{AppendVec, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion},
     cache_hash_data::CacheHashData,
@@ -1061,6 +1062,8 @@ pub struct AccountsDb {
     filler_account_count: usize,
     pub filler_account_suffix: Option<Pubkey>,

+    active_stats: ActiveStats,
+
     // # of passes should be a function of the total # of accounts that are active.
     // higher passes = slower total time, lower dynamic memory usage
     // lower passes = faster total time, higher dynamic memory usage
@@ -1555,6 +1558,7 @@ impl AccountsDb {
             Self::bins_per_pass(num_hash_scan_passes);

         AccountsDb {
+            active_stats: ActiveStats::default(),
             accounts_index,
             storage: AccountStorage::default(),
             accounts_cache: AccountsCache::default(),
@@ -2061,6 +2065,8 @@ impl AccountsDb {
         is_startup: bool,
         last_full_snapshot_slot: Option<Slot>,
     ) {
+        let _guard = self.active_stats.activate(ActiveStatItem::Clean);
+
         let mut measure_all = Measure::start("clean_accounts");
         let max_clean_root = self.max_clean_root(max_clean_root);

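The `_guard` binding added above is a drop guard from the new active_stats.rs module introduced later in this diff: `activate` increments the matching counter and emits a datapoint, and dropping the guard decrements it again when the function returns. A minimal sketch of the pattern, assuming crate-internal code; `clean_like_pass` and `stats` are hypothetical names used only for illustration, not part of this commit:

    use crate::active_stats::{ActiveStatItem, ActiveStats};

    // Illustrative only: how the guard's scope drives the "clean" counter.
    fn clean_like_pass(stats: &ActiveStats) {
        let _guard = stats.activate(ActiveStatItem::Clean); // counter 0 -> 1, datapoint emitted
        // ... long-running cleaning work; the counter stays at 1 for the whole pass ...
    } // `_guard` dropped here: counter 1 -> 0, another datapoint emitted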
@@ -3011,6 +3017,8 @@ impl AccountsDb {
     }

     pub fn shrink_candidate_slots(&self) -> usize {
+        let _guard = self.active_stats.activate(ActiveStatItem::Shrink);
+
         let shrink_candidates_slots =
             std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap());
         let (shrink_slots, shrink_slots_next_batch) = {
@@ -3057,6 +3065,7 @@ impl AccountsDb {
     }

     pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option<Slot>) {
+        let _guard = self.active_stats.activate(ActiveStatItem::Shrink);
         const DIRTY_STORES_CLEANING_THRESHOLD: usize = 10_000;
         const OUTER_CHUNK_SIZE: usize = 2000;
         if is_startup && self.caching_enabled {
@@ -4554,6 +4563,8 @@ impl AccountsDb {
         let mut account_bytes_saved = 0;
         let mut num_accounts_saved = 0;

+        let _guard = self.active_stats.activate(ActiveStatItem::Flush);
+
         // Note even if force_flush is false, we will still flush all roots <= the
         // given `requested_flush_root`, even if some of the later roots cannot be used for
         // cleaning due to an ongoing scan
@@ -5529,6 +5540,7 @@ impl AccountsDb {
         slots_per_epoch: Option<Slot>,
         is_startup: bool,
     ) -> Result<(Hash, u64), BankHashVerificationError> {
+        let _guard = self.active_stats.activate(ActiveStatItem::Hash);
         let (hash, total_lamports) = self.calculate_accounts_hash_helper(
             use_index,
             slot,
@@ -0,0 +1,65 @@
+//! keep track of areas of the validator that are currently active
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// counters of different areas of a validator which could be active
+#[derive(Debug, Default)]
+pub struct ActiveStats {
+    clean: AtomicUsize,
+    shrink: AtomicUsize,
+    hash: AtomicUsize,
+    flush: AtomicUsize,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum ActiveStatItem {
+    Clean,
+    Shrink,
+    Hash,
+    Flush,
+}
+
+/// sole purpose is to handle 'drop' so that stat is decremented when self is dropped
+pub struct ActiveStatGuard<'a> {
+    stats: &'a ActiveStats,
+    item: ActiveStatItem,
+}
+
+impl<'a> Drop for ActiveStatGuard<'a> {
+    fn drop(&mut self) {
+        self.stats.update_and_log(self.item, |stat| {
+            stat.fetch_sub(1, Ordering::Relaxed).wrapping_sub(1)
+        });
+    }
+}
+
+impl ActiveStats {
+    #[must_use]
+    /// create a stack object to set the state to increment stat initially and decrement on drop
+    pub fn activate(&self, stat: ActiveStatItem) -> ActiveStatGuard<'_> {
+        self.update_and_log(stat, |stat| {
+            stat.fetch_add(1, Ordering::Relaxed).wrapping_add(1)
+        });
+        ActiveStatGuard {
+            stats: self,
+            item: stat,
+        }
+    }
+    /// update and log the change to the specified 'item'
+    fn update_and_log(&self, item: ActiveStatItem, modify_stat: impl Fn(&AtomicUsize) -> usize) {
+        let stat = match item {
+            ActiveStatItem::Clean => &self.clean,
+            ActiveStatItem::Shrink => &self.shrink,
+            ActiveStatItem::Hash => &self.hash,
+            ActiveStatItem::Flush => &self.flush,
+        };
+        let value = modify_stat(stat);
+        match item {
+            ActiveStatItem::Clean => datapoint_info!("accounts_db_active", ("clean", value, i64)),
+            ActiveStatItem::Shrink => {
+                datapoint_info!("accounts_db_active", ("shrink", value, i64))
+            }
+            ActiveStatItem::Hash => datapoint_info!("accounts_db_active", ("hash", value, i64)),
+            ActiveStatItem::Flush => datapoint_info!("accounts_db_active", ("flush", value, i64)),
+        };
+    }
+}
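Because each counter is an AtomicUsize and every guard pairs one fetch_add with one fetch_sub, overlapping activations of the same item are reported additively. A small usage sketch, again assuming crate-internal code; `overlapping_flushes` is a hypothetical helper for illustration, not part of this commit:

    use crate::active_stats::{ActiveStatItem, ActiveStats};

    // Illustrative only: nested or concurrent activations accumulate on the same counter.
    fn overlapping_flushes(stats: &ActiveStats) {
        let first = stats.activate(ActiveStatItem::Flush);  // "flush" reported as 1
        let second = stats.activate(ActiveStatItem::Flush); // "flush" reported as 2
        drop(second);                                       // back to 1
        drop(first);                                        // back to 0
    }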
@@ -10,6 +10,7 @@ pub mod accounts_hash;
 pub mod accounts_index;
 pub mod accounts_index_storage;
 pub mod accounts_update_notifier_interface;
+mod active_stats;
 pub mod ancestors;
 pub mod append_vec;
 pub mod bank;