diff --git a/runtime/src/accounts_index_storage.rs b/runtime/src/accounts_index_storage.rs
index 4799b35751..ca0199c278 100644
--- a/runtime/src/accounts_index_storage.rs
+++ b/runtime/src/accounts_index_storage.rs
@@ -114,7 +114,7 @@ impl<T: IndexValue> AccountsIndexStorage<T> {
             .and_then(|config| config.flush_threads)
             .unwrap_or_else(Self::num_threads);
 
-        let storage = Arc::new(BucketMapHolder::new(bins, config));
+        let storage = Arc::new(BucketMapHolder::new(bins, config, threads));
 
         let in_mem = (0..bins)
             .into_iter()
diff --git a/runtime/src/bucket_map_holder.rs b/runtime/src/bucket_map_holder.rs
index c954f1540d..f5493d0361 100644
--- a/runtime/src/bucket_map_holder.rs
+++ b/runtime/src/bucket_map_holder.rs
@@ -28,6 +28,8 @@ pub struct BucketMapHolder<T: IndexValue> {
     next_bucket_to_flush: Mutex<usize>,
     bins: usize,
 
+    pub threads: usize,
+
     // how much mb are we allowed to keep in the in-mem index?
     // Rest goes to disk.
     pub mem_budget_mb: Option<usize>,
@@ -130,7 +132,7 @@ impl<T: IndexValue> BucketMapHolder<T> {
         }
     }
 
-    pub fn new(bins: usize, config: &Option<AccountsIndexConfig>) -> Self {
+    pub fn new(bins: usize, config: &Option<AccountsIndexConfig>, threads: usize) -> Self {
         const DEFAULT_AGE_TO_STAY_IN_CACHE: Age = 5;
         let ages_to_stay_in_cache = config
             .as_ref()
@@ -154,6 +156,7 @@
             bins,
             startup: AtomicBool::default(),
             mem_budget_mb,
+            threads,
         }
     }
 
@@ -293,7 +296,7 @@ pub mod tests {
     fn test_next_bucket_to_flush() {
         solana_logger::setup();
         let bins = 4;
-        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()));
+        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
         let visited = (0..bins)
             .into_iter()
             .map(|_| AtomicUsize::default())
@@ -317,7 +320,7 @@
     fn test_age_increment() {
         solana_logger::setup();
         let bins = 4;
-        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()));
+        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
         for age in 0..513 {
             assert_eq!(test.current_age(), (age % 256) as Age);
 
@@ -337,7 +340,7 @@
     fn test_throttle() {
         solana_logger::setup();
         let bins = 100;
-        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()));
+        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
         let bins = test.bins as u64;
         let interval_ms = test.age_interval_ms();
         // 90% of time elapsed, all but 1 bins flushed, should not wait since we'll end up right on time
@@ -366,7 +369,7 @@
     fn test_age_time() {
        solana_logger::setup();
         let bins = 1;
-        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()));
+        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
         let threads = 2;
         let time = AGE_MS * 5 / 2;
         let expected = (time / AGE_MS) as Age;
@@ -386,7 +389,7 @@
     fn test_age_broad() {
         solana_logger::setup();
         let bins = 4;
-        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()));
+        let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
         assert_eq!(test.current_age(), 0);
         for _ in 0..bins {
             assert!(!test.all_buckets_flushed_at_current_age());
diff --git a/runtime/src/bucket_map_holder_stats.rs b/runtime/src/bucket_map_holder_stats.rs
index a793466450..7280de80ad 100644
--- a/runtime/src/bucket_map_holder_stats.rs
+++ b/runtime/src/bucket_map_holder_stats.rs
@@ -88,9 +88,8 @@ impl BucketMapHolderStats {
         now.saturating_sub(last) // could saturate to 0. That is ok.
     }
 
-    fn ms_per_age<T: IndexValue>(&self, storage: &BucketMapHolder<T>) -> u64 {
+    fn ms_per_age<T: IndexValue>(&self, storage: &BucketMapHolder<T>, elapsed_ms: u64) -> u64 {
         if !storage.get_startup() {
-            let elapsed_ms = self.get_elapsed_ms_and_reset();
             let age_now = storage.current_age();
             let last_age = self.last_age.swap(age_now, Ordering::Relaxed) as u64;
             let mut age_now = age_now as u64;
@@ -126,12 +125,22 @@
         }
     }
 
+    fn calc_percent(&self, ms: u64, elapsed_ms: u64) -> f32 {
+        if elapsed_ms == 0 {
+            0.0
+        } else {
+            ms as f32 / elapsed_ms as f32
+        }
+    }
+
     pub fn report_stats<T: IndexValue>(&self, storage: &BucketMapHolder<T>) {
         if !self.last_time.should_update(STATS_INTERVAL_MS) {
             return;
         }
 
-        let ms_per_age = self.ms_per_age(storage);
+        let elapsed_ms = self.get_elapsed_ms_and_reset();
+
+        let ms_per_age = self.ms_per_age(storage, elapsed_ms);
 
         let in_mem_per_bucket_counts = self
             .per_bucket_count
@@ -151,11 +160,17 @@
         let in_mem_stats = Self::get_stats(in_mem_per_bucket_counts);
         let disk_stats = Self::get_stats(disk_per_bucket_counts);
+        const US_PER_MS: u64 = 1_000;
+
         // all metrics during startup are written to a different data point
         let startup = storage.get_startup();
         let was_startup = self.last_was_startup.swap(startup, Ordering::Relaxed);
+
+        // sum of elapsed time in each thread
+        let mut thread_time_elapsed_ms = elapsed_ms * storage.threads as u64;
         datapoint_info!(
             if startup || was_startup {
+                thread_time_elapsed_ms *= 2; // more threads are allocated during startup
                 "accounts_index_startup"
             } else {
                 "accounts_index"
             },
@@ -167,14 +182,20 @@
             ),
             ("count", self.count.load(Ordering::Relaxed), i64),
             (
-                "bg_waiting_us",
-                self.bg_waiting_us.swap(0, Ordering::Relaxed),
-                i64
+                "bg_waiting_percent",
+                self.calc_percent(
+                    self.bg_waiting_us.swap(0, Ordering::Relaxed) / US_PER_MS,
+                    thread_time_elapsed_ms
+                ),
+                f64
             ),
             (
-                "bg_throttling_wait_us",
-                self.bg_throttling_wait_us.swap(0, Ordering::Relaxed),
-                i64
+                "bg_throttling_wait_percent",
+                self.calc_percent(
+                    self.bg_throttling_wait_us.swap(0, Ordering::Relaxed) / US_PER_MS,
+                    thread_time_elapsed_ms
+                ),
+                f64
             ),
             (
                 "held_in_mem_slot_list_len",
diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs
index 95a388ecb7..7f03a14e39 100644
--- a/runtime/src/in_mem_accounts_index.rs
+++ b/runtime/src/in_mem_accounts_index.rs
@@ -871,6 +871,7 @@ mod tests {
         let holder = Arc::new(BucketMapHolder::new(
             BINS_FOR_TESTING,
             &Some(AccountsIndexConfig::default()),
+            1,
         ));
         let bin = 0;
         InMemAccountsIndex::new(&holder, bin)
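
Note on the metric change above: bg_waiting_us and bg_throttling_wait_us stop being reported as raw microsecond counters and instead become a share of the total time available to the background flush threads (wall-clock elapsed time multiplied by storage.threads, doubled during startup because more threads are allocated then). Below is a minimal standalone sketch of that arithmetic, not code from the patch; threads, elapsed_ms, startup, and bg_waiting_us are made-up example values, and calc_percent simply mirrors the helper added above.

// Standalone sketch of the bg_waiting_percent math (example values only;
// threads, elapsed_ms, startup, and bg_waiting_us are not taken from the patch).
fn calc_percent(ms: u64, elapsed_ms: u64) -> f32 {
    if elapsed_ms == 0 {
        0.0
    } else {
        ms as f32 / elapsed_ms as f32
    }
}

fn main() {
    const US_PER_MS: u64 = 1_000;
    let threads: u64 = 8;         // analogous to BucketMapHolder::threads
    let elapsed_ms: u64 = 10_000; // wall-clock ms since the last stats report
    let startup = false;

    // sum of elapsed time across all background threads
    let mut thread_time_elapsed_ms = elapsed_ms * threads;
    if startup {
        thread_time_elapsed_ms *= 2; // more threads are allocated during startup
    }

    // accumulated wait time, summed across threads, tracked in microseconds
    let bg_waiting_us: u64 = 24_000_000;
    let bg_waiting_percent = calc_percent(bg_waiting_us / US_PER_MS, thread_time_elapsed_ms);

    // 24_000 ms of waiting out of 80_000 thread-ms of elapsed time -> 0.3
    println!("bg_waiting_percent: {}", bg_waiting_percent);
}

With these example numbers, 24_000 ms of accumulated waiting over 80_000 thread-ms reports as 0.3. The denominator is wall-clock time times thread count because the wait counters are summed across every background thread, per the "sum of elapsed time in each thread" comment in the patch.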