add bucket stats for number of files and total file size (#30780)
* add bucket stats for number of files and total file size * change metrics to bytes * get index file size correct
This commit is contained in:
parent
d77f0a22c7
commit
df2c95119c
|
@ -91,6 +91,8 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
|
|||
Arc::clone(&stats.index),
|
||||
count,
|
||||
);
|
||||
stats.index.resize_grow(0, index.capacity_bytes());
|
||||
|
||||
Self {
|
||||
random: thread_rng().gen(),
|
||||
drives,
|
||||
|
@ -421,6 +423,10 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
|
|||
}
|
||||
|
||||
/// Install a freshly grown index storage, updating the byte-size stats
/// by the delta between the old and new capacities.
pub fn apply_grow_index(&mut self, random: u64, index: BucketStorage) {
    // Capture sizes before the old storage is replaced.
    let old_bytes = self.index.capacity_bytes();
    let new_bytes = index.capacity_bytes();
    self.stats.index.resize_grow(old_bytes, new_bytes);

    self.random = random;
    self.index = index;
}
|
||||
|
@ -429,21 +435,31 @@ impl<'b, T: Clone + Copy + 'static> Bucket<T> {
|
|||
std::mem::size_of::<T>() as u64
|
||||
}
|
||||
|
||||
fn add_data_bucket(&mut self, bucket: BucketStorage) {
|
||||
self.stats.data.file_count.fetch_add(1, Ordering::Relaxed);
|
||||
self.stats.data.resize_grow(0, bucket.capacity_bytes());
|
||||
self.data.push(bucket);
|
||||
}
|
||||
|
||||
pub fn apply_grow_data(&mut self, ix: usize, bucket: BucketStorage) {
|
||||
if self.data.get(ix).is_none() {
|
||||
for i in self.data.len()..ix {
|
||||
// insert empty data buckets
|
||||
self.data.push(BucketStorage::new(
|
||||
self.add_data_bucket(BucketStorage::new(
|
||||
Arc::clone(&self.drives),
|
||||
1 << i,
|
||||
Self::elem_size(),
|
||||
self.index.max_search,
|
||||
Arc::clone(&self.stats.data),
|
||||
Arc::default(),
|
||||
))
|
||||
));
|
||||
}
|
||||
self.data.push(bucket);
|
||||
self.add_data_bucket(bucket);
|
||||
} else {
|
||||
let data_bucket = &mut self.data[ix];
|
||||
self.stats
|
||||
.data
|
||||
.resize_grow(data_bucket.capacity_bytes(), bucket.capacity_bytes());
|
||||
self.data[ix] = bucket;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,12 +12,20 @@ pub struct BucketStats {
|
|||
pub flush_file_us: AtomicU64,
|
||||
pub mmap_us: AtomicU64,
|
||||
pub find_entry_mut_us: AtomicU64,
|
||||
pub file_count: AtomicU64,
|
||||
pub total_file_size: AtomicU64,
|
||||
}
|
||||
|
||||
impl BucketStats {
|
||||
pub fn update_max_size(&self, size: u64) {
|
||||
self.max_size.fetch_max(size, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn resize_grow(&self, old_size: u64, new_size: u64) {
|
||||
let size_change = new_size.saturating_sub(old_size);
|
||||
self.total_file_size
|
||||
.fetch_add(size_change, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
|
|
|
@ -332,6 +332,7 @@ impl BucketStorage {
|
|||
};
|
||||
});
|
||||
m.stop();
|
||||
// resized so update total file size
|
||||
self.stats.resizes.fetch_add(1, Ordering::Relaxed);
|
||||
self.stats.resize_us.fetch_add(m.as_us(), Ordering::Relaxed);
|
||||
}
|
||||
|
@ -368,6 +369,11 @@ impl BucketStorage {
|
|||
new_bucket
|
||||
}
|
||||
|
||||
/// Return the number of bytes currently allocated
|
||||
pub(crate) fn capacity_bytes(&self) -> u64 {
|
||||
self.capacity() * self.cell_size
|
||||
}
|
||||
|
||||
/// Return the number of cells currently allocated
|
||||
pub fn capacity(&self) -> u64 {
|
||||
1 << self.capacity_pow2
|
||||
|
|
|
@ -422,6 +422,24 @@ impl BucketMapHolderStats {
|
|||
.unwrap_or_default(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"disk_index_index_file_size",
|
||||
disk.map(|disk| disk.stats.index.total_file_size.load(Ordering::Relaxed))
|
||||
.unwrap_or_default(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"disk_index_data_file_size",
|
||||
disk.map(|disk| disk.stats.data.total_file_size.load(Ordering::Relaxed))
|
||||
.unwrap_or_default(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"disk_index_data_file_count",
|
||||
disk.map(|disk| disk.stats.data.file_count.load(Ordering::Relaxed))
|
||||
.unwrap_or_default(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"disk_index_find_entry_mut_us",
|
||||
disk.map(|disk| disk
|
||||
|
|
Loading…
Reference in New Issue