feat: ledger size and cleanup metrics (#7335)

Sunny Gleason 2019-12-06 22:32:45 -05:00 committed by GitHub
parent 42247e0e1a
commit c00216e3be
3 changed files with 31 additions and 1 deletion

View File

@@ -2,6 +2,7 @@
 use crate::result::{Error, Result};
 use solana_ledger::blocktree::Blocktree;
+use solana_metrics::datapoint_debug;
 use solana_sdk::clock::DEFAULT_SLOTS_PER_EPOCH;
 use solana_sdk::pubkey::Pubkey;
 use std::string::ToString;
@@ -55,11 +56,27 @@ impl LedgerCleanupService {
         blocktree: &Arc<Blocktree>,
         max_ledger_slots: u64,
     ) -> Result<()> {
+        let disk_utilization_pre = blocktree.storage_size();
+
         let (slot, _) = slot_full_receiver.recv_timeout(Duration::from_secs(1))?;
         if slot > max_ledger_slots {
             //cleanup
             blocktree.purge_slots(0, Some(slot - max_ledger_slots));
         }
+
+        let disk_utilization_post = blocktree.storage_size();
+
+        datapoint_debug!(
+            "ledger_disk_utilization",
+            ("disk_utilization_pre", disk_utilization_pre as i64, i64),
+            ("disk_utilization_post", disk_utilization_post as i64, i64),
+            (
+                "disk_utilization_delta",
+                (disk_utilization_pre as i64 - disk_utilization_post as i64),
+                i64
+            )
+        );
+
         Ok(())
     }
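As context for reviewers, here is a minimal standalone sketch of the measurement pattern the hunk above introduces: sample the on-disk size before and after a cleanup step, then report both samples plus their difference as one datapoint. The `with_disk_metrics` helper and its parameters are illustrative assumptions, not part of this commit; only `datapoint_debug!` and the field names come from the change itself. The delta is pre minus post, so a positive value means the purge actually reclaimed space.

    use solana_metrics::datapoint_debug;

    // Illustrative helper (not in this commit): wrap any cleanup closure with
    // before/after size samples and emit them as a single datapoint.
    // `size_fn` stands in for Blocktree::storage_size().
    fn with_disk_metrics(size_fn: impl Fn() -> u64, cleanup: impl FnOnce()) {
        let pre = size_fn();
        cleanup();
        let post = size_fn();
        datapoint_debug!(
            "ledger_disk_utilization",
            ("disk_utilization_pre", pre as i64, i64),
            ("disk_utilization_post", post as i64, i64),
            // pre - post: positive when the cleanup freed bytes on disk
            ("disk_utilization_delta", pre as i64 - post as i64, i64)
        );
    }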

View File

@@ -1469,6 +1469,10 @@ impl Blocktree {
         // This means blocktree is empty, should never get here aside from right at boot.
         self.last_root()
     }
+
+    pub fn storage_size(&self) -> u64 {
+        self.db.storage_size()
+    }
 }

 fn update_slot_meta(
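The new accessor simply forwards to the database layer, so callers can poll it cheaply. A hedged usage sketch, assuming the existing `Blocktree::open` constructor and a hypothetical ledger path:

    use solana_ledger::blocktree::Blocktree;
    use std::path::Path;

    fn print_ledger_size() {
        // Hypothetical path; in the validator this comes from configuration.
        let ledger_path = Path::new("/tmp/ledger");
        let blocktree = Blocktree::open(ledger_path).expect("open blocktree");
        println!("ledger occupies {} bytes on disk", blocktree.storage_size());
    }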

View File

@@ -1,6 +1,7 @@
 use crate::blocktree_meta;
 use bincode::{deserialize, serialize};
 use byteorder::{BigEndian, ByteOrder};
+use fs_extra::dir::get_size;
 use log::*;
 pub use rocksdb::Direction as IteratorDirection;
 use rocksdb::{
@@ -484,6 +485,7 @@ impl TypedColumn for columns::ErasureMeta {
 #[derive(Debug, Clone)]
 pub struct Database {
     backend: Arc<Rocks>,
+    path: Arc<Path>,
 }

 #[derive(Debug, Clone)]
@@ -504,7 +506,10 @@ impl Database {
     pub fn open(path: &Path) -> Result<Self> {
         let backend = Arc::new(Rocks::open(path)?);
-        Ok(Database { backend })
+        Ok(Database {
+            backend,
+            path: Arc::from(path),
+        })
     }

     pub fn destroy(path: &Path) -> Result<()> {
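Storing the path as `Arc<Path>` keeps `Database` cheaply cloneable while owning its path. `Arc::from(path)` works because the standard library provides `From<&Path> for Arc<Path>`, which copies the path data into a shared allocation; subsequent clones are pointer copies. A tiny sketch of that conversion in isolation (the path is an assumption of the example):

    use std::path::Path;
    use std::sync::Arc;

    fn main() {
        let p: &Path = Path::new("/tmp/ledger");
        let shared: Arc<Path> = Arc::from(p); // copies the path into an Arc
        let alias = Arc::clone(&shared);      // cheap reference-count bump
        assert_eq!(alias.as_os_str(), p.as_os_str());
    }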
@@ -576,6 +581,10 @@ impl Database {
     pub fn write(&self, batch: WriteBatch) -> Result<()> {
         self.backend.write(batch.write_batch)
     }
+
+    pub fn storage_size(&self) -> u64 {
+        get_size(&self.path).expect("failure while reading ledger directory size")
+    }
 }

 impl<C> LedgerColumn<C>
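`storage_size()` delegates to fs_extra's `get_size`, which recursively sums the sizes of all files under the stored path; note that the `expect` above will panic if the directory cannot be read. A minimal sketch of that call in isolation, with an assumed path and explicit error handling instead of `expect`:

    use fs_extra::dir::get_size;
    use std::path::Path;

    fn main() {
        // Assumed location; substitute the real ledger directory.
        let ledger_path = Path::new("/tmp/ledger");
        match get_size(ledger_path) {
            Ok(bytes) => println!("ledger directory: {} bytes", bytes),
            Err(e) => eprintln!("could not read ledger directory size: {}", e),
        }
    }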