diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs
index 2ddafa4cd..1b90abea2 100644
--- a/core/src/ledger_cleanup_service.rs
+++ b/core/src/ledger_cleanup_service.rs
@@ -2,6 +2,7 @@
 use crate::result::{Error, Result};
 use solana_ledger::blocktree::Blocktree;
+use solana_metrics::datapoint_debug;
 use solana_sdk::clock::DEFAULT_SLOTS_PER_EPOCH;
 use solana_sdk::pubkey::Pubkey;
 use std::string::ToString;
@@ -55,11 +56,27 @@ impl LedgerCleanupService {
         blocktree: &Arc<Blocktree>,
         max_ledger_slots: u64,
     ) -> Result<()> {
+        let disk_utilization_pre = blocktree.storage_size();
+
         let (slot, _) = slot_full_receiver.recv_timeout(Duration::from_secs(1))?;
         if slot > max_ledger_slots {
             //cleanup
             blocktree.purge_slots(0, Some(slot - max_ledger_slots));
         }
+
+        let disk_utilization_post = blocktree.storage_size();
+
+        datapoint_debug!(
+            "ledger_disk_utilization",
+            ("disk_utilization_pre", disk_utilization_pre as i64, i64),
+            ("disk_utilization_post", disk_utilization_post as i64, i64),
+            (
+                "disk_utilization_delta",
+                (disk_utilization_pre as i64 - disk_utilization_post as i64),
+                i64
+            )
+        );
+
         Ok(())
     }
diff --git a/ledger/src/blocktree.rs b/ledger/src/blocktree.rs
index 7c9b0dd68..d2a3ab9b4 100644
--- a/ledger/src/blocktree.rs
+++ b/ledger/src/blocktree.rs
@@ -1469,6 +1469,10 @@ impl Blocktree {
         // This means blocktree is empty, should never get here aside from right at boot.
         self.last_root()
     }
+
+    pub fn storage_size(&self) -> u64 {
+        self.db.storage_size()
+    }
 }
 
 fn update_slot_meta(
diff --git a/ledger/src/blocktree_db.rs b/ledger/src/blocktree_db.rs
index 8e0578065..8d2c93ccb 100644
--- a/ledger/src/blocktree_db.rs
+++ b/ledger/src/blocktree_db.rs
@@ -1,6 +1,7 @@
 use crate::blocktree_meta;
 use bincode::{deserialize, serialize};
 use byteorder::{BigEndian, ByteOrder};
+use fs_extra::dir::get_size;
 use log::*;
 pub use rocksdb::Direction as IteratorDirection;
 use rocksdb::{
@@ -484,6 +485,7 @@ impl TypedColumn for columns::ErasureMeta {
 #[derive(Debug, Clone)]
 pub struct Database {
     backend: Arc<Rocks>,
+    path: Arc<Path>,
 }
 
 #[derive(Debug, Clone)]
@@ -504,7 +506,10 @@ impl Database {
 
     pub fn open(path: &Path) -> Result<Self> {
         let backend = Arc::new(Rocks::open(path)?);
-        Ok(Database { backend })
+        Ok(Database {
+            backend,
+            path: Arc::from(path),
+        })
     }
 
     pub fn destroy(path: &Path) -> Result<()> {
@@ -576,6 +581,10 @@ impl Database {
     pub fn write(&self, batch: WriteBatch) -> Result<()> {
         self.backend.write(batch.write_batch)
     }
+
+    pub fn storage_size(&self) -> u64 {
+        get_size(&self.path).expect("failure while reading ledger directory size")
+    }
 }
 
 impl LedgerColumn
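
Note (not part of the patch): a minimal, standalone sketch of what the new Database::storage_size() helper measures. fs_extra::dir::get_size walks a directory and returns the total size of its contents in bytes; the ledger path used below is an illustrative placeholder, not a path assumed by the patch.

    use fs_extra::dir::get_size;
    use std::path::Path;

    fn main() {
        // Hypothetical ledger location; substitute any RocksDB ledger directory.
        let ledger_path = Path::new("/tmp/ledger");

        // Recursively sum the sizes of all files under the directory, in bytes.
        // This is the value Database::storage_size() reports and that the cleanup
        // service emits as the "ledger_disk_utilization" datapoint.
        match get_size(ledger_path) {
            Ok(bytes) => println!("ledger disk utilization: {} bytes", bytes),
            Err(err) => eprintln!("failed to read ledger directory size: {}", err),
        }
    }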