Cleanup ledger-tool analyze-storage command (#22310)

* Make ledger-tool analyze-storage use Blockstore::open()

Opening a large ledger may require raising the open file descriptor
limit. Blockstore::open() takes care of this, whereas the underlying
Database object that analyze-storage was opening directly does not
(see the file-descriptor sketch below).

* Look up key_size() through the Column trait bound instead of passing
  it as a parameter (see the trait sketch below)

* Fix a typo where analyze-storage worked on the wrong column
  (TransactionStatus was analyzed in place of TransactionStatusIndex)

* Make analyze-storage analyze all columns
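
As a rough illustration of what raising the limit involves, here is a minimal
sketch for Unix targets, assuming the libc crate. It is not the actual
Blockstore::open() code, and the helper name raise_open_file_limit is made up
for this example.

    // Hedged sketch: bump the soft RLIMIT_NOFILE so RocksDB can keep many
    // column-family files open at once. Assumes a Unix target and `libc`.
    fn raise_open_file_limit(desired: libc::rlim_t) -> std::io::Result<()> {
        let mut limit = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        // Read the current soft/hard limits for open file descriptors.
        if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut limit) } != 0 {
            return Err(std::io::Error::last_os_error());
        }
        // Raise the soft limit, but never above the hard limit.
        limit.rlim_cur = desired.min(limit.rlim_max);
        if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &limit) } != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }

A caller would invoke something like raise_open_file_limit(500_000) before
opening the database; per the commit message, the real code path for this
lives inside Blockstore::open().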
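
The key_size() change relies on calling an associated function through the
generic parameter's trait bound. A simplified sketch with hypothetical
stand-in types, not the real solana_ledger definitions:

    // Hypothetical, simplified stand-ins: the point is that a generic function
    // can ask the column type for its key size via the trait bound instead of
    // taking key_size as an extra argument.
    trait Column {
        fn key_size() -> usize;
    }

    struct SlotMeta;

    impl Column for SlotMeta {
        fn key_size() -> usize {
            std::mem::size_of::<u64>() // e.g. the key is a single u64 slot
        }
    }

    fn analyze_column<C: Column>(name: &str) {
        // Resolved from the type parameter; no key_size argument to thread through.
        println!("{name}: key size = {} bytes", C::key_size());
    }

    fn main() {
        analyze_column::<SlotMeta>("SlotMeta");
    }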
Author: steviez, 2022-01-06 23:40:02 -06:00 (committed by GitHub)
parent 207825d30b
commit 9f1f64e384
1 changed file with 31 additions and 52 deletions


@@ -22,9 +22,7 @@ use {
         ancestor_iterator::AncestorIterator,
         bank_forks_utils,
         blockstore::{create_new_ledger, Blockstore, PurgeType},
-        blockstore_db::{
-            self, AccessType, BlockstoreOptions, BlockstoreRecoveryMode, Column, Database,
-        },
+        blockstore_db::{self, AccessType, BlockstoreOptions, BlockstoreRecoveryMode, Database},
         blockstore_processor::ProcessOptions,
         shred::Shred,
     },
@@ -577,18 +575,17 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
 }
 
 fn analyze_column<
-    T: solana_ledger::blockstore_db::Column + solana_ledger::blockstore_db::ColumnName,
+    C: solana_ledger::blockstore_db::Column + solana_ledger::blockstore_db::ColumnName,
 >(
     db: &Database,
     name: &str,
-    key_size: usize,
 ) {
     let mut key_tot: u64 = 0;
     let mut val_hist = histogram::Histogram::new();
     let mut val_tot: u64 = 0;
     let mut row_hist = histogram::Histogram::new();
-    let a = key_size as u64;
-    for (_x, y) in db.iter::<T>(blockstore_db::IteratorMode::Start).unwrap() {
+    let a = C::key_size() as u64;
+    for (_x, y) in db.iter::<C>(blockstore_db::IteratorMode::Start).unwrap() {
         let b = y.len() as u64;
         key_tot += a;
         val_hist.increment(b).unwrap();
@@ -647,30 +644,25 @@ fn analyze_column<
 
 fn analyze_storage(database: &Database) {
     use blockstore_db::columns::*;
-    analyze_column::<SlotMeta>(database, "SlotMeta", SlotMeta::key_size());
-    analyze_column::<Orphans>(database, "Orphans", Orphans::key_size());
-    analyze_column::<DeadSlots>(database, "DeadSlots", DeadSlots::key_size());
-    analyze_column::<ErasureMeta>(database, "ErasureMeta", ErasureMeta::key_size());
-    analyze_column::<Root>(database, "Root", Root::key_size());
-    analyze_column::<Index>(database, "Index", Index::key_size());
-    analyze_column::<ShredData>(database, "ShredData", ShredData::key_size());
-    analyze_column::<ShredCode>(database, "ShredCode", ShredCode::key_size());
-    analyze_column::<TransactionStatus>(
-        database,
-        "TransactionStatus",
-        TransactionStatus::key_size(),
-    );
-    analyze_column::<TransactionStatus>(
-        database,
-        "TransactionStatusIndex",
-        TransactionStatusIndex::key_size(),
-    );
-    analyze_column::<AddressSignatures>(
-        database,
-        "AddressSignatures",
-        AddressSignatures::key_size(),
-    );
-    analyze_column::<Rewards>(database, "Rewards", Rewards::key_size());
+    analyze_column::<SlotMeta>(database, "SlotMeta");
+    analyze_column::<Orphans>(database, "Orphans");
+    analyze_column::<DeadSlots>(database, "DeadSlots");
+    analyze_column::<DuplicateSlots>(database, "DuplicateSlots");
+    analyze_column::<ErasureMeta>(database, "ErasureMeta");
+    analyze_column::<BankHash>(database, "BankHash");
+    analyze_column::<Root>(database, "Root");
+    analyze_column::<Index>(database, "Index");
+    analyze_column::<ShredData>(database, "ShredData");
+    analyze_column::<ShredCode>(database, "ShredCode");
+    analyze_column::<TransactionStatus>(database, "TransactionStatus");
+    analyze_column::<AddressSignatures>(database, "AddressSignatures");
+    analyze_column::<TransactionMemos>(database, "TransactionMemos");
+    analyze_column::<TransactionStatusIndex>(database, "TransactionStatusIndex");
+    analyze_column::<Rewards>(database, "Rewards");
+    analyze_column::<Blocktime>(database, "Blocktime");
+    analyze_column::<PerfSamples>(database, "PerfSamples");
+    analyze_column::<BlockHeight>(database, "BlockHeight");
+    analyze_column::<ProgramCosts>(database, "ProgramCosts");
 }
 
 fn open_blockstore(
@@ -694,23 +686,6 @@ fn open_blockstore(
     }
 }
-
-fn open_database(ledger_path: &Path, access_type: AccessType) -> Database {
-    match Database::open(
-        &ledger_path.join("rocksdb"),
-        BlockstoreOptions {
-            access_type,
-            recovery_mode: None,
-            ..BlockstoreOptions::default()
-        },
-    ) {
-        Ok(database) => database,
-        Err(err) => {
-            eprintln!("Unable to read the Ledger rocksdb: {:?}", err);
-            exit(1);
-        }
-    }
-}
 
 // This function is duplicated in validator/src/main.rs...
 fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
     if matches.is_present(name) {
@@ -3317,10 +3292,14 @@ fn main() {
             };
         }
         ("analyze-storage", _) => {
-            analyze_storage(&open_database(
-                &ledger_path,
-                AccessType::TryPrimaryThenSecondary,
-            ));
+            analyze_storage(
+                &open_blockstore(
+                    &ledger_path,
+                    AccessType::TryPrimaryThenSecondary,
+                    wal_recovery_mode,
+                )
+                .db(),
+            );
             println!("Ok.");
         }
         ("compute-slot-cost", Some(arg_matches)) => {