From 2b269dbe0e0da4b4da64612b3eb221b41b3627bc Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Thu, 2 Dec 2021 17:23:51 -0700 Subject: [PATCH] Ledger-tool: only require ledger dir when necessary (#21575) * Don't canonicalize ledger_path unless ledger_path will be used * Single use statement --- ledger-tool/src/bigtable.rs | 48 +- ledger-tool/src/ledger_path.rs | 30 + ledger-tool/src/main.rs | 3122 ++++++++++++++++---------------- 3 files changed, 1627 insertions(+), 1573 deletions(-) create mode 100644 ledger-tool/src/ledger_path.rs diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 52f6b1767..b3d9ac8e1 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -1,23 +1,26 @@ /// The `bigtable` subcommand -use clap::{ - value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, -}; -use solana_clap_utils::{ - input_parsers::pubkey_of, - input_validators::{is_slot, is_valid_pubkey}, -}; -use solana_cli_output::{ - display::println_transaction, CliBlock, CliTransaction, CliTransactionConfirmation, - OutputFormat, -}; -use solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType}; -use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}; -use solana_transaction_status::{ConfirmedBlock, EncodedTransaction, UiTransactionEncoding}; -use std::{ - path::Path, - process::exit, - result::Result, - sync::{atomic::AtomicBool, Arc}, +use { + crate::ledger_path::canonicalize_ledger_path, + clap::{ + value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, + }, + solana_clap_utils::{ + input_parsers::pubkey_of, + input_validators::{is_slot, is_valid_pubkey}, + }, + solana_cli_output::{ + display::println_transaction, CliBlock, CliTransaction, CliTransactionConfirmation, + OutputFormat, + }, + solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType}, + solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, + solana_transaction_status::{ConfirmedBlock, EncodedTransaction, UiTransactionEncoding}, + std::{ + path::Path, + process::exit, + result::Result, + sync::{atomic::AtomicBool, Arc}, + }, }; async fn upload( @@ -426,8 +429,11 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok(); let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata"); let force_reupload = arg_matches.is_present("force_reupload"); - let blockstore = - crate::open_blockstore(ledger_path, AccessType::TryPrimaryThenSecondary, None); + let blockstore = crate::open_blockstore( + &canonicalize_ledger_path(ledger_path), + AccessType::TryPrimaryThenSecondary, + None, + ); runtime.block_on(upload( blockstore, diff --git a/ledger-tool/src/ledger_path.rs b/ledger-tool/src/ledger_path.rs new file mode 100644 index 000000000..b446652c7 --- /dev/null +++ b/ledger-tool/src/ledger_path.rs @@ -0,0 +1,30 @@ +use { + clap::{value_t, ArgMatches}, + std::{ + fs, + path::{Path, PathBuf}, + process::exit, + }, +}; + +pub fn parse_ledger_path(matches: &ArgMatches<'_>, name: &str) -> PathBuf { + PathBuf::from(value_t!(matches, name, String).unwrap_or_else(|_err| { + eprintln!( + "Error: Missing --ledger argument.\n\n{}", + matches.usage() + ); + exit(1); + })) +} + +// Canonicalize ledger path to avoid issues with symlink creation +pub fn canonicalize_ledger_path(ledger_path: &Path) -> PathBuf { + fs::canonicalize(&ledger_path).unwrap_or_else(|err| { + eprintln!( + "Unable to access ledger path
'{}': {}", + ledger_path.display(), + err + ); + exit(1); + }) +} diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 1fd2100c1..a618d28dc 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1,83 +1,87 @@ #![allow(clippy::integer_arithmetic)] -use clap::{ - crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, - Arg, ArgMatches, SubCommand, -}; -use dashmap::DashMap; -use itertools::Itertools; -use log::*; -use regex::Regex; -use serde::Serialize; -use serde_json::json; -use solana_clap_utils::{ - input_parsers::{cluster_type_of, pubkey_of, pubkeys_of}, - input_validators::{ - is_parsable, is_pow2, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage, +use { + clap::{ + crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, + AppSettings, Arg, ArgMatches, SubCommand, }, -}; -use solana_core::system_monitor_service::SystemMonitorService; -use solana_entry::entry::Entry; -use solana_ledger::{ - ancestor_iterator::AncestorIterator, - bank_forks_utils, - blockstore::{create_new_ledger, Blockstore, PurgeType}, - blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database}, - blockstore_processor::ProcessOptions, - shred::Shred, -}; -use solana_measure::measure::Measure; -use solana_runtime::{ - accounts_db::AccountsDbConfig, - accounts_index::{AccountsIndexConfig, ScanConfig}, - bank::{Bank, RewardCalculationEvent}, - bank_forks::BankForks, - cost_model::CostModel, - cost_tracker::CostTracker, - hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, - snapshot_archive_info::SnapshotArchiveInfoGetter, - snapshot_config::SnapshotConfig, - snapshot_utils::{ - self, ArchiveFormat, SnapshotVersion, DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, - DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, + dashmap::DashMap, + itertools::Itertools, + log::*, + regex::Regex, + serde::Serialize, + serde_json::json, + solana_clap_utils::{ + input_parsers::{cluster_type_of, pubkey_of, pubkeys_of}, + input_validators::{ + is_parsable, is_pow2, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage, + }, }, -}; -use solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - account_utils::StateMut, - clock::{Epoch, Slot}, - genesis_config::{ClusterType, GenesisConfig}, - hash::Hash, - inflation::Inflation, - native_token::{lamports_to_sol, sol_to_lamports, Sol}, - pubkey::Pubkey, - rent::Rent, - shred_version::compute_shred_version, - stake::{self, state::StakeState}, - system_program, - transaction::{SanitizedTransaction, TransactionError}, -}; -use solana_stake_program::stake_state::{self, PointValue}; -use solana_vote_program::{ - self, - vote_state::{self, VoteState}, -}; -use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - ffi::OsStr, - fs::{self, File}, - io::{self, stdout, BufRead, BufReader, Write}, - path::{Path, PathBuf}, - process::{exit, Command, Stdio}, - str::FromStr, - sync::{ - atomic::{AtomicBool, Ordering}, - mpsc::channel, - Arc, RwLock, + solana_core::system_monitor_service::SystemMonitorService, + solana_entry::entry::Entry, + solana_ledger::{ + ancestor_iterator::AncestorIterator, + bank_forks_utils, + blockstore::{create_new_ledger, Blockstore, PurgeType}, + blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database}, + blockstore_processor::ProcessOptions, + shred::Shred, + }, + solana_measure::measure::Measure, + solana_runtime::{ + accounts_db::AccountsDbConfig, + 
accounts_index::{AccountsIndexConfig, ScanConfig}, + bank::{Bank, RewardCalculationEvent}, + bank_forks::BankForks, + cost_model::CostModel, + cost_tracker::CostTracker, + hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + snapshot_archive_info::SnapshotArchiveInfoGetter, + snapshot_config::SnapshotConfig, + snapshot_utils::{ + self, ArchiveFormat, SnapshotVersion, DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, + DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, + }, + }, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + account_utils::StateMut, + clock::{Epoch, Slot}, + genesis_config::{ClusterType, GenesisConfig}, + hash::Hash, + inflation::Inflation, + native_token::{lamports_to_sol, sol_to_lamports, Sol}, + pubkey::Pubkey, + rent::Rent, + shred_version::compute_shred_version, + stake::{self, state::StakeState}, + system_program, + transaction::{SanitizedTransaction, TransactionError}, + }, + solana_stake_program::stake_state::{self, PointValue}, + solana_vote_program::{ + self, + vote_state::{self, VoteState}, + }, + std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + ffi::OsStr, + fs::File, + io::{self, stdout, BufRead, BufReader, Write}, + path::{Path, PathBuf}, + process::{exit, Command, Stdio}, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + mpsc::channel, + Arc, RwLock, + }, }, }; mod bigtable; use bigtable::*; +mod ledger_path; +use ledger_path::*; #[derive(PartialEq)] enum LedgerOutputMethod { @@ -1625,25 +1629,7 @@ fn main() { info!("{} {}", crate_name!(), solana_version::version!()); - let ledger_path = PathBuf::from(value_t!(matches, "ledger_path", String).unwrap_or_else( - |_err| { - eprintln!( - "Error: Missing --ledger argument.\n\n{}", - matches.usage() - ); - exit(1); - }, - )); - - // Canonicalize ledger path to avoid issues with symlink creation - let ledger_path = fs::canonicalize(&ledger_path).unwrap_or_else(|err| { - eprintln!( - "Unable to access ledger path '{}': {}", - ledger_path.display(), - err - ); - exit(1); - }); + let ledger_path = parse_ledger_path(&matches, "ledger_path"); let snapshot_archive_path = value_t!(matches, "snapshot_archive_path", String) .ok() @@ -1654,1038 +1640,1060 @@ fn main() { .map(BlockstoreRecoveryMode::from); let verbose_level = matches.occurrences_of("verbose"); - match matches.subcommand() { - ("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches), - ("print", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); - let num_slots = value_t!(arg_matches, "num_slots", Slot).ok(); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - let only_rooted = arg_matches.is_present("only_rooted"); - output_ledger( - open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ), - starting_slot, - ending_slot, - allow_dead_slots, - LedgerOutputMethod::Print, - num_slots, - verbose_level, - only_rooted, - ); - } - ("copy", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); - let source = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); - let target = open_blockstore(&target_db, AccessType::PrimaryOnly, None); - for
(slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { - if slot > ending_slot { - break; - } - if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { - if target.insert_shreds(shreds, None, true).is_err() { - warn!("error inserting shreds for slot {}", slot); + if let ("bigtable", Some(arg_matches)) = matches.subcommand() { + bigtable_process_command(&ledger_path, arg_matches) + } else { + let ledger_path = canonicalize_ledger_path(&ledger_path); + + match matches.subcommand() { + ("print", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); + let num_slots = value_t!(arg_matches, "num_slots", Slot).ok(); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let only_rooted = arg_matches.is_present("only_rooted"); + output_ledger( + open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ), + starting_slot, + ending_slot, + allow_dead_slots, + LedgerOutputMethod::Print, + num_slots, + verbose_level, + only_rooted, + ); + } + ("copy", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); + let source = + open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); + let target = open_blockstore(&target_db, AccessType::PrimaryOnly, None); + for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { + if slot > ending_slot { + break; + } + if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { + if target.insert_shreds(shreds, None, true).is_err() { + warn!("error inserting shreds for slot {}", slot); + } } } } - } - ("genesis", Some(arg_matches)) => { - println!("{}", open_genesis_config_by(&ledger_path, arg_matches)); - } - ("genesis-hash", Some(arg_matches)) => { - println!( - "{}", - open_genesis_config_by(&ledger_path, arg_matches).hash() - ); - } - ("modify-genesis", Some(arg_matches)) => { - let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let output_directory = PathBuf::from(arg_matches.value_of("output_directory").unwrap()); - - if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { - genesis_config.cluster_type = cluster_type; + ("genesis", Some(arg_matches)) => { + println!("{}", open_genesis_config_by(&ledger_path, arg_matches)); } - - if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") { - genesis_config.poh_config.hashes_per_tick = match hashes_per_tick { - // Note: Unlike `solana-genesis`, "auto" is not supported here.
- "sleep" => None, - _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), - } + ("genesis-hash", Some(arg_matches)) => { + println!( + "{}", + open_genesis_config_by(&ledger_path, arg_matches).hash() + ); } + ("modify-genesis", Some(arg_matches)) => { + let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let output_directory = + PathBuf::from(arg_matches.value_of("output_directory").unwrap()); - create_new_ledger( - &output_directory, - &genesis_config, - solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, - AccessType::PrimaryOnly, - ) - .unwrap_or_else(|err| { - eprintln!("Failed to write genesis config: {:?}", err); - exit(1); - }); - - println!("{}", open_genesis_config_by(&output_directory, arg_matches)); - } - ("shred-version", Some(arg_matches)) => { - let process_options = ProcessOptions { - dev_halt_at_slot: Some(0), - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - poh_verify: false, - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - match load_bank_forks( - arg_matches, - &genesis_config, - &blockstore, - process_options, - snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - println!( - "{}", - compute_shred_version( - &genesis_config.hash(), - Some(&bank_forks.working_bank().hard_forks().read().unwrap()) - ) - ); + if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { + genesis_config.cluster_type = cluster_type; } - Err(err) => { - eprintln!("Failed to load ledger: {:?}", err); + + if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") { + genesis_config.poh_config.hashes_per_tick = match hashes_per_tick { + // Note: Unlike `solana-genesis`, "auto" is not supported here. 
+ "sleep" => None, + _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + } + } + + create_new_ledger( + &output_directory, + &genesis_config, + solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + AccessType::PrimaryOnly, + ) + .unwrap_or_else(|err| { + eprintln!("Failed to write genesis config: {:?}", err); exit(1); - } + }); + + println!("{}", open_genesis_config_by(&output_directory, arg_matches)); } - } - ("shred-meta", Some(arg_matches)) => { - #[derive(Debug)] - #[allow(dead_code)] - struct ShredMeta<'a> { - slot: Slot, - full_slot: bool, - shred_index: usize, - data: bool, - code: bool, - last_in_slot: bool, - data_complete: bool, - shred: &'a Shred, - } - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); - let ledger = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); - for (slot, _meta) in ledger - .slot_meta_iterator(starting_slot) - .unwrap() - .take_while(|(slot, _)| *slot <= ending_slot) - { - let full_slot = ledger.is_full(slot); - if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) { - for (shred_index, shred) in shreds.iter().enumerate() { + ("shred-version", Some(arg_matches)) => { + let process_options = ProcessOptions { + dev_halt_at_slot: Some(0), + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + poh_verify: false, + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + match load_bank_forks( + arg_matches, + &genesis_config, + &blockstore, + process_options, + snapshot_archive_path, + ) { + Ok((bank_forks, ..)) => { println!( - "{:#?}", - ShredMeta { - slot, - full_slot, - shred_index, - data: shred.is_data(), - code: shred.is_code(), - data_complete: shred.data_complete(), - last_in_slot: shred.last_in_slot(), - shred, - } + "{}", + compute_shred_version( + &genesis_config.hash(), + Some(&bank_forks.working_bank().hard_forks().read().unwrap()) + ) ); } + Err(err) => { + eprintln!("Failed to load ledger: {:?}", err); + exit(1); + } } } - } - ("bank-hash", Some(arg_matches)) => { - let process_options = ProcessOptions { - dev_halt_at_slot: Some(0), - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - poh_verify: false, - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - match load_bank_forks( - arg_matches, - &genesis_config, - &blockstore, - process_options, - snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - println!("{}", &bank_forks.working_bank().hash()); + ("shred-meta", Some(arg_matches)) => { + #[derive(Debug)] + #[allow(dead_code)] + struct ShredMeta<'a> { + slot: Slot, + full_slot: bool, + shred_index: usize, + data: bool, + code: bool, + last_in_slot: bool, + data_complete: bool, + shred: &'a Shred, } - Err(err) => { - eprintln!("Failed to load ledger: {:?}", err); - exit(1); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); + let ledger = + open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); + for (slot, _meta) in ledger + .slot_meta_iterator(starting_slot) + .unwrap() + 
.take_while(|(slot, _)| *slot <= ending_slot) + { + let full_slot = ledger.is_full(slot); + if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) { + for (shred_index, shred) in shreds.iter().enumerate() { + println!( + "{:#?}", + ShredMeta { + slot, + full_slot, + shred_index, + data: shred.is_data(), + code: shred.is_code(), + data_complete: shred.data_complete(), + last_in_slot: shred.last_in_slot(), + shred, + } + ); + } + } } } - } - ("slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - for slot in slots { - println!("Slot {}", slot); - if let Err(err) = output_slot( - &blockstore, - slot, - allow_dead_slots, - &LedgerOutputMethod::Print, - std::u64::MAX, - ) { - eprintln!("{}", err); - } - } - } - ("json", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - output_ledger( - open_blockstore( + ("bank-hash", Some(arg_matches)) => { + let process_options = ProcessOptions { + dev_halt_at_slot: Some(0), + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + poh_verify: false, + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( &ledger_path, AccessType::TryPrimaryThenSecondary, wal_recovery_mode, - ), - starting_slot, - Slot::MAX, - allow_dead_slots, - LedgerOutputMethod::Json, - None, - std::u64::MAX, - true, - ); - } - ("dead-slots", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { - println!("{}", slot); - } - } - ("duplicate-slots", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { - println!("{}", slot); - } - } - ("set-dead-slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let blockstore = - open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode); - for slot in slots { - match blockstore.set_dead_slot(slot) { - Ok(_) => println!("Slot {} dead", slot), - Err(err) => eprintln!("Failed to set slot {} dead slot: {:?}", slot, err), - } - } - } - ("remove-dead-slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let blockstore = - open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode); - for slot in slots { - match blockstore.remove_dead_slot(slot) { - Ok(_) => println!("Slot {} not longer marked dead", slot), + ); + match load_bank_forks( + arg_matches, + &genesis_config, + &blockstore, + process_options, + snapshot_archive_path, + ) { + Ok((bank_forks, ..)) => { + println!("{}", &bank_forks.working_bank().hash()); + } Err(err) => { - eprintln!("Failed to remove dead flag for slot {}, {:?}", slot, err) + eprintln!("Failed to load ledger: {:?}", err); + exit(1); } } } - } - ("parse_full_frozen", Some(arg_matches)) => { - let starting_slot =
value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - let mut ancestors = BTreeSet::new(); - assert!( - blockstore.meta(ending_slot).unwrap().is_some(), - "Ending slot doesn't exist" - ); - for a in AncestorIterator::new(ending_slot, &blockstore) { - ancestors.insert(a); - if a <= starting_slot { - break; + ("slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + for slot in slots { + println!("Slot {}", slot); + if let Err(err) = output_slot( + &blockstore, + slot, + allow_dead_slots, + &LedgerOutputMethod::Print, + std::u64::MAX, + ) { + eprintln!("{}", err); + } } } - println!("ancestors: {:?}", ancestors.iter()); - - let mut frozen = BTreeMap::new(); - let mut full = BTreeMap::new(); - let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap(); - let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); - - let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); - let f = BufReader::new(File::open(log_file).unwrap()); - println!("Reading log file"); - for line in f.lines().flatten() { - let parse_results = { - if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { - Some((slot_string, &mut frozen)) - } else { - full_regex - .captures_iter(&line) - .next() - .map(|slot_string| (slot_string, &mut full)) + ("json", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + output_ledger( + open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ), + starting_slot, + Slot::MAX, + allow_dead_slots, + LedgerOutputMethod::Json, + None, + std::u64::MAX, + true, + ); + } + ("dead-slots", Some(arg_matches)) => { + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { + println!("{}", slot); + } + } + ("duplicate-slots", Some(arg_matches)) => { + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { + println!("{}", slot); + } + } + ("set-dead-slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let blockstore = + open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode); + for slot in slots { + match blockstore.set_dead_slot(slot) { + Ok(_) => println!("Slot {} dead", slot), + Err(err) => eprintln!("Failed to set slot {} dead slot: {:?}", slot, err), } - }; - - if let Some((slot_string, map)) = parse_results { - let slot = slot_string - .get(1) - .expect("Only one match group") - .as_str() - .parse::() - .unwrap(); - if ancestors.contains(&slot) && !map.contains_key(&slot) { - map.insert(slot, line); + } + } + ("remove-dead-slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let 
blockstore = + open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode); + for slot in slots { + match blockstore.remove_dead_slot(slot) { + Ok(_) => println!("Slot {} not longer marked dead", slot), + Err(err) => { + eprintln!("Failed to remove dead flag for slot {}, {:?}", slot, err) + } } - if slot == ending_slot && frozen.contains_key(&slot) && full.contains_key(&slot) - { + } + } + ("parse_full_frozen", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let mut ancestors = BTreeSet::new(); + assert!( + blockstore.meta(ending_slot).unwrap().is_some(), + "Ending slot doesn't exist" + ); + for a in AncestorIterator::new(ending_slot, &blockstore) { + ancestors.insert(a); + if a <= starting_slot { break; } } - } + println!("ancestors: {:?}", ancestors.iter()); - for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) { - assert_eq!(slot1, slot2); - println!( - "Slot: {}\n, full: {}\n, frozen: {}", - slot1, full_log, frozen_log - ); - } - } - ("verify", Some(arg_matches)) => { - let mut accounts_index_config = AccountsIndexConfig::default(); - if let Some(bins) = value_t!(arg_matches, "accounts_index_bins", usize).ok() { - accounts_index_config.bins = Some(bins); - } + let mut frozen = BTreeMap::new(); + let mut full = BTreeMap::new(); + let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap(); + let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); - let exit_signal = Arc::new(AtomicBool::new(false)); - let system_monitor_service = SystemMonitorService::new(Arc::clone(&exit_signal), false); - - if let Some(limit) = value_t!(arg_matches, "accounts_index_memory_limit_mb", usize).ok() - { - accounts_index_config.index_limit_mb = Some(limit); - } - - { - let mut accounts_index_paths: Vec = - if arg_matches.is_present("accounts_index_path") { - values_t_or_exit!(arg_matches, "accounts_index_path", String) - .into_iter() - .map(PathBuf::from) - .collect() - } else { - vec![] - }; - if accounts_index_paths.is_empty() { - accounts_index_paths = vec![ledger_path.join("accounts_index")]; - } - accounts_index_config.drives = Some(accounts_index_paths); - } - - let filler_account_count = value_t!(arg_matches, "accounts_filler_count", usize).ok(); - - let accounts_db_config = Some(AccountsDbConfig { - index: Some(accounts_index_config), - accounts_hash_cache_path: Some(ledger_path.clone()), - filler_account_count, - ..AccountsDbConfig::default() - }); - - let process_options = ProcessOptions { - dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - poh_verify: !arg_matches.is_present("skip_poh_verify"), - bpf_jit: !matches.is_present("no_bpf_jit"), - accounts_db_caching_enabled: !arg_matches.is_present("no_accounts_db_caching"), - limit_load_slot_count_from_snapshot: value_t!( - arg_matches, - "limit_load_slot_count_from_snapshot", - usize - ) - .ok(), - accounts_db_config, - verify_index: arg_matches.is_present("verify_accounts_index"), - allow_dead_slots: arg_matches.is_present("allow_dead_slots"), - accounts_db_test_hash_calculation: arg_matches - .is_present("accounts_db_test_hash_calculation"), - accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), - ..ProcessOptions::default() - }; - let print_accounts_stats =
arg_matches.is_present("print_accounts_stats"); - println!( - "genesis hash: {}", - open_genesis_config_by(&ledger_path, arg_matches).hash() - ); - - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - let (bank_forks, ..) = load_bank_forks( - arg_matches, - &open_genesis_config_by(&ledger_path, arg_matches), - &blockstore, - process_options, - snapshot_archive_path, - ) - .unwrap_or_else(|err| { - eprintln!("Ledger verification failed: {:?}", err); - exit(1); - }); - if print_accounts_stats { - let working_bank = bank_forks.working_bank(); - working_bank.print_accounts_stats(); - } - exit_signal.store(true, Ordering::Relaxed); - system_monitor_service.join().unwrap(); - println!("Ok"); - } - ("graph", Some(arg_matches)) => { - let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); - - let process_options = ProcessOptions { - dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - poh_verify: false, - ..ProcessOptions::default() - }; - - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - match load_bank_forks( - arg_matches, - &open_genesis_config_by(&ledger_path, arg_matches), - &blockstore, - process_options, - snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - let dot = graph_forks(&bank_forks, arg_matches.is_present("include_all_votes")); - - let extension = Path::new(&output_file).extension(); - let result = if extension == Some(OsStr::new("pdf")) { - render_dot(dot, &output_file, "pdf") - } else if extension == Some(OsStr::new("png")) { - render_dot(dot, &output_file, "png") - } else { - File::create(&output_file) - .and_then(|mut file| file.write_all(&dot.into_bytes())) + let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); + let f = BufReader::new(File::open(log_file).unwrap()); + println!("Reading log file"); + for line in f.lines().flatten() { + let parse_results = { + if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { + Some((slot_string, &mut frozen)) + } else { + full_regex + .captures_iter(&line) + .next() + .map(|slot_string| (slot_string, &mut full)) + } }; - match result { - Ok(_) => println!("Wrote {}", output_file), - Err(err) => eprintln!("Unable to write {}: {}", output_file, err), + if let Some((slot_string, map)) = parse_results { + let slot = slot_string + .get(1) + .expect("Only one match group") + .as_str() + .parse::() + .unwrap(); + if ancestors.contains(&slot) && !map.contains_key(&slot) { + map.insert(slot, line); + } + if slot == ending_slot + && frozen.contains_key(&slot) + && full.contains_key(&slot) + { + break; + } } } - Err(err) => { - eprintln!("Failed to load ledger: {:?}", err); - exit(1); + + for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) { + assert_eq!(slot1, slot2); + println!( + "Slot: {}\n, full: {}\n, frozen: {}", + slot1, full_log, frozen_log + ); } } - } - ("create-snapshot", Some(arg_matches)) => { - let output_directory = value_t!(arg_matches, "output_directory", PathBuf) - .unwrap_or_else(|_| ledger_path.clone()); - let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok(); - let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); - let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); + ("verify", Some(arg_matches)) => { + let mut accounts_index_config = AccountsIndexConfig::default(); + if let 
Some(bins) = value_t!(arg_matches, "accounts_index_bins", usize).ok() { + accounts_index_config.bins = Some(bins); + } - let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); - let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); + let exit_signal = Arc::new(AtomicBool::new(false)); + let system_monitor_service = + SystemMonitorService::new(Arc::clone(&exit_signal), false); - let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); - let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); + if let Some(limit) = + value_t!(arg_matches, "accounts_index_memory_limit_mb", usize).ok() + { + accounts_index_config.index_limit_mb = Some(limit); + } - let bootstrap_stake_authorized_pubkey = - pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); - let bootstrap_validator_lamports = - value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); - let bootstrap_validator_stake_lamports = - value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64); - let minimum_stake_lamports = StakeState::get_rent_exempt_reserve(&rent); - if bootstrap_validator_stake_lamports < minimum_stake_lamports { - eprintln!( - "Error: insufficient --bootstrap-validator-stake-lamports. \ - Minimum amount is {}", - minimum_stake_lamports + { + let mut accounts_index_paths: Vec = + if arg_matches.is_present("accounts_index_path") { + values_t_or_exit!(arg_matches, "accounts_index_path", String) + .into_iter() + .map(PathBuf::from) + .collect() + } else { + vec![] + }; + if accounts_index_paths.is_empty() { + accounts_index_paths = vec![ledger_path.join("accounts_index")]; + } + accounts_index_config.drives = Some(accounts_index_paths); + } + + let filler_account_count = + value_t!(arg_matches, "accounts_filler_count", usize).ok(); + + let accounts_db_config = Some(AccountsDbConfig { + index: Some(accounts_index_config), + accounts_hash_cache_path: Some(ledger_path.clone()), + filler_account_count, + ..AccountsDbConfig::default() + }); + + let process_options = ProcessOptions { + dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + poh_verify: !arg_matches.is_present("skip_poh_verify"), + bpf_jit: !matches.is_present("no_bpf_jit"), + accounts_db_caching_enabled: !arg_matches.is_present("no_accounts_db_caching"), + limit_load_slot_count_from_snapshot: value_t!( + arg_matches, + "limit_load_slot_count_from_snapshot", + usize + ) + .ok(), + accounts_db_config, + verify_index: arg_matches.is_present("verify_accounts_index"), + allow_dead_slots: arg_matches.is_present("allow_dead_slots"), + accounts_db_test_hash_calculation: arg_matches + .is_present("accounts_db_test_hash_calculation"), + accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), + ..ProcessOptions::default() + }; + let print_accounts_stats = arg_matches.is_present("print_accounts_stats"); + println!( + "genesis hash: {}", + open_genesis_config_by(&ledger_path, arg_matches).hash() ); - exit(1); + + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let (bank_forks, ..)
= load_bank_forks( + arg_matches, + &open_genesis_config_by(&ledger_path, arg_matches), + &blockstore, + process_options, + snapshot_archive_path, + ) + .unwrap_or_else(|err| { + eprintln!("Ledger verification failed: {:?}", err); + exit(1); + }); + if print_accounts_stats { + let working_bank = bank_forks.working_bank(); + working_bank.print_accounts_stats(); + } + exit_signal.store(true, Ordering::Relaxed); + system_monitor_service.join().unwrap(); + println!("Ok"); } - let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator"); - let accounts_to_remove = - pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); - let vote_accounts_to_destake: HashSet<_> = - pubkeys_of(arg_matches, "vote_accounts_to_destake") - .unwrap_or_default() - .into_iter() - .collect(); - let snapshot_version = - arg_matches - .value_of("snapshot_version") - .map_or(SnapshotVersion::default(), |s| { + ("graph", Some(arg_matches)) => { + let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); + + let process_options = ProcessOptions { + dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + poh_verify: false, + ..ProcessOptions::default() + }; + + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + match load_bank_forks( + arg_matches, + &open_genesis_config_by(&ledger_path, arg_matches), + &blockstore, + process_options, + snapshot_archive_path, + ) { + Ok((bank_forks, ..)) => { + let dot = + graph_forks(&bank_forks, arg_matches.is_present("include_all_votes")); + + let extension = Path::new(&output_file).extension(); + let result = if extension == Some(OsStr::new("pdf")) { + render_dot(dot, &output_file, "pdf") + } else if extension == Some(OsStr::new("png")) { + render_dot(dot, &output_file, "png") + } else { + File::create(&output_file) + .and_then(|mut file| file.write_all(&dot.into_bytes())) + }; + + match result { + Ok(_) => println!("Wrote {}", output_file), + Err(err) => eprintln!("Unable to write {}: {}", output_file, err), + } + } + Err(err) => { + eprintln!("Failed to load ledger: {:?}", err); + exit(1); + } + } + } + ("create-snapshot", Some(arg_matches)) => { + let output_directory = value_t!(arg_matches, "output_directory", PathBuf) + .unwrap_or_else(|_| ledger_path.clone()); + let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok(); + let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); + let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); + + let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); + let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); + + let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); + let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); + + let bootstrap_stake_authorized_pubkey = + pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); + let bootstrap_validator_lamports = + value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); + let bootstrap_validator_stake_lamports = + value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64); + let minimum_stake_lamports = StakeState::get_rent_exempt_reserve(&rent); + if bootstrap_validator_stake_lamports < minimum_stake_lamports { + eprintln!( + "Error: insufficient --bootstrap-validator-stake-lamports.
\ + Minimum amount is {}", + minimum_stake_lamports + ); + exit(1); + } + let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator"); + let accounts_to_remove = + pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); + let vote_accounts_to_destake: HashSet<_> = + pubkeys_of(arg_matches, "vote_accounts_to_destake") + .unwrap_or_default() + .into_iter() + .collect(); + let snapshot_version = arg_matches.value_of("snapshot_version").map_or( + SnapshotVersion::default(), + |s| { s.parse::().unwrap_or_else(|e| { eprintln!("Error: {}", e); exit(1) }) - }); + }, + ); - let maximum_full_snapshot_archives_to_retain = - value_t_or_exit!(arg_matches, "maximum_full_snapshots_to_retain", usize); - let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( - arg_matches, - "maximum_incremental_snapshots_to_retain", - usize - ); - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - let is_incremental = arg_matches.is_present("incremental"); + let maximum_full_snapshot_archives_to_retain = + value_t_or_exit!(arg_matches, "maximum_full_snapshots_to_retain", usize); + let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( + arg_matches, + "maximum_incremental_snapshots_to_retain", + usize + ); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let is_incremental = arg_matches.is_present("incremental"); - let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { - blockstore - .rooted_slot_iterator(0) - .expect("Failed to get rooted slot iterator") - .last() - .expect("Failed to get root") - } else { - value_t_or_exit!(arg_matches, "snapshot_slot", Slot) - }; + let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { + blockstore + .rooted_slot_iterator(0) + .expect("Failed to get rooted slot iterator") + .last() + .expect("Failed to get root") + } else { + value_t_or_exit!(arg_matches, "snapshot_slot", Slot) + }; - info!( - "Creating {}snapshot of slot {} in {}", - if is_incremental { "incremental " } else { "" }, - snapshot_slot, - output_directory.display() - ); + info!( + "Creating {}snapshot of slot {} in {}", + if is_incremental { "incremental " } else { "" }, + snapshot_slot, + output_directory.display() + ); - match load_bank_forks( - arg_matches, - &genesis_config, - &blockstore, - ProcessOptions { - dev_halt_at_slot: Some(snapshot_slot), - new_hard_forks, - poh_verify: false, - ..ProcessOptions::default() - }, - snapshot_archive_path, - ) { - Ok((bank_forks, .., starting_snapshot_hashes)) => { - let mut bank = bank_forks - .get(snapshot_slot) - .unwrap_or_else(|| { - eprintln!("Error: Slot {} is not available", snapshot_slot); - exit(1); - }) - .clone(); + match load_bank_forks( + arg_matches, + &genesis_config, + &blockstore, + ProcessOptions { + dev_halt_at_slot: Some(snapshot_slot), + new_hard_forks, + poh_verify: false, + ..ProcessOptions::default() + }, + snapshot_archive_path, + ) { + Ok((bank_forks, .., starting_snapshot_hashes)) => { + let mut bank = bank_forks + .get(snapshot_slot) + .unwrap_or_else(|| { + eprintln!("Error: Slot {} is not available", snapshot_slot); + exit(1); + }) + .clone(); - let child_bank_required = rent_burn_percentage.is_ok() - || hashes_per_tick.is_some() - || remove_stake_accounts - 
|| !accounts_to_remove.is_empty() - || !vote_accounts_to_destake.is_empty() - || faucet_pubkey.is_some() - || bootstrap_validator_pubkeys.is_some(); + let child_bank_required = rent_burn_percentage.is_ok() + || hashes_per_tick.is_some() + || remove_stake_accounts + || !accounts_to_remove.is_empty() + || !vote_accounts_to_destake.is_empty() + || faucet_pubkey.is_some() + || bootstrap_validator_pubkeys.is_some(); - if child_bank_required { - let mut child_bank = - Bank::new_from_parent(&bank, bank.collector_id(), bank.slot() + 1); + if child_bank_required { + let mut child_bank = + Bank::new_from_parent(&bank, bank.collector_id(), bank.slot() + 1); - if let Ok(rent_burn_percentage) = rent_burn_percentage { - child_bank.set_rent_burn_percentage(rent_burn_percentage); + if let Ok(rent_burn_percentage) = rent_burn_percentage { + child_bank.set_rent_burn_percentage(rent_burn_percentage); + } + + if let Some(hashes_per_tick) = hashes_per_tick { + child_bank.set_hashes_per_tick(match hashes_per_tick { + // Note: Unlike `solana-genesis`, "auto" is not supported here. + "sleep" => None, + _ => { + Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)) + } + }); + } + bank = Arc::new(child_bank); } - if let Some(hashes_per_tick) = hashes_per_tick { - child_bank.set_hashes_per_tick(match hashes_per_tick { - // Note: Unlike `solana-genesis`, "auto" is not supported here. - "sleep" => None, - _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + if let Some(faucet_pubkey) = faucet_pubkey { + bank.store_account( + &faucet_pubkey, + &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), + ); + } + + if remove_stake_accounts { + for (address, mut account) in bank + .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .unwrap() + .into_iter() + { + account.set_lamports(0); + bank.store_account(&address, &account); + } + } + + for address in accounts_to_remove { + let mut account = bank.get_account(&address).unwrap_or_else(|| { + eprintln!( + "Error: Account does not exist, unable to remove it: {}", + address + ); + exit(1); }); - } - bank = Arc::new(child_bank); - } - if let Some(faucet_pubkey) = faucet_pubkey { - bank.store_account( - &faucet_pubkey, - &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), - ); - } - - if remove_stake_accounts { - for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { account.set_lamports(0); bank.store_account(&address, &account); } - } - for address in accounts_to_remove { - let mut account = bank.get_account(&address).unwrap_or_else(|| { - eprintln!( - "Error: Account does not exist, unable to remove it: {}", - address - ); - exit(1); - }); - - account.set_lamports(0); - bank.store_account(&address, &account); - } - - if !vote_accounts_to_destake.is_empty() { - for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { - if let Ok(StakeState::Stake(meta, stake)) = account.state() { - if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) - { - if verbose_level > 0 { - warn!( - "Undelegating stake account {} from {}", - address, stake.delegation.voter_pubkey, - ); + if !vote_accounts_to_destake.is_empty() { + for (address, mut account) in bank + .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .unwrap() + .into_iter() + { + if let Ok(StakeState::Stake(meta, stake)) = account.state() { + if vote_accounts_to_destake +
.contains(&stake.delegation.voter_pubkey) + { + if verbose_level > 0 { + warn!( + "Undelegating stake account {} from {}", + address, stake.delegation.voter_pubkey, + ); + } + account.set_state(&StakeState::Initialized(meta)).unwrap(); + bank.store_account(&address, &account); } - account.set_state(&StakeState::Initialized(meta)).unwrap(); - bank.store_account(&address, &account); } } } - } - if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys { - assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0); + if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys { + assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0); - // Ensure there are no duplicated pubkeys in the --bootstrap-validator list - { - let mut v = bootstrap_validator_pubkeys.clone(); - v.sort(); - v.dedup(); - if v.len() != bootstrap_validator_pubkeys.len() { - eprintln!( - "Error: --bootstrap-validator pubkeys cannot be duplicated" + // Ensure there are no duplicated pubkeys in the --bootstrap-validator list + { + let mut v = bootstrap_validator_pubkeys.clone(); + v.sort(); + v.dedup(); + if v.len() != bootstrap_validator_pubkeys.len() { + eprintln!( + "Error: --bootstrap-validator pubkeys cannot be duplicated" + ); + exit(1); + } + } + + // Delete existing vote accounts + for (address, mut account) in bank + .get_program_accounts( + &solana_vote_program::id(), + &ScanConfig::default(), + ) + .unwrap() + .into_iter() + { + account.set_lamports(0); + bank.store_account(&address, &account); + } + + // Add a new identity/vote/stake account for each of the provided bootstrap + // validators + let mut bootstrap_validator_pubkeys_iter = + bootstrap_validator_pubkeys.iter(); + loop { + let identity_pubkey = match bootstrap_validator_pubkeys_iter.next() + { + None => break, + Some(identity_pubkey) => identity_pubkey, + }; + let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); + let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); + + bank.store_account( + identity_pubkey, + &AccountSharedData::new( + bootstrap_validator_lamports, + 0, + &system_program::id(), + ), ); - exit(1); + + let vote_account = vote_state::create_account_with_authorized( + identity_pubkey, + identity_pubkey, + identity_pubkey, + 100, + VoteState::get_rent_exempt_reserve(&rent).max(1), + ); + + bank.store_account( + stake_pubkey, + &stake_state::create_account( + bootstrap_stake_authorized_pubkey + .as_ref() + .unwrap_or(identity_pubkey), + vote_pubkey, + &vote_account, + &rent, + bootstrap_validator_stake_lamports, + ), + ); + bank.store_account(vote_pubkey, &vote_account); + } + + // Warp ahead at least two epochs to ensure that the leader schedule will be + // updated to reflect the new bootstrap validator(s) + let minimum_warp_slot = + genesis_config.epoch_schedule.get_first_slot_in_epoch( + genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2, + ); + + if let Some(warp_slot) = warp_slot { + if warp_slot < minimum_warp_slot { + eprintln!( + "Error: --warp-slot too close.
Must be >= {}", + minimum_warp_slot + ); + exit(1); + } + } else { + warn!("Warping to slot {}", minimum_warp_slot); + warp_slot = Some(minimum_warp_slot); } } - // Delete existing vote accounts - for (address, mut account) in bank - .get_program_accounts( - &solana_vote_program::id(), - &ScanConfig::default(), - ) - .unwrap() - .into_iter() - { - account.set_lamports(0); - bank.store_account(&address, &account); - } - - // Add a new identity/vote/stake account for each of the provided bootstrap - // validators - let mut bootstrap_validator_pubkeys_iter = - bootstrap_validator_pubkeys.iter(); - loop { - let identity_pubkey = match bootstrap_validator_pubkeys_iter.next() { - None => break, - Some(identity_pubkey) => identity_pubkey, - }; - let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); - let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); - - bank.store_account( - identity_pubkey, - &AccountSharedData::new( - bootstrap_validator_lamports, - 0, - &system_program::id(), - ), - ); - - let vote_account = vote_state::create_account_with_authorized( - identity_pubkey, - identity_pubkey, - identity_pubkey, - 100, - VoteState::get_rent_exempt_reserve(&rent).max(1), - ); - - bank.store_account( - stake_pubkey, - &stake_state::create_account( - bootstrap_stake_authorized_pubkey - .as_ref() - .unwrap_or(identity_pubkey), - vote_pubkey, - &vote_account, - &rent, - bootstrap_validator_stake_lamports, - ), - ); - bank.store_account(vote_pubkey, &vote_account); - } - - // Warp ahead at least two epochs to ensure that the leader schedule will be - // updated to reflect the new bootstrap validator(s) - let minimum_warp_slot = - genesis_config.epoch_schedule.get_first_slot_in_epoch( - genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2, - ); - - if let Some(warp_slot) = warp_slot { - if warp_slot < minimum_warp_slot { - eprintln!( - "Error: --warp-slot too close. Must be >= {}", - minimum_warp_slot - ); - exit(1); + if child_bank_required { + while !bank.is_complete() { + bank.register_tick(&Hash::new_unique()); } + } + + bank.set_capitalization(); + + let bank = if let Some(warp_slot) = warp_slot { + Arc::new(Bank::warp_from_parent( + &bank, + bank.collector_id(), + warp_slot, + )) } else { - warn!("Warping to slot {}", minimum_warp_slot); - warp_slot = Some(minimum_warp_slot); - } - } + bank + }; - if child_bank_required { - while !bank.is_complete() { - bank.register_tick(&Hash::new_unique()); - } - } + println!( + "Creating a version {} {}snapshot of slot {}", + snapshot_version, + if is_incremental { "incremental " } else { "" }, + bank.slot(), + ); - bank.set_capitalization(); - - let bank = if let Some(warp_slot) = warp_slot { - Arc::new(Bank::warp_from_parent( - &bank, - bank.collector_id(), - warp_slot, - )) - } else { - bank - }; - - println!( - "Creating a version {} {}snapshot of slot {}", - snapshot_version, - if is_incremental { "incremental " } else { "" }, - bank.slot(), - ); - - if is_incremental { - if starting_snapshot_hashes.is_none() { - eprintln!("Unable to create incremental snapshot without a base full snapshot"); - exit(1); - } - let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.hash.0; - if bank.slot() <= full_snapshot_slot { - eprintln!("Unable to create incremental snapshot: Slot must be greater than full snapshot slot. 
slot: {}, full snapshot slot: {}", + if is_incremental { + if starting_snapshot_hashes.is_none() { + eprintln!("Unable to create incremental snapshot without a base full snapshot"); + exit(1); + } + let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.hash.0; + if bank.slot() <= full_snapshot_slot { + eprintln!("Unable to create incremental snapshot: Slot must be greater than full snapshot slot. slot: {}, full snapshot slot: {}", bank.slot(), full_snapshot_slot, ); - exit(1); - } - - let incremental_snapshot_archive_info = - snapshot_utils::bank_to_incremental_snapshot_archive( - ledger_path, - &bank, - full_snapshot_slot, - Some(snapshot_version), - output_directory, - ArchiveFormat::TarZstd, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, - ) - .unwrap_or_else(|err| { - eprintln!("Unable to create incremental snapshot: {}", err); exit(1); - }); + } - println!( + let incremental_snapshot_archive_info = + snapshot_utils::bank_to_incremental_snapshot_archive( + ledger_path, + &bank, + full_snapshot_slot, + Some(snapshot_version), + output_directory, + ArchiveFormat::TarZstd, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap_or_else(|err| { + eprintln!("Unable to create incremental snapshot: {}", err); + exit(1); + }); + + println!( "Successfully created incremental snapshot for slot {}, hash {}, base slot: {}: {}", bank.slot(), bank.hash(), full_snapshot_slot, incremental_snapshot_archive_info.path().display(), ); - } else { - let full_snapshot_archive_info = - snapshot_utils::bank_to_full_snapshot_archive( - ledger_path, - &bank, - Some(snapshot_version), - output_directory, - ArchiveFormat::TarZstd, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, - ) - .unwrap_or_else(|err| { - eprintln!("Unable to create snapshot: {}", err); - exit(1); - }); + } else { + let full_snapshot_archive_info = + snapshot_utils::bank_to_full_snapshot_archive( + ledger_path, + &bank, + Some(snapshot_version), + output_directory, + ArchiveFormat::TarZstd, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap_or_else(|err| { + eprintln!("Unable to create snapshot: {}", err); + exit(1); + }); + + println!( + "Successfully created snapshot for slot {}, hash {}: {}", + bank.slot(), + bank.hash(), + full_snapshot_archive_info.path().display(), + ); + } println!( - "Successfully created snapshot for slot {}, hash {}: {}", - bank.slot(), - bank.hash(), - full_snapshot_archive_info.path().display(), + "Shred version: {}", + compute_shred_version( + &genesis_config.hash(), + Some(&bank.hard_forks().read().unwrap()) + ) ); } - - println!( - "Shred version: {}", - compute_shred_version( - &genesis_config.hash(), - Some(&bank.hard_forks().read().unwrap()) - ) - ); + Err(err) => { + eprintln!("Failed to load ledger: {:?}", err); + exit(1); + } } - Err(err) => { + } + ("accounts", Some(arg_matches)) => { + let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); + let process_options = ProcessOptions { + dev_halt_at_slot, + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + poh_verify: false, + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let include_sysvars = arg_matches.is_present("include_sysvars"); + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let 
(bank_forks, ..) = load_bank_forks( + arg_matches, + &genesis_config, + &blockstore, + process_options, + snapshot_archive_path, + ) + .unwrap_or_else(|err| { eprintln!("Failed to load ledger: {:?}", err); exit(1); - } - } - } - ("accounts", Some(arg_matches)) => { - let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); - let process_options = ProcessOptions { - dev_halt_at_slot, - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - poh_verify: false, - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let include_sysvars = arg_matches.is_present("include_sysvars"); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - let (bank_forks, ..) = load_bank_forks( - arg_matches, - &genesis_config, - &blockstore, - process_options, - snapshot_archive_path, - ) - .unwrap_or_else(|err| { - eprintln!("Failed to load ledger: {:?}", err); - exit(1); - }); + }); - let bank = bank_forks.working_bank(); - let mut measure = Measure::start("getting accounts"); - let accounts: BTreeMap<_, _> = bank - .get_all_accounts_with_modified_slots() - .unwrap() - .into_iter() - .filter(|(pubkey, _account, _slot)| { - include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey) - }) - .map(|(pubkey, account, slot)| (pubkey, (account, slot))) - .collect(); - measure.stop(); - info!("{}", measure); - - let mut measure = Measure::start("calculating total accounts stats"); - let total_accounts_stats = bank.calculate_total_accounts_stats( - accounts - .iter() - .map(|(pubkey, (account, _slot))| (pubkey, account)), - ); - measure.stop(); - info!("{}", measure); - - let print_account_contents = !arg_matches.is_present("no_account_contents"); - if print_account_contents { - let print_account_data = !arg_matches.is_present("no_account_data"); - let mut measure = Measure::start("printing account contents"); - for (pubkey, (account, slot)) in accounts.into_iter() { - let data_len = account.data().len(); - println!("{}:", pubkey); - println!(" - balance: {} SOL", lamports_to_sol(account.lamports())); - println!(" - owner: '{}'", account.owner()); - println!(" - executable: {}", account.executable()); - println!(" - slot: {}", slot); - println!(" - rent_epoch: {}", account.rent_epoch()); - if print_account_data { - println!(" - data: '{}'", bs58::encode(account.data()).into_string()); - } - println!(" - data_len: {}", data_len); - } + let bank = bank_forks.working_bank(); + let mut measure = Measure::start("getting accounts"); + let accounts: BTreeMap<_, _> = bank + .get_all_accounts_with_modified_slots() + .unwrap() + .into_iter() + .filter(|(pubkey, _account, _slot)| { + include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey) + }) + .map(|(pubkey, account, slot)| (pubkey, (account, slot))) + .collect(); measure.stop(); info!("{}", measure); - } - println!("{:#?}", total_accounts_stats); - } - ("capitalization", Some(arg_matches)) => { - let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); - let process_options = ProcessOptions { - dev_halt_at_slot, - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - poh_verify: false, - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - AccessType::TryPrimaryThenSecondary, - wal_recovery_mode, - ); - match load_bank_forks( - arg_matches, - &genesis_config, - &blockstore, - process_options, -
snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - let slot = bank_forks.working_bank().slot(); - let bank = bank_forks.get(slot).unwrap_or_else(|| { - eprintln!("Error: Slot {} is not available", slot); - exit(1); - }); + let mut measure = Measure::start("calculating total accounts stats"); + let total_accounts_stats = bank.calculate_total_accounts_stats( + accounts + .iter() + .map(|(pubkey, (account, _slot))| (pubkey, account)), + ); + measure.stop(); + info!("{}", measure); - if arg_matches.is_present("recalculate_capitalization") { - println!("Recalculating capitalization"); - let old_capitalization = bank.set_capitalization(); - if old_capitalization == bank.capitalization() { - eprintln!("Capitalization was identical: {}", Sol(old_capitalization)); + let print_account_contents = !arg_matches.is_present("no_account_contents"); + if print_account_contents { + let print_account_data = !arg_matches.is_present("no_account_data"); + let mut measure = Measure::start("printing account contents"); + for (pubkey, (account, slot)) in accounts.into_iter() { + let data_len = account.data().len(); + println!("{}:", pubkey); + println!(" - balance: {} SOL", lamports_to_sol(account.lamports())); + println!(" - owner: '{}'", account.owner()); + println!(" - executable: {}", account.executable()); + println!(" - slot: {}", slot); + println!(" - rent_epoch: {}", account.rent_epoch()); + if print_account_data { + println!(" - data: '{}'", bs58::encode(account.data()).into_string()); } + println!(" - data_len: {}", data_len); } + measure.stop(); + info!("{}", measure); + } - if arg_matches.is_present("warp_epoch") { - let base_bank = bank; - - let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap(); - let warp_epoch = if raw_warp_epoch.starts_with('+') { - base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() - } else { - value_t!(arg_matches, "warp_epoch", Epoch).unwrap() - }; - if warp_epoch < base_bank.epoch() { - eprintln!( - "Error: can't warp epoch backwards: {} => {}", - base_bank.epoch(), - warp_epoch - ); + println!("{:#?}", total_accounts_stats); + } + ("capitalization", Some(arg_matches)) => { + let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); + let process_options = ProcessOptions { + dev_halt_at_slot, + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + poh_verify: false, + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + match load_bank_forks( + arg_matches, + &genesis_config, + &blockstore, + process_options, + snapshot_archive_path, + ) { + Ok((bank_forks, ..)) => { + let slot = bank_forks.working_bank().slot(); + let bank = bank_forks.get(slot).unwrap_or_else(|| { + eprintln!("Error: Slot {} is not available", slot); exit(1); + }); + + if arg_matches.is_present("recalculate_capitalization") { + println!("Recalculating capitalization"); + let old_capitalization = bank.set_capitalization(); + if old_capitalization == bank.capitalization() { + eprintln!( + "Capitalization was identical: {}", + Sol(old_capitalization) + ); + } } - if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) { - let inflation = match raw_inflation.as_str() { - "pico" => Inflation::pico(), - "full" => Inflation::full(), - "none" => Inflation::new_disabled(), - _ => unreachable!(), + if arg_matches.is_present("warp_epoch") { + let base_bank = bank; + + 
let raw_warp_epoch =
+                                value_t!(arg_matches, "warp_epoch", String).unwrap();
+                            let warp_epoch = if raw_warp_epoch.starts_with('+') {
+                                base_bank.epoch()
+                                    + value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
+                            } else {
+                                value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
+                            };
-                    println!(
-                        "Forcing to: {:?} (was: {:?})",
-                        inflation,
-                        base_bank.inflation()
-                    );
-                    base_bank.set_inflation(inflation);
-                }
+                            if warp_epoch < base_bank.epoch() {
+                                eprintln!(
+                                    "Error: can't warp epoch backwards: {} => {}",
+                                    base_bank.epoch(),
+                                    warp_epoch
+                                );
+                                exit(1);
+                            }

-                    let next_epoch = base_bank
-                        .epoch_schedule()
-                        .get_first_slot_in_epoch(warp_epoch);
-                    // disable eager rent collection because this creates many unrelated
-                    // rent collection account updates
-                    base_bank
-                        .lazy_rent_collection
-                        .store(true, std::sync::atomic::Ordering::Relaxed);
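+                            // Optionally force an inflation schedule ("pico", "full", or
+                            // "none") on the base bank before warping.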
+                            if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) {
+                                let inflation = match raw_inflation.as_str() {
+                                    "pico" => Inflation::pico(),
+                                    "full" => Inflation::full(),
+                                    "none" => Inflation::new_disabled(),
+                                    _ => unreachable!(),
+                                };
+                                println!(
+                                    "Forcing to: {:?} (was: {:?})",
+                                    inflation,
+                                    base_bank.inflation()
+                                );
+                                base_bank.set_inflation(inflation);
+                            }

-                    #[derive(Default, Debug)]
-                    struct PointDetail {
-                        epoch: Epoch,
-                        points: u128,
-                        stake: u128,
-                        credits: u128,
-                    }
+                            let next_epoch = base_bank
+                                .epoch_schedule()
+                                .get_first_slot_in_epoch(warp_epoch);
+                            // disable eager rent collection because this creates many unrelated
+                            // rent collection account updates
+                            base_bank
+                                .lazy_rent_collection
+                                .store(true, std::sync::atomic::Ordering::Relaxed);

-                    #[derive(Default, Debug)]
-                    struct CalculationDetail {
-                        epochs: usize,
-                        voter: Pubkey,
-                        voter_owner: Pubkey,
-                        current_effective_stake: u64,
-                        total_stake: u64,
-                        rent_exempt_reserve: u64,
-                        points: Vec<PointDetail>,
-                        base_rewards: u64,
-                        commission: u8,
-                        vote_rewards: u64,
-                        stake_rewards: u64,
-                        activation_epoch: Epoch,
-                        deactivation_epoch: Option<Epoch>,
-                        point_value: Option<PointValue>,
-                        old_credits_observed: Option<u64>,
-                        new_credits_observed: Option<u64>,
-                        skipped_reasons: String,
-                    }
-                    use solana_stake_program::stake_state::InflationPointCalculationEvent;
-                    let stake_calculation_details: DashMap<Pubkey, CalculationDetail> =
-                        DashMap::new();
-                    let last_point_value = Arc::new(RwLock::new(None));
-                    let tracer = |event: &RewardCalculationEvent| {
-                        // Currently the RewardCalculationEvent enum has only the Staking
-                        // variant, because only staking tracing is supported!
-                        #[allow(irrefutable_let_patterns)]
-                        if let RewardCalculationEvent::Staking(pubkey, event) = event {
-                            let mut detail =
-                                stake_calculation_details.entry(**pubkey).or_default();
-                            match event {
+                            #[derive(Default, Debug)]
+                            struct PointDetail {
+                                epoch: Epoch,
+                                points: u128,
+                                stake: u128,
+                                credits: u128,
+                            }
+
+                            #[derive(Default, Debug)]
+                            struct CalculationDetail {
+                                epochs: usize,
+                                voter: Pubkey,
+                                voter_owner: Pubkey,
+                                current_effective_stake: u64,
+                                total_stake: u64,
+                                rent_exempt_reserve: u64,
+                                points: Vec<PointDetail>,
+                                base_rewards: u64,
+                                commission: u8,
+                                vote_rewards: u64,
+                                stake_rewards: u64,
+                                activation_epoch: Epoch,
+                                deactivation_epoch: Option<Epoch>,
+                                point_value: Option<PointValue>,
+                                old_credits_observed: Option<u64>,
+                                new_credits_observed: Option<u64>,
+                                skipped_reasons: String,
+                            }
+                            use solana_stake_program::stake_state::InflationPointCalculationEvent;
+                            let stake_calculation_details: DashMap<Pubkey, CalculationDetail> =
+                                DashMap::new();
+                            let last_point_value = Arc::new(RwLock::new(None));
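+                            // The tracer fires once per staking reward event during the warp
+                            // and folds the details into `stake_calculation_details`, keyed by
+                            // stake-account pubkey.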
+                            let tracer = |event: &RewardCalculationEvent| {
+                                // Currently the RewardCalculationEvent enum has only the Staking
+                                // variant, because only staking tracing is supported!
+                                #[allow(irrefutable_let_patterns)]
+                                if let RewardCalculationEvent::Staking(pubkey, event) = event {
+                                    let mut detail =
+                                        stake_calculation_details.entry(**pubkey).or_default();
+                                    match event {
                                InflationPointCalculationEvent::CalculatedPoints(
                                    epoch,
                                    stake,
@@ -2753,315 +2761,324 @@ fn main() {
                                }
                            }
                        }
-                    }
-                };
-                let warped_bank = Bank::new_from_parent_with_tracer(
-                    base_bank,
-                    base_bank.collector_id(),
-                    next_epoch,
-                    tracer,
-                );
-                warped_bank.freeze();
-                let mut csv_writer = if arg_matches.is_present("csv_filename") {
-                    let csv_filename =
-                        value_t_or_exit!(arg_matches, "csv_filename", String);
-                    let file = File::create(&csv_filename).unwrap();
-                    Some(csv::WriterBuilder::new().from_writer(file))
-                } else {
-                    None
-                };
-
-                println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot());
-                println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch());
-                assert_capitalization(base_bank);
-                assert_capitalization(&warped_bank);
-                let interest_per_epoch = ((warped_bank.capitalization() as f64)
-                    / (base_bank.capitalization() as f64)
-                    * 100_f64)
-                    - 100_f64;
-                let interest_per_year = interest_per_epoch
-                    / warped_bank.epoch_duration_in_years(base_bank.epoch());
-                println!(
-                    "Capitalization: {} => {} (+{} {}%; annualized {}%)",
-                    Sol(base_bank.capitalization()),
-                    Sol(warped_bank.capitalization()),
-                    Sol(warped_bank.capitalization() - base_bank.capitalization()),
-                    interest_per_epoch,
-                    interest_per_year,
-                );
-
-                let mut overall_delta = 0;
-
-                let modified_accounts =
-                    warped_bank.get_all_accounts_modified_since_parent();
-                let mut rewarded_accounts = modified_accounts
-                    .iter()
-                    .map(|(pubkey, account)| {
-                        (
-                            pubkey,
-                            account,
-                            base_bank
-                                .get_account(pubkey)
-                                .map(|a| a.lamports())
-                                .unwrap_or_default(),
-                        )
-                    })
-                    .collect::<Vec<_>>();
-                rewarded_accounts.sort_unstable_by_key(
-                    |(pubkey, account, base_lamports)| {
-                        (
-                            *account.owner(),
-                            *base_lamports,
-                            account.lamports() - base_lamports,
-                            *pubkey,
-                        )
-                    },
-                );
-
-                let mut unchanged_accounts = stake_calculation_details
-                    .iter()
-                    .map(|entry| *entry.key())
-                    .collect::<HashSet<_>>()
-                    .difference(
-                        &rewarded_accounts
-                            .iter()
-                            .map(|(pubkey, ..)| **pubkey)
-                            .collect(),
-                    )
-                    .map(|pubkey| (*pubkey, warped_bank.get_account(pubkey).unwrap()))
-                    .collect::<Vec<_>>();
-                unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| {
-                    (*account.owner(), account.lamports(), *pubkey)
-                });
-                let unchanged_accounts = unchanged_accounts.into_iter();
-
-                let rewarded_accounts = rewarded_accounts
-                    .into_iter()
-                    .map(|(pubkey, account, ..)| (*pubkey, account.clone()));
-
-                let all_accounts = unchanged_accounts.chain(rewarded_accounts);
-                for (pubkey, warped_account) in all_accounts {
-                    // Don't output sysvars; they're always updated but not related to
-                    // inflation.
-                    if solana_sdk::sysvar::is_sysvar_id(&pubkey) {
-                        continue;
-                    }
-
-                    if let Some(base_account) = base_bank.get_account(&pubkey) {
-                        let delta = warped_account.lamports() - base_account.lamports();
-                        let detail_ref = stake_calculation_details.get(&pubkey);
-                        let detail: Option<&CalculationDetail> =
-                            detail_ref.as_ref().map(|detail_ref| detail_ref.value());
-                        println!(
-                            "{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}",
-                            format!("{}", pubkey), // format! is needed to pad/justify correctly.
-                            base_account.owner(),
-                            Sol(base_account.lamports()),
-                            Sol(warped_account.lamports()),
-                            Sol(delta),
-                            ((warped_account.lamports() as f64)
-                                / (base_account.lamports() as f64)
-                                * 100_f64)
-                                - 100_f64,
-                            detail,
-                        );
-                        if let Some(ref mut csv_writer) = csv_writer {
-                            #[derive(Serialize)]
-                            struct InflationRecord {
-                                cluster_type: String,
-                                rewarded_epoch: Epoch,
-                                account: String,
-                                owner: String,
-                                old_balance: u64,
-                                new_balance: u64,
-                                data_size: usize,
-                                delegation: String,
-                                delegation_owner: String,
-                                effective_stake: String,
-                                delegated_stake: String,
-                                rent_exempt_reserve: String,
-                                activation_epoch: String,
-                                deactivation_epoch: String,
-                                earned_epochs: String,
-                                epoch: String,
-                                epoch_credits: String,
-                                epoch_points: String,
-                                epoch_stake: String,
-                                old_credits_observed: String,
-                                new_credits_observed: String,
-                                base_rewards: String,
-                                stake_rewards: String,
-                                vote_rewards: String,
-                                commission: String,
-                                cluster_rewards: String,
-                                cluster_points: String,
-                                old_capitalization: u64,
-                                new_capitalization: u64,
-                            }
-                            fn format_or_na<T: fmt::Display>(
-                                data: Option<T>,
-                            ) -> String {
-                                data.map(|data| format!("{}", data))
-                                    .unwrap_or_else(|| "N/A".to_owned())
-                            }
-                            let mut point_details = detail
-                                .map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
-                                .unwrap_or_default();
-
-                            // ensure we print even if there is no calculation/point detail
-                            if point_details.is_empty() {
-                                point_details.push(None);
-                            }
-
-                            for point_detail in point_details {
-                                let record = InflationRecord {
-                                    cluster_type: format!("{:?}", base_bank.cluster_type()),
-                                    rewarded_epoch: base_bank.epoch(),
-                                    account: format!("{}", pubkey),
-                                    owner: format!("{}", base_account.owner()),
-                                    old_balance: base_account.lamports(),
-                                    new_balance: warped_account.lamports(),
-                                    data_size: base_account.data().len(),
-                                    delegation: format_or_na(detail.map(|d| d.voter)),
-                                    delegation_owner: format_or_na(
-                                        detail.map(|d| d.voter_owner),
-                                    ),
-                                    effective_stake: format_or_na(
-                                        detail.map(|d| d.current_effective_stake),
-                                    ),
-                                    delegated_stake: format_or_na(
-                                        detail.map(|d| d.total_stake),
-                                    ),
-                                    rent_exempt_reserve: format_or_na(
-                                        detail.map(|d| d.rent_exempt_reserve),
-                                    ),
-                                    activation_epoch: format_or_na(detail.map(|d| {
-                                        if d.activation_epoch < Epoch::max_value() {
-                                            d.activation_epoch
-                                        } else {
-                                            // bootstrapped
-                                            0
-                                        }
-                                    })),
-                                    deactivation_epoch: format_or_na(
-                                        detail.and_then(|d| d.deactivation_epoch),
-                                    ),
-                                    earned_epochs: format_or_na(detail.map(|d| d.epochs)),
-                                    epoch: format_or_na(point_detail.map(|d| d.epoch)),
-                                    epoch_credits: format_or_na(
-                                        point_detail.map(|d| d.credits),
-                                    ),
-                                    epoch_points: format_or_na(
-                                        point_detail.map(|d| d.points),
-                                    ),
-                                    epoch_stake: format_or_na(
-                                        point_detail.map(|d| d.stake),
-                                    ),
-
old_credits_observed: format_or_na( - detail.and_then(|d| d.old_credits_observed), - ), - new_credits_observed: format_or_na( - detail.and_then(|d| d.new_credits_observed), - ), - base_rewards: format_or_na( - detail.map(|d| d.base_rewards), - ), - stake_rewards: format_or_na( - detail.map(|d| d.stake_rewards), - ), - vote_rewards: format_or_na( - detail.map(|d| d.vote_rewards), - ), - commission: format_or_na(detail.map(|d| d.commission)), - cluster_rewards: format_or_na( - last_point_value - .read() - .unwrap() - .clone() - .map(|pv| pv.rewards), - ), - cluster_points: format_or_na( - last_point_value - .read() - .unwrap() - .clone() - .map(|pv| pv.points), - ), - old_capitalization: base_bank.capitalization(), - new_capitalization: warped_bank.capitalization(), - }; - csv_writer.serialize(&record).unwrap(); - } } - overall_delta += delta; - } else { - error!("new account!?: {}", pubkey); - } - } - if overall_delta > 0 { - println!("Sum of lamports changes: {}", Sol(overall_delta)); - } - } else { - if arg_matches.is_present("recalculate_capitalization") { - eprintln!("Capitalization isn't verified because it's recalculated"); - } - if arg_matches.is_present("inflation") { - eprintln!( - "Forcing inflation isn't meaningful because bank isn't warping" + }; + let warped_bank = Bank::new_from_parent_with_tracer( + base_bank, + base_bank.collector_id(), + next_epoch, + tracer, ); - } + warped_bank.freeze(); + let mut csv_writer = if arg_matches.is_present("csv_filename") { + let csv_filename = + value_t_or_exit!(arg_matches, "csv_filename", String); + let file = File::create(&csv_filename).unwrap(); + Some(csv::WriterBuilder::new().from_writer(file)) + } else { + None + }; - assert_capitalization(bank); - println!("Inflation: {:?}", bank.inflation()); - println!("RentCollector: {:?}", bank.rent_collector()); - println!("Capitalization: {}", Sol(bank.capitalization())); - } - } - Err(err) => { - eprintln!("Failed to load ledger: {:?}", err); - exit(1); - } - } - } - ("purge", Some(arg_matches)) => { - let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot); - let end_slot = value_t!(arg_matches, "end_slot", Slot).ok(); - let no_compaction = arg_matches.is_present("no_compaction"); - let dead_slots_only = arg_matches.is_present("dead_slots_only"); - let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize); - let access_type = if !no_compaction { - AccessType::PrimaryOnly - } else { - AccessType::PrimaryOnlyForMaintenance - }; - let blockstore = open_blockstore(&ledger_path, access_type, wal_recovery_mode); + println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot()); + println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch()); + assert_capitalization(base_bank); + assert_capitalization(&warped_bank); + let interest_per_epoch = ((warped_bank.capitalization() as f64) + / (base_bank.capitalization() as f64) + * 100_f64) + - 100_f64; + let interest_per_year = interest_per_epoch + / warped_bank.epoch_duration_in_years(base_bank.epoch()); + println!( + "Capitalization: {} => {} (+{} {}%; annualized {}%)", + Sol(base_bank.capitalization()), + Sol(warped_bank.capitalization()), + Sol(warped_bank.capitalization() - base_bank.capitalization()), + interest_per_epoch, + interest_per_year, + ); - let end_slot = match end_slot { - Some(end_slot) => end_slot, - None => match blockstore.slot_meta_iterator(start_slot) { - Ok(metas) => { - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); - if slots.is_empty() { - eprintln!("Purge range is empty"); - exit(1); + let 
mut overall_delta = 0;
+
+                        let modified_accounts =
+                            warped_bank.get_all_accounts_modified_since_parent();
+                        let mut rewarded_accounts = modified_accounts
+                            .iter()
+                            .map(|(pubkey, account)| {
+                                (
+                                    pubkey,
+                                    account,
+                                    base_bank
+                                        .get_account(pubkey)
+                                        .map(|a| a.lamports())
+                                        .unwrap_or_default(),
+                                )
+                            })
+                            .collect::<Vec<_>>();
+                        rewarded_accounts.sort_unstable_by_key(
+                            |(pubkey, account, base_lamports)| {
+                                (
+                                    *account.owner(),
+                                    *base_lamports,
+                                    account.lamports() - base_lamports,
+                                    *pubkey,
+                                )
+                            },
+                        );
+
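+                        // Stake accounts the tracer saw but whose lamports did not change:
+                        // the traced set minus the rewarded set.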
+                        let mut unchanged_accounts = stake_calculation_details
+                            .iter()
+                            .map(|entry| *entry.key())
+                            .collect::<HashSet<_>>()
+                            .difference(
+                                &rewarded_accounts
+                                    .iter()
+                                    .map(|(pubkey, ..)| **pubkey)
+                                    .collect(),
+                            )
+                            .map(|pubkey| (*pubkey, warped_bank.get_account(pubkey).unwrap()))
+                            .collect::<Vec<_>>();
+                        unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| {
+                            (*account.owner(), account.lamports(), *pubkey)
+                        });
+                        let unchanged_accounts = unchanged_accounts.into_iter();
+
+                        let rewarded_accounts = rewarded_accounts
+                            .into_iter()
+                            .map(|(pubkey, account, ..)| (*pubkey, account.clone()));
+
+                        let all_accounts = unchanged_accounts.chain(rewarded_accounts);
+                        for (pubkey, warped_account) in all_accounts {
+                            // Don't output sysvars; they're always updated but not related to
+                            // inflation.
+                            if solana_sdk::sysvar::is_sysvar_id(&pubkey) {
+                                continue;
+                            }
+
+                            if let Some(base_account) = base_bank.get_account(&pubkey) {
+                                let delta = warped_account.lamports() - base_account.lamports();
+                                let detail_ref = stake_calculation_details.get(&pubkey);
+                                let detail: Option<&CalculationDetail> =
+                                    detail_ref.as_ref().map(|detail_ref| detail_ref.value());
+                                println!(
+                                    "{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}",
+                                    format!("{}", pubkey), // format! is needed to pad/justify correctly.
+                                    base_account.owner(),
+                                    Sol(base_account.lamports()),
+                                    Sol(warped_account.lamports()),
+                                    Sol(delta),
+                                    ((warped_account.lamports() as f64)
+                                        / (base_account.lamports() as f64)
+                                        * 100_f64)
+                                        - 100_f64,
+                                    detail,
+                                );
+                                if let Some(ref mut csv_writer) = csv_writer {
+                                    #[derive(Serialize)]
+                                    struct InflationRecord {
+                                        cluster_type: String,
+                                        rewarded_epoch: Epoch,
+                                        account: String,
+                                        owner: String,
+                                        old_balance: u64,
+                                        new_balance: u64,
+                                        data_size: usize,
+                                        delegation: String,
+                                        delegation_owner: String,
+                                        effective_stake: String,
+                                        delegated_stake: String,
+                                        rent_exempt_reserve: String,
+                                        activation_epoch: String,
+                                        deactivation_epoch: String,
+                                        earned_epochs: String,
+                                        epoch: String,
+                                        epoch_credits: String,
+                                        epoch_points: String,
+                                        epoch_stake: String,
+                                        old_credits_observed: String,
+                                        new_credits_observed: String,
+                                        base_rewards: String,
+                                        stake_rewards: String,
+                                        vote_rewards: String,
+                                        commission: String,
+                                        cluster_rewards: String,
+                                        cluster_points: String,
+                                        old_capitalization: u64,
+                                        new_capitalization: u64,
+                                    }
+                                    fn format_or_na<T: fmt::Display>(
+                                        data: Option<T>,
+                                    ) -> String {
+                                        data.map(|data| format!("{}", data))
+                                            .unwrap_or_else(|| "N/A".to_owned())
+                                    }
+                                    let mut point_details = detail
+                                        .map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
+                                        .unwrap_or_default();
+
+                                    // ensure we print even if there is no calculation/point detail
+                                    if point_details.is_empty() {
+                                        point_details.push(None);
+                                    }
+
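+                                    // One CSV row per recorded point detail; the epoch columns
+                                    // fall back to "N/A" when a detail is missing.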
+                                    for point_detail in point_details {
+                                        let record = InflationRecord {
+                                            cluster_type: format!(
+                                                "{:?}",
+                                                base_bank.cluster_type()
+                                            ),
+                                            rewarded_epoch: base_bank.epoch(),
+                                            account: format!("{}", pubkey),
+                                            owner: format!("{}", base_account.owner()),
+                                            old_balance: base_account.lamports(),
+                                            new_balance: warped_account.lamports(),
+                                            data_size: base_account.data().len(),
+                                            delegation: format_or_na(detail.map(|d| d.voter)),
+                                            delegation_owner: format_or_na(
+                                                detail.map(|d| d.voter_owner),
+                                            ),
+                                            effective_stake: format_or_na(
+                                                detail.map(|d| d.current_effective_stake),
+                                            ),
+                                            delegated_stake: format_or_na(
+                                                detail.map(|d| d.total_stake),
+                                            ),
+                                            rent_exempt_reserve: format_or_na(
+                                                detail.map(|d| d.rent_exempt_reserve),
+                                            ),
+                                            activation_epoch: format_or_na(detail.map(|d| {
+                                                if d.activation_epoch < Epoch::max_value() {
+                                                    d.activation_epoch
+                                                } else {
+                                                    // bootstrapped
+                                                    0
+                                                }
+                                            })),
+                                            deactivation_epoch: format_or_na(
+                                                detail.and_then(|d| d.deactivation_epoch),
+                                            ),
+                                            earned_epochs: format_or_na(
+                                                detail.map(|d| d.epochs),
+                                            ),
+                                            epoch: format_or_na(point_detail.map(|d| d.epoch)),
+                                            epoch_credits: format_or_na(
+                                                point_detail.map(|d| d.credits),
+                                            ),
+                                            epoch_points: format_or_na(
+                                                point_detail.map(|d| d.points),
+                                            ),
+                                            epoch_stake: format_or_na(
+                                                point_detail.map(|d| d.stake),
+                                            ),
+                                            old_credits_observed: format_or_na(
+                                                detail.and_then(|d| d.old_credits_observed),
+                                            ),
+                                            new_credits_observed: format_or_na(
+                                                detail.and_then(|d| d.new_credits_observed),
+                                            ),
+                                            base_rewards: format_or_na(
+                                                detail.map(|d| d.base_rewards),
+                                            ),
+                                            stake_rewards: format_or_na(
+                                                detail.map(|d| d.stake_rewards),
+                                            ),
+                                            vote_rewards: format_or_na(
+                                                detail.map(|d| d.vote_rewards),
+                                            ),
+                                            commission: format_or_na(
+                                                detail.map(|d| d.commission),
+                                            ),
+                                            cluster_rewards: format_or_na(
+                                                last_point_value
+                                                    .read()
+                                                    .unwrap()
+                                                    .clone()
+                                                    .map(|pv| pv.rewards),
+                                            ),
+                                            cluster_points: format_or_na(
+                                                last_point_value
+                                                    .read()
+                                                    .unwrap()
+                                                    .clone()
+                                                    .map(|pv| pv.points),
+                                            ),
+                                            old_capitalization: base_bank.capitalization(),
+                                            new_capitalization: warped_bank.capitalization(),
+                                        };
+                                        csv_writer.serialize(&record).unwrap();
+                                    }
+                                }
+                                overall_delta += delta;
+                            } else {
+                                error!("new account!?: {}", pubkey);
+                            }
+                        }
+                        if overall_delta > 0 {
+                            println!("Sum of lamports changes: {}", Sol(overall_delta));
+                        }
+                    } else {
+                        if arg_matches.is_present("recalculate_capitalization") {
+                            eprintln!(
+                                "Capitalization isn't verified because it's recalculated"
+                            );
+                        }
+                        if arg_matches.is_present("inflation") {
+                            eprintln!(
+                                "Forcing inflation isn't meaningful because bank isn't warping"
+                            );
+                        }
+
+                        assert_capitalization(bank);
+                        println!("Inflation: {:?}", bank.inflation());
+                        println!("RentCollector: {:?}", bank.rent_collector());
+                        println!("Capitalization: {}", Sol(bank.capitalization()));
                    }
                }
                Err(err) => {
-                    eprintln!("Unable to read the Ledger: {:?}", err);
+                    eprintln!("Failed to load ledger: {:?}", err);
                    exit(1);
                }
-            },
-        };
-
-        if end_slot < start_slot {
-            eprintln!(
-                "end slot {} is less than start slot {}",
-                end_slot, start_slot
-            );
-            exit(1);
+            }
        }
-        info!(
+        ("purge", Some(arg_matches)) => {
+            let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
+            let end_slot = value_t!(arg_matches, "end_slot", Slot).ok();
+            let no_compaction = arg_matches.is_present("no_compaction");
+            let dead_slots_only = arg_matches.is_present("dead_slots_only");
+            let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize);
+            let access_type = if !no_compaction {
+                AccessType::PrimaryOnly
+            } else {
+                AccessType::PrimaryOnlyForMaintenance
+            };
+            let blockstore = open_blockstore(&ledger_path, access_type, wal_recovery_mode);
+
+            let end_slot = match end_slot {
+                Some(end_slot) => end_slot,
+                None => match blockstore.slot_meta_iterator(start_slot) {
+                    Ok(metas) => {
+                        let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
+                        if slots.is_empty() {
+                            eprintln!("Purge range is empty");
+                            exit(1);
+                        }
+                        *slots.last().unwrap()
+                    }
+                    Err(err) => {
+                        eprintln!("Unable to read the Ledger: {:?}", err);
+                        exit(1);
+                    }
+                },
+            };
+
+            if end_slot < start_slot {
+                eprintln!(
+                    "end slot {} is less than start slot {}",
+                    end_slot, start_slot
+                );
+                exit(1);
+            }
+            info!(
"Purging data from slots {} to {} ({} slots) (skip compaction: {}) (dead slot only: {})",
                start_slot,
                end_slot,
@@ -3069,256 +3086,257 @@ fn main() {
                no_compaction,
                dead_slots_only,
            );
-        let purge_from_blockstore = |start_slot, end_slot| {
-            blockstore.purge_from_next_slots(start_slot, end_slot);
-            if no_compaction {
-                blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
-            } else {
-                blockstore.purge_and_compact_slots(start_slot, end_slot);
-            }
-        };
-        if !dead_slots_only {
-            let slots_iter = &(start_slot..=end_slot).chunks(batch_size);
-            for slots in slots_iter {
-                let slots = slots.collect::<Vec<_>>();
-                assert!(!slots.is_empty());
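+            // Purge helper: drop the range from next-slot chaining, then delete it,
+            // compacting afterwards unless --no_compaction was given.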
+            let purge_from_blockstore = |start_slot, end_slot| {
+                blockstore.purge_from_next_slots(start_slot, end_slot);
+                if no_compaction {
+                    blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
+                } else {
+                    blockstore.purge_and_compact_slots(start_slot, end_slot);
+                }
+            };
+            if !dead_slots_only {
+                let slots_iter = &(start_slot..=end_slot).chunks(batch_size);
+                for slots in slots_iter {
+                    let slots = slots.collect::<Vec<_>>();
+                    assert!(!slots.is_empty());

-                let start_slot = *slots.first().unwrap();
-                let end_slot = *slots.last().unwrap();
-                info!(
-                    "Purging chunked slots from {} to {} ({} slots)",
-                    start_slot,
-                    end_slot,
-                    end_slot - start_slot
-                );
-                purge_from_blockstore(start_slot, end_slot);
-            }
-        } else {
-            let dead_slots_iter = blockstore
-                .dead_slots_iterator(start_slot)
-                .unwrap()
-                .take_while(|s| *s <= end_slot);
-            for dead_slot in dead_slots_iter {
-                info!("Purging dead slot {}", dead_slot);
-                purge_from_blockstore(dead_slot, dead_slot);
-            }
-        }
-    }
-    ("list-roots", Some(arg_matches)) => {
-        let blockstore = open_blockstore(
-            &ledger_path,
-            AccessType::TryPrimaryThenSecondary,
-            wal_recovery_mode,
-        );
-        let max_height = if let Some(height) = arg_matches.value_of("max_height") {
-            usize::from_str(height).expect("Maximum height must be a number")
-        } else {
-            usize::MAX
-        };
-        let start_root = if let Some(height) = arg_matches.value_of("start_root") {
-            Slot::from_str(height).expect("Starting root must be a number")
-        } else {
-            0
-        };
-        let num_roots = if let Some(roots) = arg_matches.value_of("num_roots") {
-            usize::from_str(roots).expect("Number of roots must be a number")
-        } else {
-            usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
-        };
-
-        let iter = blockstore
-            .rooted_slot_iterator(start_root)
-            .expect("Failed to get rooted slot");
-
-        let mut slot_hash = Vec::new();
-        for (i, slot) in iter.into_iter().enumerate() {
-            if i > num_roots {
-                break;
-            }
-            if slot <= max_height as u64 {
-                let blockhash = blockstore
-                    .get_slot_entries(slot, 0)
-                    .unwrap()
-                    .last()
-                    .unwrap()
-                    .hash;
-                slot_hash.push((slot, blockhash));
-            } else {
-                break;
-            }
-        }
-
-        let mut output_file: Box<dyn Write> =
-            if let Some(path) = arg_matches.value_of("slot_list") {
-                match File::create(path) {
-                    Ok(file) => Box::new(file),
-                    _ => Box::new(stdout()),
+                    let start_slot = *slots.first().unwrap();
+                    let end_slot = *slots.last().unwrap();
+                    info!(
+                        "Purging chunked slots from {} to {} ({} slots)",
+                        start_slot,
+                        end_slot,
+                        end_slot - start_slot
+                    );
+                    purge_from_blockstore(start_slot, end_slot);
                }
            } else {
-                Box::new(stdout())
+                let dead_slots_iter = blockstore
+                    .dead_slots_iterator(start_slot)
+                    .unwrap()
+                    .take_while(|s| *s <= end_slot);
+                for dead_slot in dead_slots_iter {
+                    info!("Purging dead slot {}", dead_slot);
+                    purge_from_blockstore(dead_slot, dead_slot);
+                }
+            }
+        }
+        ("list-roots", Some(arg_matches)) => {
+            let blockstore = open_blockstore(
+                &ledger_path,
+                AccessType::TryPrimaryThenSecondary,
+                wal_recovery_mode,
+            );
+            let max_height = if let Some(height) = arg_matches.value_of("max_height") {
+                usize::from_str(height).expect("Maximum height must be a number")
+            } else {
+                usize::MAX
+            };
+            let start_root = if let Some(height) = arg_matches.value_of("start_root") {
+                Slot::from_str(height).expect("Starting root must be a number")
+            } else {
+                0
+            };
+            let num_roots = if let Some(roots) = arg_matches.value_of("num_roots") {
+                usize::from_str(roots).expect("Number of roots must be a number")
+            } else {
+                usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
            };

            slot_hash
-            .into_iter()
-            .rev()
-            .enumerate()
-            .for_each(|(i, (slot, hash))| {
-                if i < num_roots {
-                    output_file
-                        .write_all(format!("{:?}: {:?}\n", slot, hash).as_bytes())
-                        .expect("failed to write");
+            let iter = blockstore
+                .rooted_slot_iterator(start_root)
+                .expect("Failed to get rooted slot");
+
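+            // Collect (slot, last-entry hash) pairs for up to num_roots rooted slots
+            // at or below max_height.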
+            let mut slot_hash = Vec::new();
+            for (i, slot) in iter.into_iter().enumerate() {
+                if i > num_roots {
+                    break;
                }
-            });
-    }
-    ("repair-roots", Some(arg_matches)) => {
-        let blockstore = open_blockstore(
-            &ledger_path,
-            AccessType::TryPrimaryThenSecondary,
-            wal_recovery_mode,
-        );
-        let start_root = if let Some(root) = arg_matches.value_of("start_root") {
-            Slot::from_str(root).expect("Before root must be a number")
-        } else {
-            blockstore.max_root()
-        };
-        let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
-        let end_root = if let Some(root) = arg_matches.value_of("end_root") {
-            Slot::from_str(root).expect("Until root must be a number")
-        } else {
-            start_root.saturating_sub(max_slots)
-        };
-        assert!(start_root > end_root);
-        assert!(blockstore.is_root(start_root));
-        let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
-        if arg_matches.is_present("end_root") && num_slots > max_slots {
-            eprintln!(
-                "Requested range {} too large, max {}. \
+                if slot <= max_height as u64 {
+                    let blockhash = blockstore
+                        .get_slot_entries(slot, 0)
+                        .unwrap()
+                        .last()
+                        .unwrap()
+                        .hash;
+                    slot_hash.push((slot, blockhash));
+                } else {
+                    break;
+                }
+            }
+
+            let mut output_file: Box<dyn Write> =
+                if let Some(path) = arg_matches.value_of("slot_list") {
+                    match File::create(path) {
+                        Ok(file) => Box::new(file),
+                        _ => Box::new(stdout()),
+                    }
+                } else {
+                    Box::new(stdout())
+                };
+
+            slot_hash
+                .into_iter()
+                .rev()
+                .enumerate()
+                .for_each(|(i, (slot, hash))| {
+                    if i < num_roots {
+                        output_file
+                            .write_all(format!("{:?}: {:?}\n", slot, hash).as_bytes())
+                            .expect("failed to write");
+                    }
+                });
+        }
+        ("repair-roots", Some(arg_matches)) => {
+            let blockstore = open_blockstore(
+                &ledger_path,
+                AccessType::TryPrimaryThenSecondary,
+                wal_recovery_mode,
+            );
+            let start_root = if let Some(root) = arg_matches.value_of("start_root") {
+                Slot::from_str(root).expect("Before root must be a number")
+            } else {
+                blockstore.max_root()
+            };
+            let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
+            let end_root = if let Some(root) = arg_matches.value_of("end_root") {
+                Slot::from_str(root).expect("Until root must be a number")
+            } else {
+                start_root.saturating_sub(max_slots)
+            };
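+            // Sanity checks: the walk must run backwards, starting from a slot that
+            // is itself a root.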
+            assert!(start_root > end_root);
+            assert!(blockstore.is_root(start_root));
+            let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
+            if arg_matches.is_present("end_root") && num_slots > max_slots {
+                eprintln!(
+                    "Requested range {} too large, max {}. \
                    Either adjust `--until` value, or pass a larger `--repair-limit` \
                    to override the limit",
-                num_slots, max_slots,
-            );
-            exit(1);
-        }
-        let ancestor_iterator =
-            AncestorIterator::new(start_root, &blockstore).take_while(|&slot| slot >= end_root);
-        let roots_to_fix: Vec<_> = ancestor_iterator
-            .filter(|slot| !blockstore.is_root(*slot))
-            .collect();
-        if !roots_to_fix.is_empty() {
-            eprintln!("{} slots to be rooted", roots_to_fix.len());
-            for chunk in roots_to_fix.chunks(100) {
-                eprintln!("{:?}", chunk);
-                blockstore
-                    .set_roots(roots_to_fix.iter())
-                    .unwrap_or_else(|err| {
-                        eprintln!("Unable to set roots {:?}: {}", roots_to_fix, err);
-                        exit(1);
-                    });
-            }
-        } else {
-            println!(
-                "No missing roots found in range {} to {}",
-                end_root, start_root
-            );
-        }
-    }
-    ("bounds", Some(arg_matches)) => {
-        let blockstore = open_blockstore(
-            &ledger_path,
-            AccessType::TryPrimaryThenSecondary,
-            wal_recovery_mode,
-        );
-        match blockstore.slot_meta_iterator(0) {
-            Ok(metas) => {
-                let all = arg_matches.is_present("all");
-
-                let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
-                if slots.is_empty() {
-                    println!("Ledger is empty");
-                } else {
-                    let first = slots.first().unwrap();
-                    let last = slots.last().unwrap_or(first);
-                    if first != last {
-                        println!(
-                            "Ledger has data for {} slots {:?} to {:?}",
-                            slots.len(),
-                            first,
-                            last
-                        );
-                        if all {
-                            println!("Non-empty slots: {:?}", slots);
-                        }
-                    } else {
-                        println!("Ledger has data for slot {:?}", first);
-                    }
-                }
-                if let Ok(rooted) = blockstore.rooted_slot_iterator(0) {
-                    let mut first_rooted = 0;
-                    let mut last_rooted = 0;
-                    let mut total_rooted = 0;
-                    for (i, slot) in rooted.into_iter().enumerate() {
-                        if i == 0 {
-                            first_rooted = slot;
-                        }
-                        last_rooted = slot;
-                        total_rooted += 1;
-                    }
-                    let mut count_past_root = 0;
-                    for slot in slots.iter().rev() {
-                        if *slot > last_rooted {
-                            count_past_root += 1;
-                        } else {
-                            break;
-                        }
-                    }
-                    println!(
-                        " with {} rooted slots from {:?} to {:?}",
-                        total_rooted, first_rooted, last_rooted
-                    );
-                    println!(" and {} slots past the last root", count_past_root);
-                } else {
-                    println!(" with no rooted slots");
-                }
-            }
-            Err(err) => {
-                eprintln!("Unable to read the Ledger: {:?}", err);
-                exit(1);
-            }
-        };
-    }
-    ("analyze-storage", _) => {
-        analyze_storage(&open_database(
-            &ledger_path,
-            AccessType::TryPrimaryThenSecondary,
-        ));
-        println!("Ok.");
-    }
-    ("compute-slot-cost", Some(arg_matches)) => {
-        let blockstore = open_blockstore(
-            &ledger_path,
-            AccessType::TryPrimaryThenSecondary,
-            wal_recovery_mode,
-        );
-
-        let mut slots: Vec<Slot> = vec![];
-        if !arg_matches.is_present("slots") {
-            if let Ok(metas) = blockstore.slot_meta_iterator(0) {
-                slots = metas.map(|(slot, _)| slot).collect();
-            }
-        } else {
-            slots = values_t_or_exit!(arg_matches, "slots", Slot);
-        }
-
-        for slot in slots {
-            if let Err(err) = compute_slot_cost(&blockstore, slot) {
-                eprintln!("{}", err);
+                    num_slots, max_slots,
+                );
+                exit(1);
+            }
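+            // Walk the ancestors of start_root back to end_root and collect any
+            // that are not yet marked as roots.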
+            let ancestor_iterator = AncestorIterator::new(start_root, &blockstore)
+                .take_while(|&slot| slot >= end_root);
+            let roots_to_fix: Vec<_> = ancestor_iterator
+                .filter(|slot| !blockstore.is_root(*slot))
+                .collect();
+            if !roots_to_fix.is_empty() {
+                eprintln!("{} slots to be rooted", roots_to_fix.len());
+                for chunk in roots_to_fix.chunks(100) {
+                    eprintln!("{:?}", chunk);
+                    blockstore
+                        .set_roots(roots_to_fix.iter())
+                        .unwrap_or_else(|err| {
+                            eprintln!("Unable to set roots {:?}: {}", roots_to_fix, err);
+                            exit(1);
+                        });
+                }
+            } else {
+                println!(
+                    "No missing roots found in range {} to {}",
+                    end_root, start_root
+                );
+            }
+        }
+        ("bounds", Some(arg_matches)) => {
+            let blockstore = open_blockstore(
+                &ledger_path,
+                AccessType::TryPrimaryThenSecondary,
+                wal_recovery_mode,
+            );
+            match blockstore.slot_meta_iterator(0) {
+                Ok(metas) => {
+                    let all = arg_matches.is_present("all");
+
+                    let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
+                    if slots.is_empty() {
+                        println!("Ledger is empty");
+                    } else {
+                        let first = slots.first().unwrap();
+                        let last = slots.last().unwrap_or(first);
+                        if first != last {
+                            println!(
+                                "Ledger has data for {} slots {:?} to {:?}",
+                                slots.len(),
+                                first,
+                                last
+                            );
+                            if all {
+                                println!("Non-empty slots: {:?}", slots);
+                            }
+                        } else {
+                            println!("Ledger has data for slot {:?}", first);
+                        }
+                    }
+                    if let Ok(rooted) = blockstore.rooted_slot_iterator(0) {
+                        let mut first_rooted = 0;
+                        let mut last_rooted = 0;
+                        let mut total_rooted = 0;
+                        for (i, slot) in rooted.into_iter().enumerate() {
+                            if i == 0 {
+                                first_rooted = slot;
+                            }
+                            last_rooted = slot;
+                            total_rooted += 1;
+                        }
+                        let mut count_past_root = 0;
+                        for slot in slots.iter().rev() {
+                            if *slot > last_rooted {
+                                count_past_root += 1;
+                            } else {
+                                break;
+                            }
+                        }
+                        println!(
+                            " with {} rooted slots from {:?} to {:?}",
+                            total_rooted, first_rooted, last_rooted
+                        );
+                        println!(" and {} slots past the last root", count_past_root);
+                    } else {
+                        println!(" with no rooted slots");
+                    }
+                }
+                Err(err) => {
+                    eprintln!("Unable to read the Ledger: {:?}", err);
+                    exit(1);
+                }
+            };
+        }
+        ("analyze-storage", _) => {
+            analyze_storage(&open_database(
+                &ledger_path,
+                AccessType::TryPrimaryThenSecondary,
+            ));
+            println!("Ok.");
+        }
+        ("compute-slot-cost", Some(arg_matches)) => {
+            let blockstore = open_blockstore(
+                &ledger_path,
+                AccessType::TryPrimaryThenSecondary,
+                wal_recovery_mode,
+            );
+
+            let mut slots: Vec<Slot> = vec![];
+            if !arg_matches.is_present("slots") {
+                if let Ok(metas) = blockstore.slot_meta_iterator(0) {
+                    slots = metas.map(|(slot, _)| slot).collect();
+                }
+            } else {
+                slots = values_t_or_exit!(arg_matches, "slots", Slot);
+            }
+
+            for slot in slots {
+                if let Err(err) = compute_slot_cost(&blockstore, slot) {
+                    eprintln!("{}", err);
+                }
+            }
+        }
+        ("", _) => {
+            eprintln!("{}", matches.usage());
+            exit(1);
+        }
+        _ => unreachable!(),
+    };
+    }
}