ledger-tool: Condense repeated error handling (#34439)

Several commands call load_and_process_ledger(), which can fail in a
number of ways. These callers currently all handle the result in the
same way by matching the returned Result:
- The Ok(_) case uses the returned types as normal
- The Err(_) case prints an error message and exits

This error handling is redundant, and a helper can remove the
duplicated code. So, this PR adds a wrapper around load_and_process_ledger()
that checks the result and either unwraps it or prints an error message
and exits.
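As a rough illustration of the pattern (a minimal sketch, not the actual ledger-tool
signatures; the real helper and its full argument list appear in the diff below), the
wrapper just forwards to the fallible function and turns the Err case into an
eprintln! plus exit(1):

    use std::process::exit;

    // Stand-in for the fallible loader; the real function returns bank forks
    // and snapshot hashes rather than a String.
    fn load_and_process_ledger(path: &str) -> Result<String, String> {
        if path.is_empty() {
            Err("empty ledger path".to_string())
        } else {
            Ok(format!("loaded ledger at {path}"))
        }
    }

    // The helper: unwrap the Ok value, or print the error and exit.
    // Callers no longer need their own match on the Result.
    fn load_and_process_ledger_or_exit(path: &str) -> String {
        load_and_process_ledger(path).unwrap_or_else(|err| {
            eprintln!("Exiting. Failed to load and process ledger: {err}");
            exit(1);
        })
    }

    fn main() {
        let summary = load_and_process_ledger_or_exit("ledger");
        println!("{summary}");
    }

Since the helper calls exit(1) directly it only suits a CLI like ledger-tool; library
code would propagate the error instead.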
Author: steviez (committed by GitHub), 2023-12-13 14:50:20 -06:00
Parent: 0b6d939e21
Commit: 2a67fa8d13
3 changed files with 860 additions and 932 deletions


@@ -106,6 +106,28 @@ pub fn get_shred_storage_type(ledger_path: &Path, message: &str) -> ShredStorage
 }
 }
+pub fn load_and_process_ledger_or_exit(
+arg_matches: &ArgMatches,
+genesis_config: &GenesisConfig,
+blockstore: Arc<Blockstore>,
+process_options: ProcessOptions,
+snapshot_archive_path: Option<PathBuf>,
+incremental_snapshot_archive_path: Option<PathBuf>,
+) -> (Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>) {
+load_and_process_ledger(
+arg_matches,
+genesis_config,
+blockstore,
+process_options,
+snapshot_archive_path,
+incremental_snapshot_archive_path,
+)
+.unwrap_or_else(|err| {
+eprintln!("Exiting. Failed to load and process ledger: {err}");
+exit(1);
+})
+}
 pub fn load_and_process_ledger(
 arg_matches: &ArgMatches,
 genesis_config: &GenesisConfig,


@@ -2425,15 +2425,15 @@ fn main() {
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-match load_and_process_ledger(
+let (bank_forks, _) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-) {
-Ok((bank_forks, ..)) => {
+);
 println!(
 "{}",
 compute_shred_version(
@@ -2442,12 +2442,6 @@ fn main() {
 )
 );
 }
-Err(err) => {
-eprintln!("Failed to load ledger: {err:?}");
-exit(1);
-}
-}
-}
 ("shred-meta", Some(arg_matches)) => {
 #[derive(Debug)]
 #[allow(dead_code)]
@@ -2511,23 +2505,16 @@ fn main() {
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-match load_and_process_ledger(
+let (bank_forks, _) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-) {
-Ok((bank_forks, ..)) => {
+);
 println!("{}", &bank_forks.read().unwrap().working_bank().hash());
 }
-Err(err) => {
-eprintln!("Failed to load ledger: {err:?}");
-exit(1);
-}
-}
-}
 ("slot", Some(arg_matches)) => {
 let slots = values_t_or_exit!(arg_matches, "slots", Slot);
 let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
@@ -2763,18 +2750,15 @@ fn main() {
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-let (bank_forks, ..) = load_and_process_ledger(
+let (bank_forks, _) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-)
-.unwrap_or_else(|err| {
-eprintln!("Ledger verification failed: {err:?}");
-exit(1);
-});
+);
 if print_accounts_stats {
 let working_bank = bank_forks.read().unwrap().working_bank();
 working_bank.print_accounts_stats();
@@ -2814,6 +2798,7 @@ fn main() {
 ..ProcessOptions::default()
 };
+let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
 let blockstore = open_blockstore(
 &ledger_path,
 get_access_type(&process_options),
@@ -2821,17 +2806,16 @@ fn main() {
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-match load_and_process_ledger(
+let (bank_forks, _) = load_and_process_ledger_or_exit(
 arg_matches,
-&open_genesis_config_by(&ledger_path, arg_matches),
+&genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-) {
-Ok((bank_forks, ..)) => {
+);
 let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config);
 let extension = Path::new(&output_file).extension();
 let result = if extension == Some(OsStr::new("pdf")) {
 render_dot(dot, &output_file, "pdf")
@@ -2847,12 +2831,6 @@ fn main() {
 Err(err) => eprintln!("Unable to write {output_file}: {err}"),
 }
 }
-Err(err) => {
-eprintln!("Failed to load ledger: {err:?}");
-exit(1);
-}
-}
-}
 ("create-snapshot", Some(arg_matches)) => {
 let is_incremental = arg_matches.is_present("incremental");
 let is_minimized = arg_matches.is_present("minimized");
@@ -3007,15 +2985,14 @@ fn main() {
 output_directory.display()
 );
-match load_and_process_ledger(
+let (bank_forks, starting_snapshot_hashes) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 blockstore.clone(),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-) {
-Ok((bank_forks, starting_snapshot_hashes)) => {
+);
 let mut bank = bank_forks
 .read()
 .unwrap()
@@ -3035,11 +3012,8 @@ fn main() {
 || bootstrap_validator_pubkeys.is_some();
 if child_bank_required {
-let mut child_bank = Bank::new_from_parent(
-bank.clone(),
-bank.collector_id(),
-bank.slot() + 1,
-);
+let mut child_bank =
+Bank::new_from_parent(bank.clone(), bank.collector_id(), bank.slot() + 1);
 if let Ok(rent_burn_percentage) = rent_burn_percentage {
 child_bank.set_rent_burn_percentage(rent_burn_percentage);
@@ -3049,9 +3023,7 @@ fn main() {
 child_bank.set_hashes_per_tick(match hashes_per_tick {
 // Note: Unlike `solana-genesis`, "auto" is not supported here.
 "sleep" => None,
-_ => {
-Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64))
-}
+_ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)),
 });
 }
 bank = Arc::new(child_bank);
@@ -3077,9 +3049,7 @@ fn main() {
 for address in accounts_to_remove {
 let mut account = bank.get_account(&address).unwrap_or_else(|| {
-eprintln!(
-"Error: Account does not exist, unable to remove it: {address}"
-);
+eprintln!("Error: Account does not exist, unable to remove it: {address}");
 exit(1);
 });
@@ -3121,18 +3091,14 @@ fn main() {
 .into_iter()
 {
 if let Ok(StakeStateV2::Stake(meta, stake, _)) = account.state() {
-if vote_accounts_to_destake
-.contains(&stake.delegation.voter_pubkey)
-{
+if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) {
 if verbose_level > 0 {
 warn!(
 "Undelegating stake account {} from {}",
 address, stake.delegation.voter_pubkey,
 );
 }
-account
-.set_state(&StakeStateV2::Initialized(meta))
-.unwrap();
+account.set_state(&StakeStateV2::Initialized(meta)).unwrap();
 bank.store_account(&address, &account);
 }
 }
@@ -3148,19 +3114,14 @@ fn main() {
 v.sort();
 v.dedup();
 if v.len() != bootstrap_validator_pubkeys.len() {
-eprintln!(
-"Error: --bootstrap-validator pubkeys cannot be duplicated"
-);
+eprintln!("Error: --bootstrap-validator pubkeys cannot be duplicated");
 exit(1);
 }
 }
 // Delete existing vote accounts
 for (address, mut account) in bank
-.get_program_accounts(
-&solana_vote_program::id(),
-&ScanConfig::default(),
-)
+.get_program_accounts(&solana_vote_program::id(), &ScanConfig::default())
 .unwrap()
 .into_iter()
 {
@@ -3170,11 +3131,9 @@ fn main() {
 // Add a new identity/vote/stake account for each of the provided bootstrap
 // validators
-let mut bootstrap_validator_pubkeys_iter =
-bootstrap_validator_pubkeys.iter();
+let mut bootstrap_validator_pubkeys_iter = bootstrap_validator_pubkeys.iter();
 loop {
-let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next()
-else {
+let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() else {
 break;
 };
 let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap();
@@ -3214,8 +3173,7 @@ fn main() {
 // Warp ahead at least two epochs to ensure that the leader schedule will be
 // updated to reflect the new bootstrap validator(s)
-let minimum_warp_slot =
-genesis_config.epoch_schedule.get_first_slot_in_epoch(
+let minimum_warp_slot = genesis_config.epoch_schedule.get_first_slot_in_epoch(
 genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2,
 );
@@ -3344,8 +3302,7 @@ fn main() {
 if is_minimized {
 let starting_epoch = bank.epoch_schedule().get_epoch(snapshot_slot);
-let ending_epoch =
-bank.epoch_schedule().get_epoch(ending_slot.unwrap());
+let ending_epoch = bank.epoch_schedule().get_epoch(ending_slot.unwrap());
 if starting_epoch != ending_epoch {
 warn!(
 "Minimized snapshot range crosses epoch boundary ({} to \
@@ -3353,8 +3310,7 @@ fn main() {
 full snapshot",
 starting_epoch,
 ending_epoch,
-bank.epoch_schedule()
-.get_last_slot_in_epoch(starting_epoch)
+bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch)
 );
 }
@@ -3373,12 +3329,6 @@ fn main() {
 compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks()))
 );
 }
-Err(err) => {
-eprintln!("Failed to load ledger: {err:?}");
-exit(1);
-}
-}
-}
 ("accounts", Some(arg_matches)) => {
 let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
 let process_options = ProcessOptions {
@@ -3402,18 +3352,14 @@ fn main() {
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-let (bank_forks, ..) = load_and_process_ledger(
+let (bank_forks, _) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-)
-.unwrap_or_else(|err| {
-eprintln!("Failed to load ledger: {err:?}");
-exit(1);
-});
+);
 let bank = bank_forks.read().unwrap().working_bank();
 let mut serializer = serde_json::Serializer::new(stdout());
@@ -3496,15 +3442,14 @@ fn main() {
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-match load_and_process_ledger(
+let (bank_forks, _) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-) {
-Ok((bank_forks, ..)) => {
+);
 let bank_forks = bank_forks.read().unwrap();
 let slot = bank_forks.working_bank().slot();
 let bank = bank_forks.get(slot).unwrap_or_else(|| {
@@ -3516,21 +3461,16 @@ fn main() {
 println!("Recalculating capitalization");
 let old_capitalization = bank.set_capitalization();
 if old_capitalization == bank.capitalization() {
-eprintln!(
-"Capitalization was identical: {}",
-Sol(old_capitalization)
-);
+eprintln!("Capitalization was identical: {}", Sol(old_capitalization));
 }
 }
 if arg_matches.is_present("warp_epoch") {
 let base_bank = bank;
-let raw_warp_epoch =
-value_t!(arg_matches, "warp_epoch", String).unwrap();
+let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap();
 let warp_epoch = if raw_warp_epoch.starts_with('+') {
-base_bank.epoch()
-+ value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
+base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
 } else {
 value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
 };
@@ -3567,10 +3507,8 @@ fn main() {
 .lazy_rent_collection
 .store(true, std::sync::atomic::Ordering::Relaxed);
-let feature_account_balance = std::cmp::max(
-genesis_config.rent.minimum_balance(Feature::size_of()),
-1,
-);
+let feature_account_balance =
+std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1);
 if arg_matches.is_present("enable_credits_auto_rewind") {
 base_bank.unfreeze_for_ledger_tool();
 let mut force_enabled_count = 0;
@@ -3588,9 +3526,7 @@ fn main() {
 force_enabled_count += 1;
 }
 if force_enabled_count == 0 {
-warn!(
-"Already credits_auto_rewind is activated (or scheduled)"
-);
+warn!("Already credits_auto_rewind is activated (or scheduled)");
 }
 let mut store_failed_count = 0;
 if force_enabled_count >= 1 {
@@ -3665,8 +3601,7 @@ fn main() {
 // because only staking tracing is supported!
 #[allow(irrefutable_let_patterns)]
 if let RewardCalculationEvent::Staking(pubkey, event) = event {
-let mut detail =
-stake_calculation_details.entry(**pubkey).or_default();
+let mut detail = stake_calculation_details.entry(**pubkey).or_default();
 match event {
 InflationPointCalculationEvent::CalculatedPoints(
 epoch,
@@ -3676,7 +3611,12 @@ fn main() {
 ) => {
 if *points > 0 {
 detail.epochs += 1;
-detail.points.push(PointDetail {epoch: *epoch, points: *points, stake: *stake, credits: *credits});
+detail.points.push(PointDetail {
+epoch: *epoch,
+points: *points,
+stake: *stake,
+credits: *credits,
+});
 }
 }
 InflationPointCalculationEvent::SplitRewards(
@@ -3698,7 +3638,9 @@ fn main() {
 *last_point_value = Some(point_value.clone());
 }
 }
-InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(stake) => {
+InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(
+stake,
+) => {
 detail.current_effective_stake = *stake;
 }
 InflationPointCalculationEvent::Commission(commission) => {
@@ -3714,10 +3656,7 @@ fn main() {
 detail.old_credits_observed = Some(*old_credits_observed);
 detail.new_credits_observed = *new_credits_observed;
 }
-InflationPointCalculationEvent::Delegation(
-delegation,
-owner,
-) => {
+InflationPointCalculationEvent::Delegation(delegation, owner) => {
 detail.voter = delegation.voter_pubkey;
 detail.voter_owner = *owner;
 detail.total_stake = delegation.stake;
@@ -3732,7 +3671,10 @@ fn main() {
 detail.skipped_reasons = format!("{skipped_reason:?}");
 } else {
 use std::fmt::Write;
-let _ = write!(&mut detail.skipped_reasons, "/{skipped_reason:?}");
+let _ = write!(
+&mut detail.skipped_reasons,
+"/{skipped_reason:?}"
+);
 }
 }
 }
@@ -3746,8 +3688,7 @@ fn main() {
 );
 warped_bank.freeze();
 let mut csv_writer = if arg_matches.is_present("csv_filename") {
-let csv_filename =
-value_t_or_exit!(arg_matches, "csv_filename", String);
+let csv_filename = value_t_or_exit!(arg_matches, "csv_filename", String);
 let file = File::create(csv_filename).unwrap();
 Some(csv::WriterBuilder::new().from_writer(file))
 } else {
@@ -3762,8 +3703,8 @@ fn main() {
 / (base_bank.capitalization() as f64)
 * 100_f64)
 - 100_f64;
-let interest_per_year = interest_per_epoch
-/ warped_bank.epoch_duration_in_years(base_bank.epoch());
+let interest_per_year =
+interest_per_epoch / warped_bank.epoch_duration_in_years(base_bank.epoch());
 println!(
 "Capitalization: {} => {} (+{} {}%; annualized {}%)",
 Sol(base_bank.capitalization()),
@@ -3775,8 +3716,7 @@ fn main() {
 let mut overall_delta = 0;
-let modified_accounts =
-warped_bank.get_all_accounts_modified_since_parent();
+let modified_accounts = warped_bank.get_all_accounts_modified_since_parent();
 let mut rewarded_accounts = modified_accounts
 .iter()
 .map(|(pubkey, account)| {
@@ -3790,16 +3730,14 @@ fn main() {
 )
 })
 .collect::<Vec<_>>();
-rewarded_accounts.sort_unstable_by_key(
-|(pubkey, account, base_lamports)| {
+rewarded_accounts.sort_unstable_by_key(|(pubkey, account, base_lamports)| {
 (
 *account.owner(),
 *base_lamports,
 account.lamports() - base_lamports,
 *pubkey,
 )
-},
-);
+});
 let mut unchanged_accounts = stake_calculation_details
 .iter()
@@ -3881,9 +3819,7 @@ fn main() {
 old_capitalization: u64,
 new_capitalization: u64,
 }
-fn format_or_na<T: std::fmt::Display>(
-data: Option<T>,
-) -> String {
+fn format_or_na<T: std::fmt::Display>(data: Option<T>) -> String {
 data.map(|data| format!("{data}"))
 .unwrap_or_else(|| "N/A".to_owned())
 }
@@ -3897,8 +3833,7 @@ fn main() {
 }
 for point_detail in point_details {
-let (cluster_rewards, cluster_points) =
-last_point_value
+let (cluster_rewards, cluster_points) = last_point_value
 .read()
 .unwrap()
 .clone()
@@ -3906,10 +3841,7 @@ fn main() {
 (Some(pv.rewards), Some(pv.points))
 });
 let record = InflationRecord {
-cluster_type: format!(
-"{:?}",
-base_bank.cluster_type()
-),
+cluster_type: format!("{:?}", base_bank.cluster_type()),
 rewarded_epoch: base_bank.epoch(),
 account: format!("{pubkey}"),
 owner: format!("{}", base_account.owner()),
@@ -3940,37 +3872,25 @@ fn main() {
 deactivation_epoch: format_or_na(
 detail.and_then(|d| d.deactivation_epoch),
 ),
-earned_epochs: format_or_na(
-detail.map(|d| d.epochs),
-),
+earned_epochs: format_or_na(detail.map(|d| d.epochs)),
 epoch: format_or_na(point_detail.map(|d| d.epoch)),
 epoch_credits: format_or_na(
 point_detail.map(|d| d.credits),
 ),
-epoch_points: format_or_na(
-point_detail.map(|d| d.points),
-),
-epoch_stake: format_or_na(
-point_detail.map(|d| d.stake),
-),
+epoch_points: format_or_na(point_detail.map(|d| d.points)),
+epoch_stake: format_or_na(point_detail.map(|d| d.stake)),
 old_credits_observed: format_or_na(
 detail.and_then(|d| d.old_credits_observed),
 ),
 new_credits_observed: format_or_na(
 detail.and_then(|d| d.new_credits_observed),
 ),
-base_rewards: format_or_na(
-detail.map(|d| d.base_rewards),
-),
+base_rewards: format_or_na(detail.map(|d| d.base_rewards)),
 stake_rewards: format_or_na(
 detail.map(|d| d.stake_rewards),
 ),
-vote_rewards: format_or_na(
-detail.map(|d| d.vote_rewards),
-),
-commission: format_or_na(
-detail.map(|d| d.commission),
-),
+vote_rewards: format_or_na(detail.map(|d| d.vote_rewards)),
+commission: format_or_na(detail.map(|d| d.commission)),
 cluster_rewards: format_or_na(cluster_rewards),
 cluster_points: format_or_na(cluster_points),
 old_capitalization: base_bank.capitalization(),
@@ -3989,14 +3909,10 @@ fn main() {
 }
 } else {
 if arg_matches.is_present("recalculate_capitalization") {
-eprintln!(
-"Capitalization isn't verified because it's recalculated"
-);
+eprintln!("Capitalization isn't verified because it's recalculated");
 }
 if arg_matches.is_present("inflation") {
-eprintln!(
-"Forcing inflation isn't meaningful because bank isn't warping"
-);
+eprintln!("Forcing inflation isn't meaningful because bank isn't warping");
 }
 assert_capitalization(&bank);
@@ -4005,12 +3921,6 @@ fn main() {
 println!("Capitalization: {}", Sol(bank.capitalization()));
 }
 }
-Err(err) => {
-eprintln!("Failed to load ledger: {err:?}");
-exit(1);
-}
-}
-}
 ("purge", Some(arg_matches)) => {
 let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
 let end_slot = value_t!(arg_matches, "end_slot", Slot).ok();


@@ -119,18 +119,14 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc<Bank
 force_update_to_open,
 enforce_ulimit_nofile,
 );
-let (bank_forks, ..) = load_and_process_ledger(
+let (bank_forks, ..) = load_and_process_ledger_or_exit(
 arg_matches,
 &genesis_config,
 Arc::new(blockstore),
 process_options,
 snapshot_archive_path,
 incremental_snapshot_archive_path,
-)
-.unwrap_or_else(|err| {
-eprintln!("Ledger loading failed: {err:?}");
-exit(1);
-});
+);
 let bank = bank_forks.read().unwrap().working_bank();
 bank
 }