ledger-tool: Condense repeated error handling (#34439)

Several commands call load_and_process_ledger() which can fail in a
number of ways. These callers currently all handle the result in the
same way by matching the return Result:
- The Ok(_) case uses the returned types as normal
- The Err(_) case prints an error message and exits

This error handling is redundant, and a helper can remove the
duplicate code. So, this PR adds a wrapper around load_and_process_ledger()
that checks the result and either unwraps it, or prints an error message and exits.
This commit is contained in:
steviez 2023-12-13 14:50:20 -06:00 committed by GitHub
parent 0b6d939e21
commit 2a67fa8d13
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 860 additions and 932 deletions

View File

@ -106,6 +106,28 @@ pub fn get_shred_storage_type(ledger_path: &Path, message: &str) -> ShredStorage
}
}
/// Loads and processes the ledger via `load_and_process_ledger`, exiting the
/// process on failure.
///
/// On success, returns the `BankForks` and optional starting snapshot hashes
/// produced by `load_and_process_ledger`. On failure, prints the error to
/// stderr and terminates the process with exit code 1, so callers never see
/// the `Err` case. Intended for CLI command handlers that all previously
/// duplicated this match-and-exit pattern.
pub fn load_and_process_ledger_or_exit(
    arg_matches: &ArgMatches,
    genesis_config: &GenesisConfig,
    blockstore: Arc<Blockstore>,
    process_options: ProcessOptions,
    snapshot_archive_path: Option<PathBuf>,
    incremental_snapshot_archive_path: Option<PathBuf>,
) -> (Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>) {
    // Delegate to the fallible loader; arguments are forwarded unchanged.
    load_and_process_ledger(
        arg_matches,
        genesis_config,
        blockstore,
        process_options,
        snapshot_archive_path,
        incremental_snapshot_archive_path,
    )
    .unwrap_or_else(|err| {
        // Any load/processing failure is fatal for these CLI commands.
        eprintln!("Exiting. Failed to load and process ledger: {err}");
        exit(1);
    })
}
pub fn load_and_process_ledger(
arg_matches: &ArgMatches,
genesis_config: &GenesisConfig,

View File

@ -2425,15 +2425,15 @@ fn main() {
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
let (bank_forks, _) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
) {
Ok((bank_forks, ..)) => {
);
println!(
"{}",
compute_shred_version(
@ -2442,12 +2442,6 @@ fn main() {
)
);
}
Err(err) => {
eprintln!("Failed to load ledger: {err:?}");
exit(1);
}
}
}
("shred-meta", Some(arg_matches)) => {
#[derive(Debug)]
#[allow(dead_code)]
@ -2511,23 +2505,16 @@ fn main() {
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
let (bank_forks, _) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
) {
Ok((bank_forks, ..)) => {
);
println!("{}", &bank_forks.read().unwrap().working_bank().hash());
}
Err(err) => {
eprintln!("Failed to load ledger: {err:?}");
exit(1);
}
}
}
("slot", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
@ -2763,18 +2750,15 @@ fn main() {
force_update_to_open,
enforce_ulimit_nofile,
);
let (bank_forks, ..) = load_and_process_ledger(
let (bank_forks, _) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
)
.unwrap_or_else(|err| {
eprintln!("Ledger verification failed: {err:?}");
exit(1);
});
);
if print_accounts_stats {
let working_bank = bank_forks.read().unwrap().working_bank();
working_bank.print_accounts_stats();
@ -2814,6 +2798,7 @@ fn main() {
..ProcessOptions::default()
};
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let blockstore = open_blockstore(
&ledger_path,
get_access_type(&process_options),
@ -2821,17 +2806,16 @@ fn main() {
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
let (bank_forks, _) = load_and_process_ledger_or_exit(
arg_matches,
&open_genesis_config_by(&ledger_path, arg_matches),
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
) {
Ok((bank_forks, ..)) => {
let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config);
);
let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config);
let extension = Path::new(&output_file).extension();
let result = if extension == Some(OsStr::new("pdf")) {
render_dot(dot, &output_file, "pdf")
@ -2847,12 +2831,6 @@ fn main() {
Err(err) => eprintln!("Unable to write {output_file}: {err}"),
}
}
Err(err) => {
eprintln!("Failed to load ledger: {err:?}");
exit(1);
}
}
}
("create-snapshot", Some(arg_matches)) => {
let is_incremental = arg_matches.is_present("incremental");
let is_minimized = arg_matches.is_present("minimized");
@ -3007,15 +2985,14 @@ fn main() {
output_directory.display()
);
match load_and_process_ledger(
let (bank_forks, starting_snapshot_hashes) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
blockstore.clone(),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
) {
Ok((bank_forks, starting_snapshot_hashes)) => {
);
let mut bank = bank_forks
.read()
.unwrap()
@ -3035,11 +3012,8 @@ fn main() {
|| bootstrap_validator_pubkeys.is_some();
if child_bank_required {
let mut child_bank = Bank::new_from_parent(
bank.clone(),
bank.collector_id(),
bank.slot() + 1,
);
let mut child_bank =
Bank::new_from_parent(bank.clone(), bank.collector_id(), bank.slot() + 1);
if let Ok(rent_burn_percentage) = rent_burn_percentage {
child_bank.set_rent_burn_percentage(rent_burn_percentage);
@ -3049,9 +3023,7 @@ fn main() {
child_bank.set_hashes_per_tick(match hashes_per_tick {
// Note: Unlike `solana-genesis`, "auto" is not supported here.
"sleep" => None,
_ => {
Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64))
}
_ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)),
});
}
bank = Arc::new(child_bank);
@ -3077,9 +3049,7 @@ fn main() {
for address in accounts_to_remove {
let mut account = bank.get_account(&address).unwrap_or_else(|| {
eprintln!(
"Error: Account does not exist, unable to remove it: {address}"
);
eprintln!("Error: Account does not exist, unable to remove it: {address}");
exit(1);
});
@ -3121,18 +3091,14 @@ fn main() {
.into_iter()
{
if let Ok(StakeStateV2::Stake(meta, stake, _)) = account.state() {
if vote_accounts_to_destake
.contains(&stake.delegation.voter_pubkey)
{
if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) {
if verbose_level > 0 {
warn!(
"Undelegating stake account {} from {}",
address, stake.delegation.voter_pubkey,
);
}
account
.set_state(&StakeStateV2::Initialized(meta))
.unwrap();
account.set_state(&StakeStateV2::Initialized(meta)).unwrap();
bank.store_account(&address, &account);
}
}
@ -3148,19 +3114,14 @@ fn main() {
v.sort();
v.dedup();
if v.len() != bootstrap_validator_pubkeys.len() {
eprintln!(
"Error: --bootstrap-validator pubkeys cannot be duplicated"
);
eprintln!("Error: --bootstrap-validator pubkeys cannot be duplicated");
exit(1);
}
}
// Delete existing vote accounts
for (address, mut account) in bank
.get_program_accounts(
&solana_vote_program::id(),
&ScanConfig::default(),
)
.get_program_accounts(&solana_vote_program::id(), &ScanConfig::default())
.unwrap()
.into_iter()
{
@ -3170,11 +3131,9 @@ fn main() {
// Add a new identity/vote/stake account for each of the provided bootstrap
// validators
let mut bootstrap_validator_pubkeys_iter =
bootstrap_validator_pubkeys.iter();
let mut bootstrap_validator_pubkeys_iter = bootstrap_validator_pubkeys.iter();
loop {
let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next()
else {
let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() else {
break;
};
let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap();
@ -3214,8 +3173,7 @@ fn main() {
// Warp ahead at least two epochs to ensure that the leader schedule will be
// updated to reflect the new bootstrap validator(s)
let minimum_warp_slot =
genesis_config.epoch_schedule.get_first_slot_in_epoch(
let minimum_warp_slot = genesis_config.epoch_schedule.get_first_slot_in_epoch(
genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2,
);
@ -3344,8 +3302,7 @@ fn main() {
if is_minimized {
let starting_epoch = bank.epoch_schedule().get_epoch(snapshot_slot);
let ending_epoch =
bank.epoch_schedule().get_epoch(ending_slot.unwrap());
let ending_epoch = bank.epoch_schedule().get_epoch(ending_slot.unwrap());
if starting_epoch != ending_epoch {
warn!(
"Minimized snapshot range crosses epoch boundary ({} to \
@ -3353,8 +3310,7 @@ fn main() {
full snapshot",
starting_epoch,
ending_epoch,
bank.epoch_schedule()
.get_last_slot_in_epoch(starting_epoch)
bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch)
);
}
@ -3373,12 +3329,6 @@ fn main() {
compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks()))
);
}
Err(err) => {
eprintln!("Failed to load ledger: {err:?}");
exit(1);
}
}
}
("accounts", Some(arg_matches)) => {
let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
let process_options = ProcessOptions {
@ -3402,18 +3352,14 @@ fn main() {
force_update_to_open,
enforce_ulimit_nofile,
);
let (bank_forks, ..) = load_and_process_ledger(
let (bank_forks, _) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
)
.unwrap_or_else(|err| {
eprintln!("Failed to load ledger: {err:?}");
exit(1);
});
);
let bank = bank_forks.read().unwrap().working_bank();
let mut serializer = serde_json::Serializer::new(stdout());
@ -3496,15 +3442,14 @@ fn main() {
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
let (bank_forks, _) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
) {
Ok((bank_forks, ..)) => {
);
let bank_forks = bank_forks.read().unwrap();
let slot = bank_forks.working_bank().slot();
let bank = bank_forks.get(slot).unwrap_or_else(|| {
@ -3516,21 +3461,16 @@ fn main() {
println!("Recalculating capitalization");
let old_capitalization = bank.set_capitalization();
if old_capitalization == bank.capitalization() {
eprintln!(
"Capitalization was identical: {}",
Sol(old_capitalization)
);
eprintln!("Capitalization was identical: {}", Sol(old_capitalization));
}
}
if arg_matches.is_present("warp_epoch") {
let base_bank = bank;
let raw_warp_epoch =
value_t!(arg_matches, "warp_epoch", String).unwrap();
let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap();
let warp_epoch = if raw_warp_epoch.starts_with('+') {
base_bank.epoch()
+ value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
} else {
value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
};
@ -3567,10 +3507,8 @@ fn main() {
.lazy_rent_collection
.store(true, std::sync::atomic::Ordering::Relaxed);
let feature_account_balance = std::cmp::max(
genesis_config.rent.minimum_balance(Feature::size_of()),
1,
);
let feature_account_balance =
std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1);
if arg_matches.is_present("enable_credits_auto_rewind") {
base_bank.unfreeze_for_ledger_tool();
let mut force_enabled_count = 0;
@ -3588,9 +3526,7 @@ fn main() {
force_enabled_count += 1;
}
if force_enabled_count == 0 {
warn!(
"Already credits_auto_rewind is activated (or scheduled)"
);
warn!("Already credits_auto_rewind is activated (or scheduled)");
}
let mut store_failed_count = 0;
if force_enabled_count >= 1 {
@ -3665,8 +3601,7 @@ fn main() {
// because only staking tracing is supported!
#[allow(irrefutable_let_patterns)]
if let RewardCalculationEvent::Staking(pubkey, event) = event {
let mut detail =
stake_calculation_details.entry(**pubkey).or_default();
let mut detail = stake_calculation_details.entry(**pubkey).or_default();
match event {
InflationPointCalculationEvent::CalculatedPoints(
epoch,
@ -3676,7 +3611,12 @@ fn main() {
) => {
if *points > 0 {
detail.epochs += 1;
detail.points.push(PointDetail {epoch: *epoch, points: *points, stake: *stake, credits: *credits});
detail.points.push(PointDetail {
epoch: *epoch,
points: *points,
stake: *stake,
credits: *credits,
});
}
}
InflationPointCalculationEvent::SplitRewards(
@ -3698,7 +3638,9 @@ fn main() {
*last_point_value = Some(point_value.clone());
}
}
InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(stake) => {
InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(
stake,
) => {
detail.current_effective_stake = *stake;
}
InflationPointCalculationEvent::Commission(commission) => {
@ -3714,10 +3656,7 @@ fn main() {
detail.old_credits_observed = Some(*old_credits_observed);
detail.new_credits_observed = *new_credits_observed;
}
InflationPointCalculationEvent::Delegation(
delegation,
owner,
) => {
InflationPointCalculationEvent::Delegation(delegation, owner) => {
detail.voter = delegation.voter_pubkey;
detail.voter_owner = *owner;
detail.total_stake = delegation.stake;
@ -3732,7 +3671,10 @@ fn main() {
detail.skipped_reasons = format!("{skipped_reason:?}");
} else {
use std::fmt::Write;
let _ = write!(&mut detail.skipped_reasons, "/{skipped_reason:?}");
let _ = write!(
&mut detail.skipped_reasons,
"/{skipped_reason:?}"
);
}
}
}
@ -3746,8 +3688,7 @@ fn main() {
);
warped_bank.freeze();
let mut csv_writer = if arg_matches.is_present("csv_filename") {
let csv_filename =
value_t_or_exit!(arg_matches, "csv_filename", String);
let csv_filename = value_t_or_exit!(arg_matches, "csv_filename", String);
let file = File::create(csv_filename).unwrap();
Some(csv::WriterBuilder::new().from_writer(file))
} else {
@ -3762,8 +3703,8 @@ fn main() {
/ (base_bank.capitalization() as f64)
* 100_f64)
- 100_f64;
let interest_per_year = interest_per_epoch
/ warped_bank.epoch_duration_in_years(base_bank.epoch());
let interest_per_year =
interest_per_epoch / warped_bank.epoch_duration_in_years(base_bank.epoch());
println!(
"Capitalization: {} => {} (+{} {}%; annualized {}%)",
Sol(base_bank.capitalization()),
@ -3775,8 +3716,7 @@ fn main() {
let mut overall_delta = 0;
let modified_accounts =
warped_bank.get_all_accounts_modified_since_parent();
let modified_accounts = warped_bank.get_all_accounts_modified_since_parent();
let mut rewarded_accounts = modified_accounts
.iter()
.map(|(pubkey, account)| {
@ -3790,16 +3730,14 @@ fn main() {
)
})
.collect::<Vec<_>>();
rewarded_accounts.sort_unstable_by_key(
|(pubkey, account, base_lamports)| {
rewarded_accounts.sort_unstable_by_key(|(pubkey, account, base_lamports)| {
(
*account.owner(),
*base_lamports,
account.lamports() - base_lamports,
*pubkey,
)
},
);
});
let mut unchanged_accounts = stake_calculation_details
.iter()
@ -3881,9 +3819,7 @@ fn main() {
old_capitalization: u64,
new_capitalization: u64,
}
fn format_or_na<T: std::fmt::Display>(
data: Option<T>,
) -> String {
fn format_or_na<T: std::fmt::Display>(data: Option<T>) -> String {
data.map(|data| format!("{data}"))
.unwrap_or_else(|| "N/A".to_owned())
}
@ -3897,8 +3833,7 @@ fn main() {
}
for point_detail in point_details {
let (cluster_rewards, cluster_points) =
last_point_value
let (cluster_rewards, cluster_points) = last_point_value
.read()
.unwrap()
.clone()
@ -3906,10 +3841,7 @@ fn main() {
(Some(pv.rewards), Some(pv.points))
});
let record = InflationRecord {
cluster_type: format!(
"{:?}",
base_bank.cluster_type()
),
cluster_type: format!("{:?}", base_bank.cluster_type()),
rewarded_epoch: base_bank.epoch(),
account: format!("{pubkey}"),
owner: format!("{}", base_account.owner()),
@ -3940,37 +3872,25 @@ fn main() {
deactivation_epoch: format_or_na(
detail.and_then(|d| d.deactivation_epoch),
),
earned_epochs: format_or_na(
detail.map(|d| d.epochs),
),
earned_epochs: format_or_na(detail.map(|d| d.epochs)),
epoch: format_or_na(point_detail.map(|d| d.epoch)),
epoch_credits: format_or_na(
point_detail.map(|d| d.credits),
),
epoch_points: format_or_na(
point_detail.map(|d| d.points),
),
epoch_stake: format_or_na(
point_detail.map(|d| d.stake),
),
epoch_points: format_or_na(point_detail.map(|d| d.points)),
epoch_stake: format_or_na(point_detail.map(|d| d.stake)),
old_credits_observed: format_or_na(
detail.and_then(|d| d.old_credits_observed),
),
new_credits_observed: format_or_na(
detail.and_then(|d| d.new_credits_observed),
),
base_rewards: format_or_na(
detail.map(|d| d.base_rewards),
),
base_rewards: format_or_na(detail.map(|d| d.base_rewards)),
stake_rewards: format_or_na(
detail.map(|d| d.stake_rewards),
),
vote_rewards: format_or_na(
detail.map(|d| d.vote_rewards),
),
commission: format_or_na(
detail.map(|d| d.commission),
),
vote_rewards: format_or_na(detail.map(|d| d.vote_rewards)),
commission: format_or_na(detail.map(|d| d.commission)),
cluster_rewards: format_or_na(cluster_rewards),
cluster_points: format_or_na(cluster_points),
old_capitalization: base_bank.capitalization(),
@ -3989,14 +3909,10 @@ fn main() {
}
} else {
if arg_matches.is_present("recalculate_capitalization") {
eprintln!(
"Capitalization isn't verified because it's recalculated"
);
eprintln!("Capitalization isn't verified because it's recalculated");
}
if arg_matches.is_present("inflation") {
eprintln!(
"Forcing inflation isn't meaningful because bank isn't warping"
);
eprintln!("Forcing inflation isn't meaningful because bank isn't warping");
}
assert_capitalization(&bank);
@ -4005,12 +3921,6 @@ fn main() {
println!("Capitalization: {}", Sol(bank.capitalization()));
}
}
Err(err) => {
eprintln!("Failed to load ledger: {err:?}");
exit(1);
}
}
}
("purge", Some(arg_matches)) => {
let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
let end_slot = value_t!(arg_matches, "end_slot", Slot).ok();

View File

@ -119,18 +119,14 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc<Bank
force_update_to_open,
enforce_ulimit_nofile,
);
let (bank_forks, ..) = load_and_process_ledger(
let (bank_forks, ..) = load_and_process_ledger_or_exit(
arg_matches,
&genesis_config,
Arc::new(blockstore),
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
)
.unwrap_or_else(|err| {
eprintln!("Ledger loading failed: {err:?}");
exit(1);
});
);
let bank = bank_forks.read().unwrap().working_bank();
bank
}