Enables fastboot for ledger-tool (#31921)
Parent: c86e160438
Commit: 7b37a1672f

@@ -977,6 +977,16 @@ fn assert_capitalization(bank: &Bank) {
     let debug_verify = true;
     assert!(bank.calculate_and_verify_capitalization(debug_verify));
 }
+
+/// Get the AccessType required, based on `process_options`
+fn get_access_type(process_options: &ProcessOptions) -> AccessType {
+    if process_options.boot_from_local_state {
+        AccessType::PrimaryForMaintenance
+    } else {
+        AccessType::Secondary
+    }
+}
+
 #[cfg(not(target_env = "msvc"))]
 use jemallocator::Jemalloc;
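
The new helper above is the heart of the change: fastboot requires primary access to the ledger and accounts (no other solana-ledger-tool or solana-validator instance may have them open at the same time), while the existing archive path keeps the usual secondary access. A minimal, self-contained sketch of that mapping, using pared-down stand-ins for the real `ProcessOptions` and `AccessType` types from solana-ledger:

// Pared-down stand-ins; the real types carry many more fields/variants.
#[derive(Debug, PartialEq)]
enum AccessType {
    PrimaryForMaintenance,
    Secondary,
}

#[derive(Default)]
struct ProcessOptions {
    boot_from_local_state: bool,
}

fn get_access_type(process_options: &ProcessOptions) -> AccessType {
    if process_options.boot_from_local_state {
        // Reusing on-disk state needs primary access to the ledger/accounts.
        AccessType::PrimaryForMaintenance
    } else {
        // Extracting from an archive only needs a secondary handle.
        AccessType::Secondary
    }
}

fn main() {
    let fastboot = ProcessOptions { boot_from_local_state: true };
    assert_eq!(get_access_type(&fastboot), AccessType::PrimaryForMaintenance);
    assert_eq!(
        get_access_type(&ProcessOptions::default()),
        AccessType::Secondary
    );
}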

@@ -1160,6 +1170,21 @@ fn main() {
         .multiple(true)
         .takes_value(true)
         .help("Log when transactions are processed that reference the given key(s).");
+    let boot_from_local_state = Arg::with_name("boot_from_local_state")
+        .long("boot-from-local-state")
+        .takes_value(false)
+        .hidden(hidden_unless_forced())
+        .help("Boot from state already on disk")
+        .long_help(
+            "Boot from state already on disk, instead of \
+             extracting it from a snapshot archive. \
+             This requires primary access, so another instance of \
+             solana-ledger-tool or solana-validator cannot \
+             simultaneously use the same ledger/accounts. \
+             Note, this will use the latest state available, \
+             which may be newer than the latest snapshot archive.",
+        )
+        .conflicts_with("no_snapshot");
 
     let default_max_full_snapshot_archives_to_retain =
         &DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string();
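
The flag is declared with the same clap v2-style builder API used throughout this file. A small self-contained sketch (hypothetical binary name, `hidden_unless_forced()` omitted) of how `.takes_value(false)` and `.conflicts_with("no_snapshot")` behave at parse time:

use clap::{App, Arg};

fn main() {
    let app = App::new("ledger-tool-sketch") // hypothetical name, not the real binary
        .arg(
            Arg::with_name("no_snapshot")
                .long("no-snapshot")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("boot_from_local_state")
                .long("boot-from-local-state")
                .takes_value(false) // a pure flag: present or absent, never given a value
                .conflicts_with("no_snapshot"),
        );

    // Present => is_present() is true; that boolean later feeds ProcessOptions.
    let matches = app
        .clone()
        .get_matches_from(vec!["ledger-tool-sketch", "--boot-from-local-state"]);
    assert!(matches.is_present("boot_from_local_state"));

    // conflicts_with makes clap reject combining the two flags.
    assert!(app
        .get_matches_from_safe(vec![
            "ledger-tool-sketch",
            "--boot-from-local-state",
            "--no-snapshot",
        ])
        .is_err());
}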

@@ -1519,6 +1544,7 @@ fn main() {
                 .arg(&max_genesis_archive_unpacked_size_arg)
                 .arg(&debug_key_arg)
                 .arg(&geyser_plugin_args)
+                .arg(&boot_from_local_state)
                 .arg(
                     Arg::with_name("skip_poh_verify")
                         .long("skip-poh-verify")

@@ -1574,6 +1600,7 @@ fn main() {
                 .arg(&halt_at_slot_arg)
                 .arg(&hard_forks_arg)
                 .arg(&max_genesis_archive_unpacked_size_arg)
+                .arg(&boot_from_local_state)
                 .arg(
                     Arg::with_name("include_all_votes")
                         .long("include-all-votes")

@@ -1801,6 +1828,7 @@ fn main() {
                 .arg(&hard_forks_arg)
                 .arg(&geyser_plugin_args)
                 .arg(&accounts_data_encoding_arg)
+                .arg(&boot_from_local_state)
                 .arg(
                     Arg::with_name("include_sysvars")
                         .long("include-sysvars")

@@ -1833,6 +1861,7 @@ fn main() {
                 .arg(&hard_forks_arg)
                 .arg(&max_genesis_archive_unpacked_size_arg)
                 .arg(&geyser_plugin_args)
+                .arg(&boot_from_local_state)
                 .arg(
                     Arg::with_name("warp_epoch")
                         .required(false)

@@ -2197,7 +2226,7 @@ fn main() {
             let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
             let blockstore = open_blockstore(
                 &ledger_path,
-                AccessType::Secondary,
+                get_access_type(&process_options),
                 wal_recovery_mode,
                 force_update_to_open,
             );

@@ -2289,7 +2318,7 @@ fn main() {
             let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
             let blockstore = open_blockstore(
                 &ledger_path,
-                AccessType::Secondary,
+                get_access_type(&process_options),
                 wal_recovery_mode,
                 force_update_to_open,
             );

@@ -2519,6 +2548,7 @@ fn main() {
                     .is_present("accounts_db_test_hash_calculation"),
                 accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"),
                 runtime_config: RuntimeConfig::default(),
+                boot_from_local_state: arg_matches.is_present("boot_from_local_state"),
                 ..ProcessOptions::default()
             };
             let print_accounts_stats = arg_matches.is_present("print_accounts_stats");

@@ -2527,7 +2557,7 @@ fn main() {
 
             let blockstore = open_blockstore(
                 &ledger_path,
-                AccessType::Secondary,
+                get_access_type(&process_options),
                 wal_recovery_mode,
                 force_update_to_open,
             );

@@ -2566,12 +2596,13 @@ fn main() {
                 halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
                 run_verification: false,
                 accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)),
+                boot_from_local_state: arg_matches.is_present("boot_from_local_state"),
                 ..ProcessOptions::default()
             };
 
             let blockstore = open_blockstore(
                 &ledger_path,
-                AccessType::Secondary,
+                get_access_type(&process_options),
                 wal_recovery_mode,
                 force_update_to_open,
             );

@@ -3106,13 +3137,14 @@ fn main() {
                 halt_at_slot,
                 run_verification: false,
                 accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)),
+                boot_from_local_state: arg_matches.is_present("boot_from_local_state"),
                 ..ProcessOptions::default()
             };
             let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
             let include_sysvars = arg_matches.is_present("include_sysvars");
             let blockstore = open_blockstore(
                 &ledger_path,
-                AccessType::Secondary,
+                get_access_type(&process_options),
                 wal_recovery_mode,
                 force_update_to_open,
             );

@@ -3196,12 +3228,13 @@ fn main() {
                 halt_at_slot,
                 run_verification: false,
                 accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)),
+                boot_from_local_state: arg_matches.is_present("boot_from_local_state"),
                 ..ProcessOptions::default()
             };
             let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
             let blockstore = open_blockstore(
                 &ledger_path,
-                AccessType::Secondary,
+                get_access_type(&process_options),
                 wal_recovery_mode,
                 force_update_to_open,
             );

@@ -204,56 +204,84 @@ fn bank_forks_from_snapshot(
         process::exit(1);
     }
 
-    // Given that we are going to boot from an archive, the accountvecs held in the snapshot dirs for fast-boot should
-    // be released. They will be released by the account_background_service anyway. But in the case of the account_paths
-    // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from
-    // the archives, causing the out-of-memory problem. So, purge the snapshot dirs upfront before loading from the archive.
-    snapshot_utils::purge_old_bank_snapshots(&snapshot_config.bank_snapshots_dir, 0, None);
+    let (deserialized_bank, starting_snapshot_hashes) = if process_options.boot_from_local_state {
+        assert!(
+            !snapshot_config.should_generate_snapshots(),
+            "booting from local state does not support generating snapshots yet",
+        );
 
-    let (deserialized_bank, full_snapshot_archive_info, incremental_snapshot_archive_info) =
-        snapshot_utils::bank_from_latest_snapshot_archives(
+        let bank = snapshot_utils::bank_from_latest_snapshot_dir(
             &snapshot_config.bank_snapshots_dir,
-            &snapshot_config.full_snapshot_archives_dir,
-            &snapshot_config.incremental_snapshot_archives_dir,
-            &account_paths,
             genesis_config,
             &process_options.runtime_config,
+            &account_paths,
             process_options.debug_keys.clone(),
             None,
             process_options.account_indexes.clone(),
             process_options.limit_load_slot_count_from_snapshot,
             process_options.shrink_ratio,
-            process_options.accounts_db_test_hash_calculation,
-            process_options.accounts_db_skip_shrink,
             process_options.verify_index,
             process_options.accounts_db_config.clone(),
             accounts_update_notifier,
             exit,
         )
-        .expect("Load from snapshot failed");
+        .expect("load bank from local state");
+
+        (bank, None)
+    } else {
+        // Given that we are going to boot from an archive, the accountvecs held in the snapshot dirs for fast-boot should
+        // be released. They will be released by the account_background_service anyway. But in the case of the account_paths
+        // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from
+        // the archives, causing the out-of-memory problem. So, purge the snapshot dirs upfront before loading from the archive.
+        snapshot_utils::purge_old_bank_snapshots(&snapshot_config.bank_snapshots_dir, 0, None);
+
+        let (deserialized_bank, full_snapshot_archive_info, incremental_snapshot_archive_info) =
+            snapshot_utils::bank_from_latest_snapshot_archives(
+                &snapshot_config.bank_snapshots_dir,
+                &snapshot_config.full_snapshot_archives_dir,
+                &snapshot_config.incremental_snapshot_archives_dir,
+                &account_paths,
+                genesis_config,
+                &process_options.runtime_config,
+                process_options.debug_keys.clone(),
+                None,
+                process_options.account_indexes.clone(),
+                process_options.limit_load_slot_count_from_snapshot,
+                process_options.shrink_ratio,
+                process_options.accounts_db_test_hash_calculation,
+                process_options.accounts_db_skip_shrink,
+                process_options.verify_index,
+                process_options.accounts_db_config.clone(),
+                accounts_update_notifier,
+                exit,
+            )
+            .expect("load bank from snapshot archives");
+
+        let full_snapshot_hash = FullSnapshotHash((
+            full_snapshot_archive_info.slot(),
+            *full_snapshot_archive_info.hash(),
+        ));
+        let starting_incremental_snapshot_hash =
+            incremental_snapshot_archive_info.map(|incremental_snapshot_archive_info| {
+                IncrementalSnapshotHash((
+                    incremental_snapshot_archive_info.slot(),
+                    *incremental_snapshot_archive_info.hash(),
+                ))
+            });
+        let starting_snapshot_hashes = StartingSnapshotHashes {
+            full: full_snapshot_hash,
+            incremental: starting_incremental_snapshot_hash,
+        };
+
+        (deserialized_bank, Some(starting_snapshot_hashes))
+    };
 
     if let Some(shrink_paths) = shrink_paths {
         deserialized_bank.set_shrink_paths(shrink_paths);
     }
 
-    let full_snapshot_hash = FullSnapshotHash((
-        full_snapshot_archive_info.slot(),
-        *full_snapshot_archive_info.hash(),
-    ));
-    let starting_incremental_snapshot_hash =
-        incremental_snapshot_archive_info.map(|incremental_snapshot_archive_info| {
-            IncrementalSnapshotHash((
-                incremental_snapshot_archive_info.slot(),
-                *incremental_snapshot_archive_info.hash(),
-            ))
-        });
-    let starting_snapshot_hashes = StartingSnapshotHashes {
-        full: full_snapshot_hash,
-        incremental: starting_incremental_snapshot_hash,
-    };
-
     (
         Arc::new(RwLock::new(BankForks::new(deserialized_bank))),
-        Some(starting_snapshot_hashes),
+        starting_snapshot_hashes,
     )
 }
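
Stripped of the accounts-db plumbing, the new control flow in `bank_forks_from_snapshot` reduces to the sketch below. The types and the `load_from_*` helpers are hypothetical stand-ins for `Bank`, `StartingSnapshotHashes`, and the `snapshot_utils::bank_from_latest_snapshot_dir` / `bank_from_latest_snapshot_archives` calls; the point is the branch on `boot_from_local_state` and the fact that only the archive path produces starting snapshot hashes:

// Hypothetical, pared-down stand-ins for the real Bank / snapshot machinery.
struct Bank;
struct SnapshotHashes;

struct ProcessOptions {
    boot_from_local_state: bool,
}

struct SnapshotConfig {
    generate_snapshots: bool,
}

impl SnapshotConfig {
    fn should_generate_snapshots(&self) -> bool {
        self.generate_snapshots
    }
}

// Stand-ins for snapshot_utils::bank_from_latest_snapshot_dir / _archives.
fn load_from_snapshot_dir() -> Bank {
    Bank
}
fn load_from_snapshot_archives() -> (Bank, SnapshotHashes) {
    (Bank, SnapshotHashes)
}
fn purge_old_bank_snapshots() {}

fn bank_from_snapshot(
    process_options: &ProcessOptions,
    snapshot_config: &SnapshotConfig,
) -> (Bank, Option<SnapshotHashes>) {
    if process_options.boot_from_local_state {
        // Fastboot: reuse the newest state already on disk. Generating new
        // snapshots is not supported on this path yet, hence the assertion.
        assert!(!snapshot_config.should_generate_snapshots());
        let bank = load_from_snapshot_dir();
        // No archive was read, so there are no starting snapshot hashes to report.
        (bank, None)
    } else {
        // Archive path: purge stale snapshot dirs first (they would otherwise hold
        // space until the background service releases them), then rebuild the bank
        // from the latest full + incremental snapshot archives.
        purge_old_bank_snapshots();
        let (bank, hashes) = load_from_snapshot_archives();
        (bank, Some(hashes))
    }
}

fn main() {
    let config = SnapshotConfig { generate_snapshots: false };
    let (_bank, hashes) =
        bank_from_snapshot(&ProcessOptions { boot_from_local_state: true }, &config);
    assert!(hashes.is_none());
}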

@@ -632,6 +632,8 @@ pub struct ProcessOptions {
     /// true if after processing the contents of the blockstore at startup, we should run an accounts hash calc
     /// This is useful for debugging.
     pub run_final_accounts_hash_calc: bool,
+    /// Enable booting from state already on disk, instead of extracting it from a snapshot archive.
+    pub boot_from_local_state: bool,
 }
 
 pub fn test_process_blockstore(
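
Because every call site in this change builds the options with struct update syntax (`..ProcessOptions::default()`), the new field stays `false` unless `--boot-from-local-state` is actually passed, so existing callers keep the archive-based behavior. A tiny sketch with a pared-down struct (assuming, as those call sites imply, that the real type implements `Default`):

// Pared-down stand-in; the real ProcessOptions has many more fields.
#[derive(Default)]
struct ProcessOptions {
    run_final_accounts_hash_calc: bool,
    boot_from_local_state: bool,
}

fn main() {
    // Mirrors how main() fills in only the fields of interest and lets the rest
    // (including boot_from_local_state) fall back to their defaults.
    let flag_present = false; // stand-in for arg_matches.is_present("boot_from_local_state")
    let options = ProcessOptions {
        boot_from_local_state: flag_present,
        ..ProcessOptions::default()
    };
    assert!(!options.boot_from_local_state);
    assert!(!options.run_final_accounts_hash_calc);
}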