ledger-tool: Add flag to ignore open file descriptor limit error (#32624)

The current desired open file descriptor limit is 1,000,000. This is
quite a large number, and not needed for every command. Namely, commands
that do not unpack a snapshot and create an AccountsDB will likely not
use this many files.

There is already an option in BlockstoreOptions to ignore errors if the
desired value cannot be set; this PR just bubbles that option up to a
CLI flag in ledger-tool.
Commit authored by steviez on 2023-07-27 11:50:45 -05:00 (committed via GitHub)
parent 0b52b8a46e
commit 028f10d3d1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 51 additions and 4 deletions

View File

@ -990,6 +990,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
let verbose = matches.is_present("verbose");
let force_update_to_open = matches.is_present("force_update_to_open");
let output_format = OutputFormat::from_matches(matches, "output_format", verbose);
let enforce_ulimit_nofile = !matches.is_present("ignore_ulimit_nofile_error");
let (subcommand, sub_matches) = matches.subcommand();
let instance_name = get_global_subcommand_arg(
@ -1015,6 +1016,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
AccessType::Secondary,
None,
force_update_to_open,
enforce_ulimit_nofile,
);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,

View File

@ -304,6 +304,7 @@ pub fn load_and_process_ledger(
AccessType::PrimaryForMaintenance,
None,
false,
false,
))
} else {
blockstore.clone()
@ -356,6 +357,7 @@ pub fn open_blockstore(
access_type: AccessType,
wal_recovery_mode: Option<BlockstoreRecoveryMode>,
force_update_to_open: bool,
enforce_ulimit_nofile: bool,
) -> Blockstore {
let shred_storage_type = get_shred_storage_type(
ledger_path,
@ -370,7 +372,7 @@ pub fn open_blockstore(
BlockstoreOptions {
access_type: access_type.clone(),
recovery_mode: wal_recovery_mode.clone(),
enforce_ulimit_nofile: true,
enforce_ulimit_nofile,
column_options: LedgerColumnOptions {
shred_storage_type,
..LedgerColumnOptions::default()

View File

@ -1338,6 +1338,15 @@ fn main() {
.help("Allow commands that would otherwise not alter the \
blockstore to make necessary updates in order to open it"),
)
.arg(
    Arg::with_name("ignore_ulimit_nofile_error")
        .long("ignore-ulimit-nofile-error")
        // This is a boolean flag queried with matches.is_present(); it must
        // NOT set .value_name()/.takes_value(). In clap 2.x, .value_name()
        // implicitly enables takes_value(true), which would force the flag
        // to consume a value (the stray .value_name("FORMAT") was a
        // copy-paste from the "output_format" arg).
        .global(true)
        .help("Allow opening the blockstore to succeed even if the desired open file \
        descriptor limit cannot be configured. Use with caution as some commands may \
        run fine with a reduced file descriptor limit while others will not"),
)
.arg(
Arg::with_name("snapshot_archive_path")
.long("snapshot-archive-path")
@ -2158,6 +2167,7 @@ fn main() {
.value_of("wal_recovery_mode")
.map(BlockstoreRecoveryMode::from);
let force_update_to_open = matches.is_present("force_update_to_open");
let enforce_ulimit_nofile = !matches.is_present("ignore_ulimit_nofile_error");
let verbose_level = matches.occurrences_of("verbose");
if let ("bigtable", Some(arg_matches)) = matches.subcommand() {
@ -2178,6 +2188,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
),
starting_slot,
ending_slot,
@ -2198,6 +2209,7 @@ fn main() {
AccessType::Secondary,
None,
force_update_to_open,
enforce_ulimit_nofile,
);
// Check if shred storage type can be inferred; if not, a new
@ -2215,8 +2227,13 @@ fn main() {
&target_db
),
);
let target =
open_blockstore(&target_db, AccessType::Primary, None, force_update_to_open);
let target = open_blockstore(
&target_db,
AccessType::Primary,
None,
force_update_to_open,
enforce_ulimit_nofile,
);
for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() {
if slot > ending_slot {
break;
@ -2297,6 +2314,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
arg_matches,
@ -2341,6 +2359,7 @@ fn main() {
AccessType::Secondary,
None,
force_update_to_open,
enforce_ulimit_nofile,
);
for (slot, _meta) in ledger
.slot_meta_iterator(starting_slot)
@ -2381,6 +2400,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
arg_matches,
@ -2407,6 +2427,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
for slot in slots {
println!("Slot {slot}");
@ -2431,6 +2452,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
),
starting_slot,
Slot::MAX,
@ -2447,6 +2469,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() {
@ -2459,6 +2482,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() {
@ -2472,6 +2496,7 @@ fn main() {
AccessType::Primary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
for slot in slots {
match blockstore.set_dead_slot(slot) {
@ -2487,6 +2512,7 @@ fn main() {
AccessType::Primary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
for slot in slots {
match blockstore.remove_dead_slot(slot) {
@ -2505,6 +2531,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let mut ancestors = BTreeSet::new();
assert!(
@ -2624,6 +2651,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let (bank_forks, ..) = load_and_process_ledger(
arg_matches,
@ -2673,6 +2701,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
arg_matches,
@ -2803,6 +2832,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
));
let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") {
@ -3221,6 +3251,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let (bank_forks, ..) = load_and_process_ledger(
arg_matches,
@ -3315,6 +3346,7 @@ fn main() {
get_access_type(&process_options),
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
match load_and_process_ledger(
arg_matches,
@ -3845,6 +3877,7 @@ fn main() {
AccessType::PrimaryForMaintenance,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let end_slot = match end_slot {
@ -3918,6 +3951,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
@ -3971,6 +4005,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let num_slots = value_t_or_exit!(arg_matches, "num_slots", usize);
let exclude_vote_only_slots = arg_matches.is_present("exclude_vote_only_slots");
@ -4006,6 +4041,7 @@ fn main() {
AccessType::Primary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let start_root = if let Some(root) = arg_matches.value_of("start_root") {
Slot::from_str(root).expect("Before root must be a number")
@ -4043,6 +4079,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
match blockstore.slot_meta_iterator(0) {
@ -4115,6 +4152,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
)
.db(),
);
@ -4125,6 +4163,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let mut slots: Vec<u64> = vec![];
@ -4148,6 +4187,7 @@ fn main() {
AccessType::Secondary,
wal_recovery_mode,
false,
enforce_ulimit_nofile,
);
let sst_file_name = arg_matches.value_of("file_name");
if let Err(err) = print_blockstore_file_metadata(&blockstore, &sst_file_name) {

View File

@ -77,6 +77,7 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc<Bank
let debug_keys = pubkeys_of(arg_matches, "debug_key")
.map(|pubkeys| Arc::new(pubkeys.into_iter().collect::<HashSet<_>>()));
let force_update_to_open = arg_matches.is_present("force_update_to_open");
let enforce_ulimit_nofile = !arg_matches.is_present("ignore_ulimit_nofile_error");
let process_options = ProcessOptions {
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
run_verification: false,
@ -116,6 +117,7 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc<Bank
AccessType::Secondary,
wal_recovery_mode,
force_update_to_open,
enforce_ulimit_nofile,
);
let (bank_forks, ..) = load_and_process_ledger(
arg_matches,

View File

@ -8,7 +8,8 @@ pub struct BlockstoreOptions {
pub access_type: AccessType,
// Whether to open a blockstore under a recovery mode. Default: None.
pub recovery_mode: Option<BlockstoreRecoveryMode>,
// Whether to allow unlimited number of open files. Default: true.
// When opening the Blockstore, determines whether to error or not if the
// desired open file descriptor limit cannot be configured. Default: true.
pub enforce_ulimit_nofile: bool,
pub column_options: LedgerColumnOptions,
}