Don't require increased open file limit in solana-test-validator

Travis CI in particular does not allow the open file limit to be
increased.
Michael Vines 2020-12-16 17:56:38 -08:00
parent 8d700c3b94
commit 0b92720fdb
6 changed files with 45 additions and 22 deletions
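In short: a new enforce_ulimit_nofile flag is threaded from ValidatorConfig down into Blockstore. It defaults to true, preserving the old fail-hard behavior for regular validators, while TestValidator sets it to false so that opening the ledger no longer errors out when the desired 500000 open-file-descriptor limit cannot be set. A minimal sketch of the config-level opt-out (hypothetical helper name; the import path is assumed, not part of this commit):

    use solana_core::validator::ValidatorConfig;

    // Hypothetical helper: a config for CI hosts such as Travis, where
    // `ulimit -n` cannot be raised to the 500000 descriptors RocksDB prefers.
    fn ci_friendly_config() -> ValidatorConfig {
        ValidatorConfig {
            // Tolerate a low fd limit instead of returning
            // BlockstoreError::UnableToSetOpenFileDescriptorLimit.
            enforce_ulimit_nofile: false,
            ..ValidatorConfig::default()
        }
    }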

View File

@@ -346,6 +346,7 @@ impl TestValidator {
 compression: CompressionType::NoCompression,
 snapshot_version: SnapshotVersion::default(),
 }),
+enforce_ulimit_nofile: false,
 ..ValidatorConfig::default()
 };

View File

@@ -341,7 +341,7 @@ pub mod tests {
 ledger_signal_receiver,
 completed_slots_receiver,
 ..
-} = Blockstore::open_with_signal(&blockstore_path, None)
+} = Blockstore::open_with_signal(&blockstore_path, None, true)
 .expect("Expected to successfully open ledger");
 let blockstore = Arc::new(blockstore);
 let bank = bank_forks.working_bank();

View File

@@ -86,6 +86,7 @@ pub struct ValidatorConfig {
 pub max_ledger_shreds: Option<u64>,
 pub broadcast_stage_type: BroadcastStageType,
 pub enable_partition: Option<Arc<AtomicBool>>,
+pub enforce_ulimit_nofile: bool,
 pub fixed_leader_schedule: Option<FixedSchedule>,
 pub wait_for_supermajority: Option<Slot>,
 pub new_hard_forks: Option<Vec<Slot>>,
@@ -123,6 +124,7 @@ impl Default for ValidatorConfig {
 snapshot_config: None,
 broadcast_stage_type: BroadcastStageType::Standard,
 enable_partition: None,
+enforce_ulimit_nofile: true,
 fixed_leader_schedule: None,
 wait_for_supermajority: None,
 new_hard_forks: None,
@@ -300,6 +302,7 @@ impl Validator {
 ledger_path,
 config.poh_verify,
 &exit,
+config.enforce_ulimit_nofile,
 );
 let leader_schedule_cache = Arc::new(leader_schedule_cache);
@@ -822,6 +825,7 @@ fn new_banks_from_ledger(
 ledger_path: &Path,
 poh_verify: bool,
 exit: &Arc<AtomicBool>,
+enforce_ulimit_nofile: bool,
 ) -> (
 GenesisConfig,
 BankForks,
@@ -859,8 +863,12 @@ fn new_banks_from_ledger(
 ledger_signal_receiver,
 completed_slots_receiver,
 ..
-} = Blockstore::open_with_signal(ledger_path, config.wal_recovery_mode.clone())
-.expect("Failed to open ledger database");
+} = Blockstore::open_with_signal(
+    ledger_path,
+    config.wal_recovery_mode.clone(),
+    enforce_ulimit_nofile,
+)
+.expect("Failed to open ledger database");
 blockstore.set_no_compaction(config.no_rocksdb_compaction);
 let restored_tower = Tower::restore(ledger_path, &validator_identity);

View File

@@ -615,7 +615,7 @@ fn open_blockstore(
 access_type: AccessType,
 wal_recovery_mode: Option<BlockstoreRecoveryMode>,
 ) -> Blockstore {
-match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode) {
+match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode, true) {
 Ok(blockstore) => blockstore,
 Err(err) => {
 eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);

View File

@@ -253,26 +253,33 @@ impl Blockstore {
 /// Opens a Ledger in directory, provides "infinite" window of shreds
 pub fn open(ledger_path: &Path) -> Result<Blockstore> {
-Self::do_open(ledger_path, AccessType::PrimaryOnly, None)
+Self::do_open(ledger_path, AccessType::PrimaryOnly, None, true)
 }
 pub fn open_with_access_type(
 ledger_path: &Path,
 access_type: AccessType,
 recovery_mode: Option<BlockstoreRecoveryMode>,
+enforce_ulimit_nofile: bool,
 ) -> Result<Blockstore> {
-Self::do_open(ledger_path, access_type, recovery_mode)
+Self::do_open(
+    ledger_path,
+    access_type,
+    recovery_mode,
+    enforce_ulimit_nofile,
+)
 }
 fn do_open(
 ledger_path: &Path,
 access_type: AccessType,
 recovery_mode: Option<BlockstoreRecoveryMode>,
+enforce_ulimit_nofile: bool,
 ) -> Result<Blockstore> {
 fs::create_dir_all(&ledger_path)?;
 let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
-adjust_ulimit_nofile()?;
+adjust_ulimit_nofile(enforce_ulimit_nofile)?;
 // Open the database
 let mut measure = Measure::start("open");
@@ -363,9 +370,14 @@ impl Blockstore {
 pub fn open_with_signal(
 ledger_path: &Path,
 recovery_mode: Option<BlockstoreRecoveryMode>,
+enforce_ulimit_nofile: bool,
 ) -> Result<BlockstoreSignals> {
-let mut blockstore =
-Self::open_with_access_type(ledger_path, AccessType::PrimaryOnly, recovery_mode)?;
+let mut blockstore = Self::open_with_access_type(
+    ledger_path,
+    AccessType::PrimaryOnly,
+    recovery_mode,
+    enforce_ulimit_nofile,
+)?;
 let (ledger_signal_sender, ledger_signal_receiver) = sync_channel(1);
 let (completed_slots_sender, completed_slots_receiver) =
 sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL);
@@ -3282,7 +3294,7 @@ pub fn create_new_ledger(
 genesis_config.write(&ledger_path)?;
 // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
-let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None)?;
+let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, true)?;
 let ticks_per_slot = genesis_config.ticks_per_slot;
 let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
 let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
@@ -3526,12 +3538,12 @@ pub fn make_chaining_slot_entries(
 }
 #[cfg(not(unix))]
-fn adjust_ulimit_nofile() -> Result<()> {
+fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> {
 Ok(())
 }
 #[cfg(unix)]
-fn adjust_ulimit_nofile() -> Result<()> {
+fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
 // Rocks DB likes to have many open files. The default open file descriptor limit is
 // usually not enough
 let desired_nofile = 500000;
@@ -3562,7 +3574,9 @@ fn adjust_ulimit_nofile() -> Result<()> {
 desired_nofile, desired_nofile,
 );
 }
-return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
+if enforce_ulimit_nofile {
+    return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
+}
 }
 nofile = get_nofile();
@@ -4223,7 +4237,7 @@ pub mod tests {
 fn test_data_set_completed_on_insert() {
 let ledger_path = get_tmp_ledger_path!();
 let BlockstoreSignals { blockstore, .. } =
-Blockstore::open_with_signal(&ledger_path, None).unwrap();
+Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
 // Create enough entries to fill 2 shreds, only the later one is data complete
 let slot = 0;
@@ -4264,7 +4278,7 @@ pub mod tests {
 blockstore: ledger,
 ledger_signal_receiver: recvr,
 ..
-} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
+} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
 let ledger = Arc::new(ledger);
 let entries_per_slot = 50;
@@ -4348,7 +4362,7 @@ pub mod tests {
 blockstore: ledger,
 completed_slots_receiver: recvr,
 ..
-} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
+} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
 let ledger = Arc::new(ledger);
 let entries_per_slot = 10;
@@ -4374,7 +4388,7 @@ pub mod tests {
 blockstore: ledger,
 completed_slots_receiver: recvr,
 ..
-} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
+} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
 let ledger = Arc::new(ledger);
 let entries_per_slot = 10;
@@ -4418,7 +4432,7 @@ pub mod tests {
 blockstore: ledger,
 completed_slots_receiver: recvr,
 ..
-} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
+} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
 let ledger = Arc::new(ledger);
 let entries_per_slot = 10;

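For code that opens the blockstore directly, the same knob is the new trailing bool on open_with_access_type and open_with_signal (Blockstore::open keeps its signature and passes true internally); existing call sites pass true to retain the strict behavior, while passing false means a failed ulimit adjustment no longer returns BlockstoreError::UnableToSetOpenFileDescriptorLimit. A sketch of such a caller, assuming the usual solana_ledger import paths (open_blockstore_for_ci is a hypothetical name):

    use solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType};
    use std::path::Path;

    // Open a primary blockstore without insisting on the raised fd limit,
    // matching what the test validator now tolerates on constrained CI hosts.
    fn open_blockstore_for_ci(ledger_path: &Path) -> Blockstore {
        Blockstore::open_with_access_type(
            ledger_path,
            AccessType::PrimaryOnly,
            None,  // wal_recovery_mode
            false, // enforce_ulimit_nofile
        )
        .expect("failed to open blockstore")
    }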
View File

@@ -1663,11 +1663,10 @@ fn test_validator_saves_tower() {
 }
 fn open_blockstore(ledger_path: &Path) -> Blockstore {
-Blockstore::open_with_access_type(ledger_path, AccessType::PrimaryOnly, None).unwrap_or_else(
-|e| {
-panic!("Failed to open ledger at {:?}, err: {}", ledger_path, e);
-},
-)
+Blockstore::open_with_access_type(ledger_path, AccessType::PrimaryOnly, None, true)
+.unwrap_or_else(|e| {
+panic!("Failed to open ledger at {:?}, err: {}", ledger_path, e);
+})
 }
 fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) {
@@ -1887,6 +1886,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
 &val_a_ledger_path,
 AccessType::TryPrimaryThenSecondary,
 None,
+true,
 )
 .unwrap();
 let mut ancestors = AncestorIterator::new(last_vote, &blockstore);