From a8e000a2a6fa438595911cddaef9ee6715697f38 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)" <75863576+jeffwashington@users.noreply.github.com>
Date: Mon, 11 Oct 2021 12:46:27 -0500
Subject: [PATCH] add filler accounts to bloat validator and predict failure
 (#20491)

* add filler accounts to bloat validator and predict failure
* assert no accounts match filler
* cleanup magic numbers
* panic if can't load from snapshot with filler accounts specified
* some renames
* renames
* into_par_iter
* clean filler accts, too
---
 core/src/accounts_hash_verifier.rs  |   1 +
 ledger-tool/src/main.rs             |  10 ++
 ledger/src/bank_forks_utils.rs      |  10 ++
 runtime/src/accounts.rs             |   1 +
 runtime/src/accounts_db.rs          | 195 +++++++++++++++++++++++++---
 runtime/src/accounts_hash.rs        |  88 +++++++------
 runtime/src/bank.rs                 |  32 +++--
 runtime/src/rent_collector.rs       |  15 ++-
 runtime/src/serde_snapshot.rs       |   8 +-
 runtime/src/serde_snapshot/tests.rs |   5 +-
 validator/src/main.rs               |  12 +-
 11 files changed, 303 insertions(+), 74 deletions(-)

diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs
index 1e345a296d..62ff6af61d 100644
--- a/core/src/accounts_hash_verifier.rs
+++ b/core/src/accounts_hash_verifier.rs
@@ -134,6 +134,7 @@ impl AccountsHashVerifier {
                 HashStats::default(),
                 false,
                 None,
+                None, // this will fail with filler accounts
             )
             .unwrap();

diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 2b7ce225fe..282cc1290b 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -893,6 +893,12 @@ fn main() {
         .validator(is_parsable::<usize>)
         .takes_value(true)
         .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk. If missing, the entire index is stored in memory.");
+    let accounts_filler_count = Arg::with_name("accounts_filler_count")
+        .long("accounts-filler-count")
+        .value_name("COUNT")
+        .validator(is_parsable::<usize>)
+        .takes_value(true)
+        .help("How many accounts to add to stress the system. Filler accounts are ignored in operations related to correctness.");
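Both `ledger-tool` here and the validator (last file in this patch) register the flag the same way. A minimal, self-contained sketch of the round-trip from CLI string to the `Option<usize>` the config consumes, assuming clap 2.x and the `value_t!` macro the patch itself uses (the binary name is invented):

```rust
use clap::{value_t, App, Arg};

fn main() {
    // Hypothetical standalone version of the flag wiring shown in this patch.
    let matches = App::new("demo")
        .arg(
            Arg::with_name("accounts_filler_count")
                .long("accounts-filler-count")
                .value_name("COUNT")
                .takes_value(true),
        )
        .get_matches();

    // `.ok()` collapses "flag absent" into None, which is how downstream
    // code decides whether the filler-account machinery is enabled at all.
    let filler_account_count: Option<usize> =
        value_t!(matches, "accounts_filler_count", usize).ok();
    println!("filler accounts requested: {:?}", filler_account_count);
}
```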
     let account_paths_arg = Arg::with_name("account_paths")
         .long("accounts")
         .value_name("PATHS")
@@ -1217,6 +1223,7 @@ fn main() {
                     .arg(&limit_load_slot_count_from_snapshot_arg)
                     .arg(&accounts_index_bins)
                     .arg(&accounts_index_limit)
+                    .arg(&accounts_filler_count)
                     .arg(&verify_index_arg)
                     .arg(&hard_forks_arg)
                     .arg(&no_accounts_db_caching_arg)
@@ -1961,9 +1968,12 @@ fn main() {
                 accounts_index_config.drives = Some(accounts_index_paths);
             }

+            let filler_account_count = value_t!(arg_matches, "accounts_filler_count", usize).ok();
+
             let accounts_db_config = Some(AccountsDbConfig {
                 index: Some(accounts_index_config),
                 accounts_hash_cache_path: Some(ledger_path.clone()),
+                filler_account_count,
             });

             let process_options = ProcessOptions {
diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs
index 1b5258475a..9323dd8dff 100644
--- a/ledger/src/bank_forks_utils.rs
+++ b/ledger/src/bank_forks_utils.rs
@@ -97,6 +97,16 @@ pub fn load(
         info!("Snapshots disabled; will load from genesis");
     }

+    if process_options
+        .accounts_db_config
+        .as_ref()
+        .and_then(|config| config.filler_account_count)
+        .unwrap_or_default()
+        > 0
+    {
+        panic!("filler accounts specified, but not loading from snapshot");
+    }
+
     load_from_genesis(
         genesis_config,
         blockstore,
diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs
index 64ff24de6f..14646aa5f2 100644
--- a/runtime/src/accounts.rs
+++ b/runtime/src/accounts.rs
@@ -273,6 +273,7 @@ impl Accounts {
                         key,
                         &mut account,
                         rent_for_sysvars,
+                        self.accounts_db.filler_account_suffix.as_ref(),
                     );
                     (account, rent_due)
                 } else {
diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 582d7d12ee..46734133ca 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -52,7 +52,7 @@ use solana_measure::measure::Measure;
 use solana_rayon_threadlimit::get_thread_count;
 use solana_sdk::{
     account::{AccountSharedData, ReadableAccount},
-    clock::{BankId, Epoch, Slot},
+    clock::{BankId, Epoch, Slot, SlotCount},
     genesis_config::ClusterType,
     hash::{Hash, Hasher},
     pubkey::Pubkey,
@@ -68,6 +68,7 @@ use std::{
     io::{Error as IoError, Result as IoResult},
     ops::{Range, RangeBounds},
     path::{Path, PathBuf},
+    str::FromStr,
     sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
     sync::{Arc, Condvar, Mutex, MutexGuard, RwLock},
     thread::Builder,
@@ -129,10 +130,12 @@ const CACHE_VIRTUAL_STORED_SIZE: usize = 0;
 pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig {
     index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
     accounts_hash_cache_path: None,
+    filler_account_count: None,
 };
 pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig {
     index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS),
     accounts_hash_cache_path: None,
+    filler_account_count: None,
 };

 pub type BinnedHashData = Vec<Vec<CalculateHashIntermediate>>;

@@ -141,6 +144,7 @@ pub struct AccountsDbConfig {
     pub index: Option<AccountsIndexConfig>,
     pub accounts_hash_cache_path: Option<PathBuf>,
+    pub filler_account_count: Option<usize>,
 }

 struct FoundStoredAccount<'a> {
@@ -1044,6 +1048,9 @@ pub struct AccountsDb {

     /// AccountsDbPlugin accounts update notifier
     accounts_update_notifier: Option<AccountsUpdateNotifier>,
+
+    filler_account_count: usize,
+    pub filler_account_suffix: Option<Pubkey>,
 }

 #[derive(Debug, Default)]
@@ -1536,6 +1543,8 @@ impl AccountsDb {
             dirty_stores: DashMap::default(),
             zero_lamport_accounts_to_purge_after_full_snapshot: DashSet::default(),
             accounts_update_notifier: None,
+            filler_account_count: 0,
+            filler_account_suffix: None,
         }
     }

@@ -1565,6 +1574,15 @@ impl AccountsDb {
         let accounts_hash_cache_path = accounts_db_config
             .as_ref()
             .and_then(|x| x.accounts_hash_cache_path.clone());
+        let filler_account_count = accounts_db_config
+            .as_ref()
+            .and_then(|cfg| cfg.filler_account_count)
+            .unwrap_or_default();
+        let filler_account_suffix = if filler_account_count > 0 {
+            Some(solana_sdk::pubkey::new_rand())
+        } else {
+            None
+        };
         let paths_is_empty = paths.is_empty();
         let mut new = Self {
             paths,
@@ -1573,6 +1591,8 @@ impl AccountsDb {
             caching_enabled,
             shrink_ratio,
             accounts_update_notifier,
+            filler_account_count,
+            filler_account_suffix,
             ..Self::default_with_accounts_index(accounts_index, accounts_hash_cache_path)
         };
         if paths_is_empty {
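The hunk above is what arms the feature: a nonzero `filler_account_count` causes a random suffix pubkey to be generated once per validator run, and everything else in the patch keys off that `Option`. A minimal sketch of that decision, with `new_rand` standing in for the startup randomness (hypothetical standalone function):

```rust
use solana_sdk::pubkey::{self, Pubkey};

/// Hypothetical distillation of the config handling in `new_with_config`:
/// a suffix pubkey exists only when filler accounts are requested.
fn filler_suffix(filler_account_count: Option<usize>) -> Option<Pubkey> {
    let count = filler_account_count.unwrap_or_default();
    // Every filler pubkey created later shares this suffix, so one random
    // value per run is enough to recognize all of them.
    (count > 0).then(pubkey::new_rand)
}

fn main() {
    assert!(filler_suffix(None).is_none());
    assert!(filler_suffix(Some(0)).is_none());
    assert!(filler_suffix(Some(1_000_000)).is_some());
}
```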
@@ -4855,6 +4875,9 @@ impl AccountsDb {
         let result: Vec<Hash> = pubkeys
             .iter()
             .filter_map(|pubkey| {
+                if self.is_filler_account(pubkey) {
+                    return None;
+                }
                 if let AccountIndexGetResult::Found(lock, index) =
                     self.accounts_index.get(pubkey, Some(ancestors), Some(slot))
                 {
@@ -4880,7 +4903,7 @@ impl AccountsDb {
                         |loaded_account| {
                             let loaded_hash = loaded_account.loaded_hash();
                             let balance = account_info.lamports;
-                            if check_hash {
+                            if check_hash && !self.is_filler_account(pubkey) {
                                 let computed_hash = loaded_account.compute_hash(*slot, pubkey);
                                 if computed_hash != loaded_hash {
@@ -5233,6 +5256,11 @@ impl AccountsDb {
                 timings,
                 check_hash,
                 accounts_cache_and_ancestors,
+                if self.filler_account_count > 0 {
+                    self.filler_account_suffix.as_ref()
+                } else {
+                    None
+                },
             )
         } else {
             self.calculate_accounts_hash(slot, ancestors, check_hash)
@@ -5318,6 +5346,7 @@ impl AccountsDb {
             &Ancestors,
             &AccountInfoAccountsIndex,
         )>,
+        filler_account_suffix: Option<&Pubkey>,
     ) -> Result<Vec<BinnedHashData>, BankHashVerificationError> {
         let bin_calculator = PubkeyBinCalculator16::new(bins);
         assert!(bin_range.start < bins && bin_range.end <= bins && bin_range.start < bin_range.end);
@@ -5352,7 +5381,7 @@ impl AccountsDb {
                     let source_item =
                         CalculateHashIntermediate::new(loaded_account.loaded_hash(), balance, *pubkey);

-                    if check_hash {
+                    if check_hash && !Self::is_filler_account_helper(pubkey, filler_account_suffix) {
                         let computed_hash = loaded_account.compute_hash(slot, pubkey);
                         if computed_hash != source_item.hash {
                             info!(
@@ -5425,6 +5454,7 @@ impl AccountsDb {
             &Ancestors,
             &AccountInfoAccountsIndex,
         )>,
+        filler_account_suffix: Option<&Pubkey>,
     ) -> Result<(Hash, u64), BankHashVerificationError> {
         let mut scan_and_hash = move || {
             assert_eq!(
@@ -5450,9 +5480,13 @@ impl AccountsDb {
                     &bounds,
                     check_hash,
                     accounts_cache_and_ancestors,
+                    filler_account_suffix,
                 )?;

-                let (hash, lamports, for_next_pass) = AccountsHash::rest_of_hash_calculation(
+                let hash = AccountsHash {
+                    filler_account_suffix: filler_account_suffix.cloned(),
+                };
+                let (hash, lamports, for_next_pass) = hash.rest_of_hash_calculation(
                     result,
                     &mut stats,
                     pass == NUM_SCAN_PASSES - 1,
@@ -5570,12 +5604,19 @@ impl AccountsDb {
             .scan_account_storage(
                 slot,
                 |loaded_account: LoadedAccount| {
-                    // Cache only has one version per key, don't need to worry about versioning
-                    Some((*loaded_account.pubkey(), loaded_account.loaded_hash()))
+                    if self.is_filler_account(loaded_account.pubkey()) {
+                        None
+                    } else {
+                        // Cache only has one version per key, don't need to worry about versioning
+                        Some((*loaded_account.pubkey(), loaded_account.loaded_hash()))
+                    }
                 },
                 |accum: &DashMap<Pubkey, (u64, Hash)>, loaded_account: LoadedAccount| {
                     let loaded_write_version = loaded_account.write_version();
                     let loaded_hash = loaded_account.loaded_hash();
+                    if self.is_filler_account(loaded_account.pubkey()) {
+                        return;
+                    }
                     let should_insert =
                         if let Some(existing_entry) = accum.get(loaded_account.pubkey()) {
                             loaded_write_version > existing_entry.value().version()
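A pattern repeats through these hunks: every scan that feeds a hash or a capitalization figure first drops filler accounts, so their lamports never reach the bank's totals. A self-contained sketch of that filtering idea (simplified shapes, not the AccountsDb API):

```rust
use solana_sdk::pubkey::Pubkey;

/// Hypothetical distillation of the scans above: filler lamports must never
/// reach the capitalization sum or the hash inputs.
fn capitalization<'a, I>(accounts: I, is_filler: impl Fn(&Pubkey) -> bool) -> u64
where
    I: Iterator<Item = (&'a Pubkey, u64)>,
{
    accounts
        .filter(|(pubkey, _)| !is_filler(pubkey))
        .map(|(_, lamports)| lamports)
        .sum()
}

fn main() {
    let keys: Vec<Pubkey> = (0..3).map(|_| Pubkey::new_unique()).collect();
    let filler = keys[2];
    let balances = [(&keys[0], 10u64), (&keys[1], 20), (&keys[2], 100)];
    let cap = capitalization(balances.iter().copied(), |k| *k == filler);
    assert_eq!(cap, 30); // the filler's 100 lamports are invisible to capitalization
}
```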
@@ -6369,9 +6410,10 @@ impl AccountsDb {
         (result, slots)
     }

-    fn process_storage_slot(
-        storage_maps: &[Arc<AccountStorageEntry>],
-    ) -> GenerateIndexAccountsMap<'_> {
+    fn process_storage_slot<'a>(
+        &self,
+        storage_maps: &'a [Arc<AccountStorageEntry>],
+    ) -> GenerateIndexAccountsMap<'a> {
         let num_accounts = storage_maps
             .iter()
             .map(|storage| storage.approx_stored_count())
@@ -6381,7 +6423,9 @@ impl AccountsDb {
             let accounts = storage.all_accounts();
             accounts.into_iter().for_each(|stored_account| {
                 let this_version = stored_account.meta.write_version;
-                match accounts_map.entry(stored_account.meta.pubkey) {
+                let pubkey = stored_account.meta.pubkey;
+                assert!(!self.is_filler_account(&pubkey));
+                match accounts_map.entry(pubkey) {
                     std::collections::hash_map::Entry::Vacant(entry) => {
                         entry.insert(IndexAccountMapEntry {
                             write_version: this_version,
@@ -6398,7 +6442,7 @@ impl AccountsDb {
                             stored_account,
                         });
                     } else {
-                        assert!(occupied_version != this_version);
+                        assert_ne!(occupied_version, this_version);
                     }
                 }
             }
@@ -6462,6 +6506,123 @@ impl AccountsDb {
         insert_us
     }

+    fn filler_unique_id_bytes() -> usize {
+        std::mem::size_of::<u32>()
+    }
+
+    fn filler_rent_partition_prefix_bytes() -> usize {
+        std::mem::size_of::<u64>()
+    }
+
+    fn filler_prefix_bytes() -> usize {
+        Self::filler_unique_id_bytes() + Self::filler_rent_partition_prefix_bytes()
+    }
+
+    pub fn is_filler_account_helper(
+        pubkey: &Pubkey,
+        filler_account_suffix: Option<&Pubkey>,
+    ) -> bool {
+        let offset = Self::filler_prefix_bytes();
+        filler_account_suffix
+            .as_ref()
+            .map(|filler_account_suffix| {
+                pubkey.as_ref()[offset..] == filler_account_suffix.as_ref()[offset..]
+            })
+            .unwrap_or_default()
+    }
+
+    pub fn is_filler_account(&self, pubkey: &Pubkey) -> bool {
+        Self::is_filler_account_helper(pubkey, self.filler_account_suffix.as_ref())
+    }
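The helper above makes "filler" a property of the key itself: bytes `12..32` must equal the suffix, while the first 12 bytes (an 8-byte rent-partition prefix plus a 4-byte unique id) are free to vary. A hedged sketch of what the comparison accepts and rejects (key values are made up):

```rust
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Made-up suffix standing in for the per-validator random pubkey.
    let suffix = Pubkey::new_unique();
    let prefix_bytes = 8 + 4; // filler_rent_partition_prefix_bytes + filler_unique_id_bytes

    // A filler key: identical to the suffix except for the 12-byte prefix.
    let mut filler = suffix;
    filler.as_mut()[..prefix_bytes].copy_from_slice(&[0xab; 12]);

    let is_filler =
        |key: &Pubkey| key.as_ref()[prefix_bytes..] == suffix.as_ref()[prefix_bytes..];
    assert!(is_filler(&filler));
    assert!(!is_filler(&Pubkey::new_unique())); // a real account almost surely differs
}
```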
+    /// Filler accounts are space-holding accounts which are ignored by hash calculations and rent collection.
+    /// They are designed to allow a validator to run against a network successfully while simulating having many more accounts present.
+    /// All filler accounts share a common pubkey suffix. The suffix is randomly generated per validator on startup.
+    /// The filler accounts are added to each slot in the snapshot after index generation.
+    /// The accounts added in a slot are set up to have pubkeys such that rent will be collected from them no later than when their slot becomes an epoch old.
+    /// Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully.
+    pub fn maybe_add_filler_accounts(&self, ticks_per_slot: SlotCount) {
+        if self.filler_account_count == 0 {
+            return;
+        }
+
+        info!("adding {} filler accounts", self.filler_account_count);
+        // break this up to force the accounts out of memory after each pass
+        let passes = 100;
+        let roots = self.storage.all_slots();
+        let root_count = roots.len();
+        let per_pass = std::cmp::max(1, root_count / passes);
+        let overall_index = AtomicUsize::new(0);
+        let string = "FiLLERACCoUNTooooooooooooooooooooooooooooooo";
+        let hash = Hash::from_str(string).unwrap();
+        let owner = Pubkey::from_str(string).unwrap();
+        let lamports = 100_000_000;
+        let space = 0;
+        let account = AccountSharedData::new(lamports, space, &owner);
+        let added = AtomicUsize::default();
+        for pass in 0..=passes {
+            self.accounts_index.set_startup(true);
+            let roots_in_this_pass = roots
+                .iter()
+                .skip(pass * per_pass)
+                .take(per_pass)
+                .collect::<Vec<_>>();
+            let slot_count_in_two_day =
+                crate::bank::Bank::slot_count_in_two_day_helper(ticks_per_slot);
+            self.thread_pool.install(|| {
+                roots_in_this_pass.into_par_iter().for_each(|slot| {
+                    let storage_maps: Vec<Arc<AccountStorageEntry>> = self
+                        .storage
+                        .get_slot_storage_entries(*slot)
+                        .unwrap_or_default();
+                    if storage_maps.is_empty() {
+                        return;
+                    }
+
+                    let partition = *crate::bank::Bank::get_partitions(
+                        *slot,
+                        slot.saturating_sub(1),
+                        slot_count_in_two_day,
+                    )
+                    .last()
+                    .unwrap();
+                    let subrange = crate::bank::Bank::pubkey_range_from_partition(partition);
+
+                    let idx = overall_index.fetch_add(1, Ordering::Relaxed);
+                    let filler_entries = (idx + 1) * self.filler_account_count / root_count
+                        - idx * self.filler_account_count / root_count;
+                    let accounts = (0..filler_entries)
+                        .map(|_| {
+                            let my_id = added.fetch_add(1, Ordering::Relaxed);
+                            let my_id_bytes = u32::to_be_bytes(my_id as u32);
+
+                            // pubkey begins life as entire filler 'suffix' pubkey
+                            let mut key = self.filler_account_suffix.unwrap();
+                            let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes();
+                            // first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes
+                            key.as_mut()[0..rent_prefix_bytes]
+                                .copy_from_slice(&subrange.start().as_ref()[0..rent_prefix_bytes]);
+                            // next bytes are replaced with my_id: filler_unique_id_bytes
+                            key.as_mut()[rent_prefix_bytes
+                                ..(rent_prefix_bytes + Self::filler_unique_id_bytes())]
+                                .copy_from_slice(&my_id_bytes);
+                            assert!(subrange.contains(&key));
+                            key
+                        })
+                        .collect::<Vec<_>>();
+                    let add = accounts
+                        .iter()
+                        .map(|key| (key, &account))
+                        .collect::<Vec<_>>();
+                    let hashes = (0..filler_entries).map(|_| hash).collect::<Vec<_>>();
+                    self.store_accounts_frozen(*slot, &add[..], Some(&hashes[..]), None, None);
+                })
+            });
+            self.accounts_index.set_startup(false);
+        }
+        info!("added {} filler accounts", added.load(Ordering::Relaxed));
+    }
+
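One expression above deserves a worked check: `filler_entries` is a difference of consecutive prefix sums, which spreads `filler_account_count` across `root_count` slots with no accumulated remainder. A small arithmetic sketch (values invented):

```rust
fn main() {
    // Invented numbers: 10 fillers spread over 4 rooted slots.
    let (filler_account_count, root_count) = (10usize, 4usize);
    let per_slot: Vec<usize> = (0..root_count)
        .map(|idx| {
            // Same integer arithmetic as the patch: consecutive prefix sums
            // differ by floor(count/roots) or that plus one.
            (idx + 1) * filler_account_count / root_count
                - idx * filler_account_count / root_count
        })
        .collect();
    assert_eq!(per_slot, vec![2, 3, 2, 3]); // 2 + 3 + 2 + 3 == 10
    assert_eq!(per_slot.iter().sum::<usize>(), filler_account_count);
}
```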
     #[allow(clippy::needless_collect)]
     pub fn generate_index(&self, limit_load_slot_count_from_snapshot: Option<usize>, verify: bool) {
         let mut slots = self.storage.all_slots();
@@ -6501,7 +6662,7 @@ impl AccountsDb {
                             .storage
                             .get_slot_storage_entries(*slot)
                             .unwrap_or_default();
-                        let accounts_map = Self::process_storage_slot(&storage_maps);
+                        let accounts_map = self.process_storage_slot(&storage_maps);
                         scan_time.stop();
                         scan_time_sum += scan_time.as_us();
                         Self::update_storage_info(
@@ -7005,6 +7166,7 @@ pub mod tests {
                 bin_range,
                 check_hash,
                 None,
+                None,
             )
         }
     }
@@ -7351,6 +7513,7 @@ pub mod tests {
                 HashStats::default(),
                 false,
                 None,
+                None,
             )
             .unwrap();
         let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap();
@@ -7374,6 +7537,7 @@ pub mod tests {
                 HashStats::default(),
                 false,
                 None,
+                None,
             )
             .unwrap();

@@ -12736,7 +12900,7 @@ pub mod tests {
             .get_slot_storage_entries(slot0)
             .unwrap_or_default();
         let storage_info = StorageSizeAndCountMap::default();
-        let accounts_map = AccountsDb::process_storage_slot(&storage_maps[..]);
+        let accounts_map = accounts.process_storage_slot(&storage_maps[..]);
         AccountsDb::update_storage_info(&storage_info, &accounts_map, &Mutex::default());
         assert_eq!(storage_info.len(), 1);
         for entry in storage_info.iter() {
@@ -12749,9 +12913,10 @@ pub mod tests {

     #[test]
     fn test_calculate_storage_count_and_alive_bytes_0_accounts() {
+        let accounts = AccountsDb::new_single_for_tests();
         let storage_maps = vec![];
         let storage_info = StorageSizeAndCountMap::default();
-        let accounts_map = AccountsDb::process_storage_slot(&storage_maps[..]);
+        let accounts_map = accounts.process_storage_slot(&storage_maps[..]);
         AccountsDb::update_storage_info(&storage_info, &accounts_map, &Mutex::default());
         assert!(storage_info.is_empty());
     }
@@ -12786,7 +12951,7 @@ pub mod tests {
             .get_slot_storage_entries(slot0)
             .unwrap_or_default();
         let storage_info = StorageSizeAndCountMap::default();
-        let accounts_map = AccountsDb::process_storage_slot(&storage_maps[..]);
+        let accounts_map = accounts.process_storage_slot(&storage_maps[..]);
         AccountsDb::update_storage_info(&storage_info, &accounts_map, &Mutex::default());
         assert_eq!(storage_info.len(), 1);
         for entry in storage_info.iter() {
diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs
index d9bb0270b1..ac2cd48d65 100644
--- a/runtime/src/accounts_hash.rs
+++ b/runtime/src/accounts_hash.rs
@@ -195,9 +195,9 @@ impl CumulativeOffsets {
     }
 }

-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct AccountsHash {
-    pub dummy: i32,
+    pub filler_account_suffix: Option<Pubkey>,
 }

 impl AccountsHash {
@@ -504,6 +504,7 @@ impl AccountsHash {
     }

     fn de_dup_and_eliminate_zeros(
+        &self,
         sorted_data_by_pubkey: Vec<Vec<Vec<CalculateHashIntermediate>>>,
         stats: &mut HashStats,
         max_bin: usize,
@@ -520,7 +521,7 @@ impl AccountsHash {
             .into_par_iter()
             .map(|bin| {
                 let (hashes, lamports_bin, unreduced_entries_count) =
-                    Self::de_dup_accounts_in_parallel(&sorted_data_by_pubkey, bin);
+                    self.de_dup_accounts_in_parallel(&sorted_data_by_pubkey, bin);
                 {
                     let mut lock = min_max_sum_entries_hashes.lock().unwrap();
                     let (mut min, mut max, mut lamports_sum, mut entries, mut hash_total) = *lock;
@@ -594,6 +595,7 @@ impl AccountsHash {
     // b. lamport sum
     // c. unreduced count (ie. including duplicates and zero lamport)
     fn de_dup_accounts_in_parallel(
+        &self,
         pubkey_division: &[Vec<Vec<CalculateHashIntermediate>>],
         pubkey_bin: usize,
     ) -> (Vec<Hash>, u64, usize) {
@@ -658,7 +660,8 @@ impl AccountsHash {
                 pubkey_division,
                 &mut indexes,
             );
-            if item.lamports != ZERO_RAW_LAMPORTS_SENTINEL {
+            if !self.is_filler_account(&item.pubkey) && item.lamports != ZERO_RAW_LAMPORTS_SENTINEL
+            {
                 overall_sum = Self::checked_cast_for_capitalization(
                     item.lamports as u128 + overall_sum as u128,
                 );
@@ -668,11 +671,19 @@ impl AccountsHash {
         (hashes, overall_sum, item_len)
     }

+    fn is_filler_account(&self, pubkey: &Pubkey) -> bool {
+        crate::accounts_db::AccountsDb::is_filler_account_helper(
+            pubkey,
+            self.filler_account_suffix.as_ref(),
+        )
+    }
+
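`AccountsHash` goes from a placeholder `dummy: i32` to a struct that actually carries state, and deriving `Default` is what keeps the test hunks below honest: `AccountsHash::default()` has no suffix, so it reproduces the old "nothing is a filler" behavior. A toy mirror of that design choice (hypothetical type, not the crate's):

```rust
use solana_sdk::pubkey::Pubkey;

// Toy mirror of the struct change above: Default yields None, so
// default-constructed hashers preserve pre-filler semantics.
#[derive(Debug, Default)]
struct HasherConfig {
    filler_account_suffix: Option<Pubkey>,
}

fn main() {
    assert!(HasherConfig::default().filler_account_suffix.is_none());
}
```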
     // input:
     // vec: group of slot data, ordered by Slot (low to high)
     // vec: [0..bins] - where bins are pubkey ranges (these are ordered by Pubkey range)
     // vec: [..] - items which fit in the containing bin.
     //      Sorted by: Pubkey, higher Slot, higher Write version (when pubkeys are equal)
     pub fn rest_of_hash_calculation(
+        &self,
         data_sections_by_pubkey: Vec<Vec<Vec<CalculateHashIntermediate>>>,
         mut stats: &mut HashStats,
         is_last_pass: bool,
@@ -680,7 +691,7 @@ impl AccountsHash {
         max_bin: usize,
     ) -> (Hash, u64, PreviousPass) {
         let (mut hashes, mut total_lamports) =
-            Self::de_dup_and_eliminate_zeros(data_sections_by_pubkey, stats, max_bin);
+            self.de_dup_and_eliminate_zeros(data_sections_by_pubkey, stats, max_bin);

         total_lamports += previous_state.lamports;
@@ -823,7 +834,8 @@ pub mod tests {
         let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key);
         account_maps.push(val);

-        let result = AccountsHash::rest_of_hash_calculation(
+        let accounts_hash = AccountsHash::default();
+        let result = accounts_hash.rest_of_hash_calculation(
             for_rest(account_maps.clone()),
             &mut HashStats::default(),
             true,
@@ -839,7 +851,7 @@ pub mod tests {
         let val = CalculateHashIntermediate::new(hash, 20, key);
         account_maps.insert(0, val);

-        let result = AccountsHash::rest_of_hash_calculation(
+        let result = accounts_hash.rest_of_hash_calculation(
             for_rest(account_maps.clone()),
             &mut HashStats::default(),
             true,
@@ -855,7 +867,7 @@ pub mod tests {
         let val = CalculateHashIntermediate::new(hash, 30, key);
         account_maps.insert(1, val);

-        let result = AccountsHash::rest_of_hash_calculation(
+        let result = accounts_hash.rest_of_hash_calculation(
             for_rest(account_maps),
             &mut HashStats::default(),
             true,
@@ -898,9 +910,10 @@ pub mod tests {

             let mut previous_pass = PreviousPass::default();

+            let accounts_hash = AccountsHash::default();
             if pass == 0 {
                 // first pass that is not last and is empty
-                let result = AccountsHash::rest_of_hash_calculation(
+                let result = accounts_hash.rest_of_hash_calculation(
                     vec![vec![vec![]]],
                     &mut HashStats::default(),
                     false, // not last pass
@@ -915,7 +928,7 @@ pub mod tests {
                 assert_eq!(previous_pass.lamports, 0);
             }

-            let result = AccountsHash::rest_of_hash_calculation(
+            let result = accounts_hash.rest_of_hash_calculation(
                 for_rest(account_maps.clone()),
                 &mut HashStats::default(),
                 false, // not last pass
@@ -932,8 +945,9 @@ pub mod tests {
         let expected_hash =
             Hash::from_str("8j9ARGFv4W2GfML7d3sVJK2MePwrikqYnu6yqer28cCa").unwrap();
+        let accounts_hash = AccountsHash::default();
         if pass == 2 {
-            let result = AccountsHash::rest_of_hash_calculation(
+            let result = accounts_hash.rest_of_hash_calculation(
                 vec![vec![vec![]]],
                 &mut HashStats::default(),
                 false,
@@ -947,7 +961,7 @@ pub mod tests {
             assert_eq!(previous_pass.lamports, account_maps[0].lamports);
         }

-        let result = AccountsHash::rest_of_hash_calculation(
+        let result = accounts_hash.rest_of_hash_calculation(
             vec![vec![vec![]]],
             &mut HashStats::default(),
             true, // finally, last pass
@@ -979,8 +993,8 @@ pub mod tests {
         let hash = Hash::new(&[2u8; 32]);
         let val = CalculateHashIntermediate::new(hash, 20, key);
         account_maps.push(val);
-
-        let result = AccountsHash::rest_of_hash_calculation(
+        let accounts_hash = AccountsHash::default();
+        let result = accounts_hash.rest_of_hash_calculation(
             for_rest(vec![account_maps[0].clone()]),
             &mut HashStats::default(),
             false, // not last pass
@@ -995,7 +1009,7 @@ pub mod tests {
         assert_eq!(previous_pass.reduced_hashes.len(), 0);
         assert_eq!(previous_pass.lamports, account_maps[0].lamports);

-        let result = AccountsHash::rest_of_hash_calculation(
+        let result = accounts_hash.rest_of_hash_calculation(
             for_rest(vec![account_maps[1].clone()]),
             &mut HashStats::default(),
             false, // not last pass
@@ -1014,7 +1028,7 @@ pub mod tests {
         let total_lamports_expected = account_maps[0].lamports + account_maps[1].lamports;
         assert_eq!(previous_pass.lamports, total_lamports_expected);
-        let result = AccountsHash::rest_of_hash_calculation(
+        let result = accounts_hash.rest_of_hash_calculation(
             vec![vec![vec![]]],
             &mut HashStats::default(),
             true,
@@ -1046,6 +1060,7 @@ pub mod tests {
         solana_logger::setup();

         let mut account_maps = Vec::new();
+        let accounts_hash = AccountsHash::default();

         const TARGET_FANOUT_LEVEL: usize = 3;
         let target_fanout = MERKLE_FANOUT.pow(TARGET_FANOUT_LEVEL as u32);
@@ -1065,7 +1080,7 @@ pub mod tests {
             let sorted = chunk.clone();

             // first 4097 hashes (1 left over)
-            let result = AccountsHash::rest_of_hash_calculation(
+            let result = accounts_hash.rest_of_hash_calculation(
                 for_rest(chunk),
                 &mut HashStats::default(),
                 false, // not last pass
@@ -1110,7 +1125,7 @@ pub mod tests {
             );

             // second 4097 hashes (2 left over)
-            let result = AccountsHash::rest_of_hash_calculation(
+            let result = accounts_hash.rest_of_hash_calculation(
                 for_rest(chunk),
                 &mut HashStats::default(),
                 false, // not last pass
@@ -1138,7 +1153,7 @@ pub mod tests {
                     .sum::<u64>()
             );

-            let result = AccountsHash::rest_of_hash_calculation(
+            let result = accounts_hash.rest_of_hash_calculation(
                 vec![vec![vec![]]],
                 &mut HashStats::default(),
                 true,
@@ -1169,10 +1184,8 @@ pub mod tests {

     #[test]
     fn test_accountsdb_de_dup_accounts_zero_chunks() {
-        let (hashes, lamports, _) = AccountsHash::de_dup_accounts_in_parallel(
-            &[vec![vec![CalculateHashIntermediate::default()]]],
-            0,
-        );
+        let (hashes, lamports, _) = AccountsHash::default()
+            .de_dup_accounts_in_parallel(&[vec![vec![CalculateHashIntermediate::default()]]], 0);
         assert_eq!(vec![Hash::default()], hashes);
         assert_eq!(lamports, 0);
     }
@@ -1180,8 +1193,9 @@ pub mod tests {
     #[test]
     fn test_accountsdb_de_dup_accounts_empty() {
         solana_logger::setup();
+        let accounts_hash = AccountsHash::default();

-        let (hashes, lamports) = AccountsHash::de_dup_and_eliminate_zeros(
+        let (hashes, lamports) = accounts_hash.de_dup_and_eliminate_zeros(
             vec![vec![], vec![]],
             &mut HashStats::default(),
             one_range(),
         );
         assert_eq!(
@@ -1192,7 +1206,7 @@ pub mod tests {
         );
         assert_eq!(lamports, 0);

-        let (hashes, lamports) = AccountsHash::de_dup_and_eliminate_zeros(
+        let (hashes, lamports) = accounts_hash.de_dup_and_eliminate_zeros(
             vec![],
             &mut HashStats::default(),
             zero_range(),
         );
         assert_eq!(empty, hashes);
         assert_eq!(lamports, 0);

-        let (hashes, lamports, _) = AccountsHash::de_dup_accounts_in_parallel(&[], 1);
+        let (hashes, lamports, _) = accounts_hash.de_dup_accounts_in_parallel(&[], 1);
         assert_eq!(vec![Hash::default(); 0], hashes);
         assert_eq!(lamports, 0);

-        let (hashes, lamports, _) = AccountsHash::de_dup_accounts_in_parallel(&[], 2);
+        let (hashes, lamports, _) = accounts_hash.de_dup_accounts_in_parallel(&[], 2);
         assert_eq!(vec![Hash::default(); 0], hashes);
         assert_eq!(lamports, 0);
     }
@@ -1276,6 +1290,7 @@ pub mod tests {
                 result
             }).collect();

+        let hash = AccountsHash::default();
         let mut expected_index = 0;
         for last_slice in 0..2 {
             for start in 0..COUNT {
@@ -1286,21 +1301,19 @@ pub mod tests {
                 let slice2 = vec![vec![slice.to_vec()]];
                 let slice = &slice2[..];

-                let (hashes2, lamports2, _) =
-                    AccountsHash::de_dup_accounts_in_parallel(slice, 0);
-                let (hashes3, lamports3, _) =
-                    AccountsHash::de_dup_accounts_in_parallel(slice, 0);
-                let (hashes4, lamports4) = AccountsHash::de_dup_and_eliminate_zeros(
+                let (hashes2, lamports2, _) = hash.de_dup_accounts_in_parallel(slice, 0);
+                let (hashes3, lamports3, _) = hash.de_dup_accounts_in_parallel(slice, 0);
+                let (hashes4, lamports4) = hash.de_dup_and_eliminate_zeros(
                     slice.to_vec(),
                     &mut HashStats::default(),
                     end - start,
                 );
-                let (hashes5, lamports5) = AccountsHash::de_dup_and_eliminate_zeros(
+                let (hashes5, lamports5) = hash.de_dup_and_eliminate_zeros(
                     slice.to_vec(),
                     &mut HashStats::default(),
                     end - start,
                 );
-                let (hashes6, lamports6) = AccountsHash::de_dup_and_eliminate_zeros(
+                let (hashes6, lamports6) = hash.de_dup_and_eliminate_zeros(
                     slice.to_vec(),
                     &mut HashStats::default(),
                     end - start,
@@ -1413,7 +1426,8 @@ pub mod tests {
     fn test_de_dup_accounts_in_parallel(
         account_maps: &[CalculateHashIntermediate],
     ) -> (Vec<Hash>, u64, usize) {
-        AccountsHash::de_dup_accounts_in_parallel(&vec![vec![account_maps.to_vec()]][..], 0)
+        AccountsHash::default()
+            .de_dup_accounts_in_parallel(&vec![vec![account_maps.to_vec()]][..], 0)
     }

     #[test]
@@ -1803,7 +1817,7 @@ pub mod tests {
             ),
             CalculateHashIntermediate::new(Hash::new(&[2u8; 32]), offset + 1, Pubkey::new_unique()),
         ];
-        AccountsHash::de_dup_accounts_in_parallel(&[vec![input]], 0);
+        AccountsHash::default().de_dup_accounts_in_parallel(&[vec![input]], 0);
     }

     #[test]
@@ -1824,7 +1838,7 @@ pub mod tests {
                 Pubkey::new_unique(),
             )],
         ];
-        AccountsHash::de_dup_and_eliminate_zeros(
+        AccountsHash::default().de_dup_and_eliminate_zeros(
             vec![input],
             &mut HashStats::default(),
             2, // accounts above are in 2 groups
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 87e4f09640..3c2b675f00 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -4410,6 +4410,7 @@ impl Bank {
                 &pubkey,
                 &mut account,
                 rent_for_sysvars,
+                self.rc.accounts.accounts_db.filler_account_suffix.as_ref(),
             );
             total_rent += rent;
             // Store all of them unconditionally to purge old AppendVec,
@@ -4431,7 +4432,7 @@ impl Bank {
     // start_index..=end_index. But it has some exceptional cases, including
     // this important and valid one:
     // 0..=0: the first partition in the new epoch when crossing epochs
-    fn pubkey_range_from_partition(
+    pub fn pubkey_range_from_partition(
         (start_index, end_index, partition_count): Partition,
     ) -> RangeInclusive<Pubkey> {
         assert!(start_index <= end_index);
@@ -4496,13 +4497,15 @@ impl Bank {
         Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey)
     }

-    fn fixed_cycle_partitions(&self) -> Vec<Partition> {
-        let slot_count_in_two_day = self.slot_count_in_two_day();
-
-        let parent_cycle = self.parent_slot() / slot_count_in_two_day;
-        let current_cycle = self.slot() / slot_count_in_two_day;
-        let mut parent_cycle_index = self.parent_slot() % slot_count_in_two_day;
-        let current_cycle_index = self.slot() % slot_count_in_two_day;
+    pub fn get_partitions(
+        slot: Slot,
+        parent_slot: Slot,
+        slot_count_in_two_day: SlotCount,
+    ) -> Vec<Partition> {
+        let parent_cycle = parent_slot / slot_count_in_two_day;
+        let current_cycle = slot / slot_count_in_two_day;
+        let mut parent_cycle_index = parent_slot % slot_count_in_two_day;
+        let current_cycle_index = slot % slot_count_in_two_day;
         let mut partitions = vec![];
         if parent_cycle < current_cycle {
             if current_cycle_index > 0 {
@@ -4531,6 +4534,11 @@ impl Bank {
         partitions
     }

+    fn fixed_cycle_partitions(&self) -> Vec<Partition> {
+        let slot_count_in_two_day = self.slot_count_in_two_day();
+        Self::get_partitions(self.slot(), self.parent_slot(), slot_count_in_two_day)
+    }
+
     fn variable_cycle_partitions(&self) -> Vec<Partition> {
         let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot());
         let (parent_epoch, mut parent_slot_index) =
@@ -4722,11 +4730,15 @@ impl Bank {
             && self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
     }

+    fn slot_count_in_two_day(&self) -> SlotCount {
+        Self::slot_count_in_two_day_helper(self.ticks_per_slot)
+    }
+
     // This value is specially chosen to align with slots per epoch in mainnet-beta and testnet
     // Also, assume 500GB account data set as the extreme, then for 2 days (=48 hours) to collect
     // rent eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally.
-    fn slot_count_in_two_day(&self) -> SlotCount {
-        2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / self.ticks_per_slot
+    pub fn slot_count_in_two_day_helper(ticks_per_slot: SlotCount) -> SlotCount {
+        2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / ticks_per_slot
     }

     fn slot_count_per_normal_epoch(&self) -> SlotCount {
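The refactor into a static helper is why `maybe_add_filler_accounts` needs `ticks_per_slot`: the two-day window is derived from it. A worked check with the defaults I believe apply on mainnet-beta (constants hardcoded here for illustration):

```rust
fn main() {
    // Assumed solana_sdk defaults at the time of this patch.
    const DEFAULT_TICKS_PER_SECOND: u64 = 160;
    const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
    let ticks_per_slot = 64u64; // assumed DEFAULT_TICKS_PER_SLOT

    // Same formula as slot_count_in_two_day_helper above.
    let slots = 2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / ticks_per_slot;
    assert_eq!(slots, 432_000); // exactly one mainnet-beta epoch's worth of slots
    println!("two days = {} slots", slots);
}
```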
diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs
index 9593f1b423..308660ccf3 100644
--- a/runtime/src/rent_collector.rs
+++ b/runtime/src/rent_collector.rs
@@ -61,11 +61,13 @@ impl RentCollector {
         address: &Pubkey,
         account: &mut AccountSharedData,
         rent_for_sysvars: bool,
+        filler_account_suffix: Option<&Pubkey>,
     ) -> u64 {
         if account.executable() // executable accounts must be rent-exempt balance
             || account.rent_epoch() > self.epoch
             || (!rent_for_sysvars && sysvar::check_id(account.owner()))
             || *address == incinerator::id()
+            || crate::accounts_db::AccountsDb::is_filler_account_helper(address, filler_account_suffix)
         {
             0
         } else {
@@ -120,7 +122,7 @@ impl RentCollector {
     ) -> u64 {
         // initialize rent_epoch as created at this epoch
         account.set_rent_epoch(self.epoch);
-        self.collect_from_existing_account(address, account, rent_for_sysvars)
+        self.collect_from_existing_account(address, account, rent_for_sysvars, None)
     }
 }

@@ -162,6 +164,7 @@ mod tests {
             &solana_sdk::pubkey::new_rand(),
             &mut existing_account,
             true,
+            None,
         );
         assert!(existing_account.lamports() < old_lamports);
         assert_eq!(existing_account.lamports() + collected, old_lamports);
@@ -188,7 +191,7 @@ mod tests {
         let rent_collector = RentCollector::default().clone_with_epoch(epoch);

         // first mark account as being collected while being rent-exempt
-        collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, true);
+        collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, true, None);
         assert_eq!(account.lamports(), huge_lamports);
         assert_eq!(collected, 0);
@@ -196,7 +199,7 @@ mod tests {
         account.set_lamports(tiny_lamports);

         // ... and trigger another rent collection on the same epoch and check that rent is working
-        collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, true);
+        collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, true, None);
         assert_eq!(account.lamports(), tiny_lamports - collected);
         assert_ne!(collected, 0);
     }
@@ -216,12 +219,14 @@ mod tests {
         let rent_collector = RentCollector::default().clone_with_epoch(epoch);

         // old behavior: sysvars are special-cased
-        let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, false);
+        let collected =
+            rent_collector.collect_from_existing_account(&pubkey, &mut account, false, None);
         assert_eq!(account.lamports(), tiny_lamports);
         assert_eq!(collected, 0);

         // new behavior: sysvars are NOT special-cased
-        let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account, true);
+        let collected =
+            rent_collector.collect_from_existing_account(&pubkey, &mut account, true, None);
         assert_eq!(account.lamports(), 0);
         assert_eq!(collected, 1);
     }
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index 6e0378a450..f744848d4f 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -29,7 +29,6 @@ use {
         clock::{Epoch, Slot, UnixTimestamp},
         epoch_schedule::EpochSchedule,
         fee_calculator::{FeeCalculator, FeeRateGovernor},
-        genesis_config::ClusterType,
         genesis_config::GenesisConfig,
         hard_forks::HardForks,
         hash::Hash,
@@ -347,7 +346,7 @@ where
         snapshot_accounts_db_fields,
         account_paths,
         unpacked_append_vec_map,
-        &genesis_config.cluster_type,
+        genesis_config,
         account_secondary_indexes,
         caching_enabled,
         limit_load_slot_count_from_snapshot,
@@ -402,7 +401,7 @@ fn reconstruct_accountsdb_from_fields(
     snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
     account_paths: &[PathBuf],
     unpacked_append_vec_map: UnpackedAppendVecMap,
-    cluster_type: &ClusterType,
+    genesis_config: &GenesisConfig,
     account_secondary_indexes: AccountSecondaryIndexes,
     caching_enabled: bool,
     limit_load_slot_count_from_snapshot: Option<usize>,
@@ -416,7 +415,7 @@ where
 {
     let mut accounts_db = AccountsDb::new_with_config(
         account_paths.to_vec(),
-        cluster_type,
+        &genesis_config.cluster_type,
         account_secondary_indexes,
         caching_enabled,
         shrink_ratio,
@@ -536,6 +535,7 @@ where
         .write_version
         .fetch_add(snapshot_version, Ordering::Relaxed);
     accounts_db.generate_index(limit_load_slot_count_from_snapshot, verify_index);
+    accounts_db.maybe_add_filler_accounts(genesis_config.ticks_per_slot());

     let mut measure_notify = Measure::start("accounts_notify");
     accounts_db.notify_account_restore_from_snapshot();
diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs
index c79812f24e..4b9c85649f 100644
--- a/runtime/src/serde_snapshot/tests.rs
+++ b/runtime/src/serde_snapshot/tests.rs
@@ -77,7 +77,10 @@ where
         snapshot_accounts_db_fields,
         account_paths,
         unpacked_append_vec_map,
-        &ClusterType::Development,
+        &GenesisConfig {
+            cluster_type: ClusterType::Development,
+            ..GenesisConfig::default()
+        },
         AccountSecondaryIndexes::default(),
         false,
         None,
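The signature widening from `&ClusterType` to `&GenesisConfig` exists because the reconstruction path now needs two things from genesis rather than one. A hedged sketch of that dependency (field access shown here; the patch itself calls `ticks_per_slot()`):

```rust
use solana_sdk::genesis_config::{ClusterType, GenesisConfig};

// Hypothetical mirror of the change above: cluster_type still feeds
// AccountsDb::new_with_config; ticks_per_slot now also feeds
// maybe_add_filler_accounts for the two-day rent-partition math.
fn needs_from_genesis(genesis_config: &GenesisConfig) -> (ClusterType, u64) {
    (genesis_config.cluster_type, genesis_config.ticks_per_slot)
}

fn main() {
    let genesis_config = GenesisConfig::default();
    let (cluster_type, ticks_per_slot) = needs_from_genesis(&genesis_config);
    println!("{:?}, {} ticks/slot", cluster_type, ticks_per_slot);
}
```

Note the ordering in the hunk above: `generate_index` runs over the real snapshot accounts first, so the `assert!(!self.is_filler_account(..))` in `process_storage_slot` can fire before any filler keys are stored.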
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 4b922b930a..0d73b58181 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1380,8 +1380,14 @@ pub fn main() {
                 .help("Persistent accounts-index location. \
                        May be specified multiple times. \
                        [default: [ledger]/accounts_index]"),
-        )
-        .arg(
+        )
+        .arg(Arg::with_name("accounts_filler_count")
+            .long("accounts-filler-count")
+            .value_name("COUNT")
+            .validator(is_parsable::<usize>)
+            .takes_value(true)
+            .help("How many accounts to add to stress the system. Filler accounts are ignored in operations related to correctness."))
+        .arg(
             Arg::with_name("accounts_db_test_hash_calculation")
                 .long("accounts-db-test-hash-calculation")
                 .help("Enables testing of hash calculation using stores in \
@@ -1949,9 +1955,11 @@ pub fn main() {
         accounts_index_config.drives = Some(accounts_index_paths);
     }

+    let filler_account_count = value_t!(matches, "accounts_filler_count", usize).ok();
     let accounts_db_config = Some(AccountsDbConfig {
         index: Some(accounts_index_config),
         accounts_hash_cache_path: Some(ledger_path.clone()),
+        filler_account_count,
     });

     let accountsdb_repl_service_config = if matches.is_present("enable_accountsdb_repl") {
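Putting the pieces together, the invariant the whole patch relies on is: a key built from any prefix plus the shared suffix is recognized as filler, exempt from rent, and skipped by hashing. A self-contained sketch of that invariant, using standalone toy versions of the two helpers (not the crate's actual API):

```rust
use solana_sdk::pubkey::Pubkey;

const RENT_PREFIX_BYTES: usize = 8; // size_of::<u64>()
const UNIQUE_ID_BYTES: usize = 4; // size_of::<u32>()
const PREFIX_BYTES: usize = RENT_PREFIX_BYTES + UNIQUE_ID_BYTES;

// Toy version of AccountsDb::is_filler_account_helper.
fn is_filler(pubkey: &Pubkey, suffix: Option<&Pubkey>) -> bool {
    suffix
        .map(|s| pubkey.as_ref()[PREFIX_BYTES..] == s.as_ref()[PREFIX_BYTES..])
        .unwrap_or_default()
}

// Toy version of the key construction in maybe_add_filler_accounts.
fn make_filler_key(suffix: &Pubkey, partition_start: &Pubkey, my_id: u32) -> Pubkey {
    let mut key = *suffix;
    key.as_mut()[..RENT_PREFIX_BYTES]
        .copy_from_slice(&partition_start.as_ref()[..RENT_PREFIX_BYTES]);
    key.as_mut()[RENT_PREFIX_BYTES..PREFIX_BYTES].copy_from_slice(&my_id.to_be_bytes());
    key
}

fn main() {
    let suffix = Pubkey::new_unique();
    let partition_start = Pubkey::new_unique();
    let key = make_filler_key(&suffix, &partition_start, 42);

    assert!(is_filler(&key, Some(&suffix)));
    // With fillers disabled (no suffix configured), nothing is treated as filler.
    assert!(!is_filler(&key, None));
}
```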