diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index c439a6dd11..1857314a92 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -9,7 +9,7 @@ use { accounts::Accounts, accounts_db::{ test_utils::{create_test_accounts, update_accounts_bench}, - AccountShrinkThreshold, CalcAccountsHashDataSource, INCLUDE_SLOT_IN_HASH_TESTS, + AccountShrinkThreshold, CalcAccountsHashDataSource, }, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, @@ -134,7 +134,6 @@ fn main() { &EpochSchedule::default(), &RentCollector::default(), true, - INCLUDE_SLOT_IN_HASH_TESTS, ); time_store.stop(); if results != results_store { diff --git a/accounts-db/benches/append_vec.rs b/accounts-db/benches/append_vec.rs index 9f287eeed1..83517e7ac4 100644 --- a/accounts-db/benches/append_vec.rs +++ b/accounts-db/benches/append_vec.rs @@ -7,7 +7,6 @@ use { account_storage::meta::{ StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredMeta, }, - accounts_db::INCLUDE_SLOT_IN_HASH_TESTS, accounts_hash::AccountHash, append_vec::{ test_utils::{create_test_account, get_append_vec_path}, @@ -39,7 +38,7 @@ fn append_account( let slot_ignored = Slot::MAX; let accounts = [(&storage_meta.pubkey, account)]; let slice = &accounts[..]; - let accounts = (slot_ignored, slice, INCLUDE_SLOT_IN_HASH_TESTS); + let accounts = (slot_ignored, slice); let storable_accounts = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &accounts, diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 47b372d981..7265626d89 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -3,10 +3,9 @@ use { account_overrides::AccountOverrides, account_rent_state::{check_rent_state_with_account, RentState}, accounts_db::{ - AccountShrinkThreshold, AccountsAddRootTiming, AccountsDb, AccountsDbConfig, - IncludeSlotInHash, LoadHint, LoadedAccount, ScanStorageResult, - VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, - ACCOUNTS_DB_CONFIG_FOR_TESTING, + AccountShrinkThreshold, AccountsAddRootTiming, AccountsDb, AccountsDbConfig, LoadHint, + LoadedAccount, ScanStorageResult, VerifyAccountsHashAndLamportsConfig, + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, accounts_index::{ AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport, @@ -1311,7 +1310,6 @@ impl Accounts { rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, - include_slot_in_hash: IncludeSlotInHash, ) { let (accounts_to_store, transactions) = self.collect_accounts_to_store( txs, @@ -1321,10 +1319,8 @@ impl Accounts { durable_nonce, lamports_per_signature, ); - self.accounts_db.store_cached_inline_update_index( - (slot, &accounts_to_store[..], include_slot_in_hash), - Some(&transactions), - ); + self.accounts_db + .store_cached_inline_update_index((slot, &accounts_to_store[..]), Some(&transactions)); } pub fn store_accounts_cached<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index cb38243fd2..e612ad741e 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -1,8 +1,5 @@ use { - crate::{ - accounts_db::{AccountsDb, IncludeSlotInHash}, - accounts_hash::AccountHash, - }, + crate::{accounts_db::AccountsDb, accounts_hash::AccountHash}, dashmap::DashMap, seqlock::SeqLock, solana_sdk::{ @@ -68,20 +65,12 @@ impl SlotCacheInner { 
self.cache.iter().map(|item| *item.key()).collect() } - pub fn insert( - &self, - pubkey: &Pubkey, - account: AccountSharedData, - slot: Slot, - include_slot_in_hash: IncludeSlotInHash, - ) -> CachedAccount { + pub fn insert(&self, pubkey: &Pubkey, account: AccountSharedData) -> CachedAccount { let data_len = account.data().len() as u64; let item = Arc::new(CachedAccountInner { account, hash: SeqLock::new(None), - slot, pubkey: *pubkey, - include_slot_in_hash, }); if let Some(old) = self.cache.insert(*pubkey, item.clone()) { self.same_account_writes.fetch_add(1, Ordering::Relaxed); @@ -145,11 +134,7 @@ pub type CachedAccount = Arc; pub struct CachedAccountInner { pub account: AccountSharedData, hash: SeqLock>, - slot: Slot, pubkey: Pubkey, - /// temporarily here during feature activation - /// since we calculate the hash later, or in the background, we need knowledge of whether this slot uses the slot in the hash or not - pub include_slot_in_hash: IncludeSlotInHash, } impl CachedAccountInner { @@ -158,12 +143,7 @@ impl CachedAccountInner { match hash { Some(hash) => hash, None => { - let hash = AccountsDb::hash_account( - self.slot, - &self.account, - &self.pubkey, - self.include_slot_in_hash, - ); + let hash = AccountsDb::hash_account(&self.account, &self.pubkey); *self.hash.lock_write() = Some(hash); hash } @@ -228,13 +208,7 @@ impl AccountsCache { ); } - pub fn store( - &self, - slot: Slot, - pubkey: &Pubkey, - account: AccountSharedData, - include_slot_in_hash: IncludeSlotInHash, - ) -> CachedAccount { + pub fn store(&self, slot: Slot, pubkey: &Pubkey, account: AccountSharedData) -> CachedAccount { let slot_cache = self.slot_cache(slot).unwrap_or_else(|| // DashMap entry.or_insert() returns a RefMut, essentially a write lock, // which is dropped after this block ends, minimizing time held by the lock. @@ -246,7 +220,7 @@ impl AccountsCache { .or_insert(self.new_inner()) .clone()); - slot_cache.insert(pubkey, account, slot, include_slot_in_hash) + slot_cache.insert(pubkey, account) } pub fn load(&self, slot: Slot, pubkey: &Pubkey) -> Option { @@ -338,7 +312,7 @@ impl AccountsCache { #[cfg(test)] pub mod tests { - use {super::*, crate::accounts_db::INCLUDE_SLOT_IN_HASH_TESTS}; + use super::*; #[test] fn test_remove_slots_le() { @@ -350,7 +324,6 @@ pub mod tests { inserted_slot, &Pubkey::new_unique(), AccountSharedData::new(1, 0, &Pubkey::default()), - INCLUDE_SLOT_IN_HASH_TESTS, ); // If the cache is told the size limit is 0, it should return the one slot let removed = cache.remove_slots_le(0); @@ -368,7 +341,6 @@ pub mod tests { inserted_slot, &Pubkey::new_unique(), AccountSharedData::new(1, 0, &Pubkey::default()), - INCLUDE_SLOT_IN_HASH_TESTS, ); // If the cache is told the size limit is 0, it should return nothing, because there's no diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 711c893dfe..78208cd79b 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -143,22 +143,6 @@ const MAX_ITEMS_PER_CHUNK: Slot = 2_500; // This allows us to split up accounts index accesses across multiple threads. 
const SHRINK_COLLECT_CHUNK_SIZE: usize = 50; -/// temporary enum during feature activation of -/// ignore slot when calculating an account hash #28420 -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum IncludeSlotInHash { - /// this is the status quo, prior to feature activation - /// INCLUDE the slot in the account hash calculation - IncludeSlot, - /// this is the value once feature activation occurs - /// do NOT include the slot in the account hash calculation - RemoveSlot, - /// this option should not be used. - /// If it is, this is a panic worthy event. - /// There are code paths where the feature activation status isn't known, but this value should not possibly be used. - IrrelevantAssertOnUse, -} - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum CreateAncientStorage { /// ancient storages are created by appending @@ -227,7 +211,6 @@ pub struct VerifyAccountsHashAndLamportsConfig<'a> { pub store_detailed_debug_info: bool, /// true to use dedicated background thread pool for verification pub use_bg_thread_pool: bool, - pub include_slot_in_hash: IncludeSlotInHash, } pub(crate) trait ShrinkCollectRefs<'a>: Sync + Send { @@ -329,20 +312,6 @@ impl<'a> ShrinkCollectRefs<'a> for ShrinkCollectAliveSeparatedByRefs<'a> { } } -/// used by tests for 'include_slot_in_hash' parameter -/// Tests just need to be self-consistent, so any value should work here. -pub const INCLUDE_SLOT_IN_HASH_TESTS: IncludeSlotInHash = IncludeSlotInHash::IncludeSlot; - -// This value is irrelevant because we are reading from append vecs and the hash is already computed and saved. -// The hash will just be loaded from the append vec as opposed to being calculated initially. -// A shrink-type operation involves reading from an append vec and writing a subset of the read accounts to a new append vec. -// So, by definition, we will just read hashes and write hashes. The hash will not be calculated. -// The 'store' apis are shared, such that the initial store from a bank (where we need to know whether to include the slot) -// must include a feature-based value for 'include_slot_in_hash'. Other uses, specifically shrink, do NOT need to pass this -// parameter, but the shared api requires a value. 
-pub const INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION: IncludeSlotInHash = - IncludeSlotInHash::IrrelevantAssertOnUse; - pub enum StoreReclaims { /// normal reclaim mode Default, @@ -414,12 +383,7 @@ impl CurrentAncientAppendVec { let accounts = accounts_to_store.get(storage_selector); db.store_accounts_frozen( - ( - self.slot(), - accounts, - INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, - accounts_to_store.slot, - ), + (self.slot(), accounts, accounts_to_store.slot), None::>, self.append_vec(), None, @@ -944,21 +908,13 @@ impl<'a> LoadedAccount<'a> { } } - pub fn compute_hash( - &self, - slot: Slot, - pubkey: &Pubkey, - include_slot: IncludeSlotInHash, - ) -> AccountHash { + pub fn compute_hash(&self, pubkey: &Pubkey) -> AccountHash { match self { - LoadedAccount::Stored(stored_account_meta) => AccountsDb::hash_account( - slot, - stored_account_meta, - stored_account_meta.pubkey(), - include_slot, - ), + LoadedAccount::Stored(stored_account_meta) => { + AccountsDb::hash_account(stored_account_meta, stored_account_meta.pubkey()) + } LoadedAccount::Cached(cached_account) => { - AccountsDb::hash_account(slot, &cached_account.account, pubkey, include_slot) + AccountsDb::hash_account(&cached_account.account, pubkey) } } } @@ -2451,11 +2407,7 @@ impl<'a> AppendVecScan for ScanState<'a> { if (self.config.check_hash || hash_is_missing) && !AccountsDb::is_filler_account_helper(pubkey, self.filler_account_suffix) { - let computed_hash = loaded_account.compute_hash( - self.current_slot, - pubkey, - self.config.include_slot_in_hash, - ); + let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { loaded_hash = computed_hash; } else if self.config.check_hash && computed_hash != loaded_hash { @@ -4176,11 +4128,7 @@ impl AccountsDb { // without use of rather wide locks in this whole function, because we're // mutating rooted slots; There should be no writers to them. 
stats_sub.store_accounts_timing = self.store_accounts_frozen( - ( - slot, - &shrink_collect.alive_accounts.alive_accounts()[..], - INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, - ), + (slot, &shrink_collect.alive_accounts.alive_accounts()[..]), None::>, shrink_in_progress.new_storage(), None, @@ -6242,33 +6190,24 @@ impl AccountsDb { } } - pub fn hash_account( - slot: Slot, - account: &T, - pubkey: &Pubkey, - include_slot: IncludeSlotInHash, - ) -> AccountHash { + pub fn hash_account(account: &T, pubkey: &Pubkey) -> AccountHash { Self::hash_account_data( - slot, account.lamports(), account.owner(), account.executable(), account.rent_epoch(), account.data(), pubkey, - include_slot, ) } fn hash_account_data( - slot: Slot, lamports: u64, owner: &Pubkey, executable: bool, rent_epoch: Epoch, data: &[u8], pubkey: &Pubkey, - include_slot: IncludeSlotInHash, ) -> AccountHash { if lamports == 0 { return AccountHash(Hash::default()); @@ -6285,16 +6224,6 @@ impl AccountsDb { // collect lamports, slot, rent_epoch into buffer to hash buffer.extend_from_slice(&lamports.to_le_bytes()); - match include_slot { - IncludeSlotInHash::IncludeSlot => { - // upon feature activation, stop including slot# in the account hash - buffer.extend_from_slice(&slot.to_le_bytes()); - } - IncludeSlotInHash::RemoveSlot => {} - IncludeSlotInHash::IrrelevantAssertOnUse => { - panic!("IncludeSlotInHash is irrelevant, but we are calculating hash"); - } - } buffer.extend_from_slice(&rent_epoch.to_le_bytes()); if data.len() > DATA_SIZE_CAN_FIT { @@ -6681,10 +6610,8 @@ impl AccountsDb { // updates to the index happen, so anybody that sees a real entry in the index, // will be able to find the account in storage let flushed_store = self.create_and_insert_store(slot, total_size, "flush_slot_cache"); - // irrelevant - account will already be hashed since it was used in bank hash previously - let include_slot_in_hash = IncludeSlotInHash::IrrelevantAssertOnUse; self.store_accounts_frozen( - (slot, &accounts[..], include_slot_in_hash), + (slot, &accounts[..]), Some(hashes), &flushed_store, None, @@ -6702,7 +6629,7 @@ impl AccountsDb { hashes.push(hash); }); self.store_accounts_frozen( - (slot, &accounts[..], include_slot_in_hash), + (slot, &accounts[..]), Some(hashes), &flushed_store, None, @@ -6765,11 +6692,7 @@ impl AccountsDb { // store all unique accounts into new storage let accounts = accum.values().collect::>(); - let to_store = ( - slot, - &accounts[..], - INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, - ); + let to_store = (slot, &accounts[..]); let storable = StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new( &to_store, @@ -6835,7 +6758,6 @@ impl AccountsDb { slot: Slot, accounts_and_meta_to_store: &impl StorableAccounts<'b, T>, txn_iter: Box> + 'a>, - include_slot_in_hash: IncludeSlotInHash, mut write_version_producer: P, ) -> Vec where @@ -6858,12 +6780,9 @@ impl AccountsDb { &mut write_version_producer, ); - let cached_account = self.accounts_cache.store( - slot, - accounts_and_meta_to_store.pubkey(i), - account, - include_slot_in_hash, - ); + let cached_account = + self.accounts_cache + .store(slot, accounts_and_meta_to_store.pubkey(i), account); // hash this account in the bg match &self.sender_bg_hasher { Some(ref sender) => { @@ -6912,13 +6831,7 @@ impl AccountsDb { None => Box::new(std::iter::repeat(&None).take(accounts.len())), }; - self.write_accounts_to_cache( - slot, - accounts, - txn_iter, - accounts.include_slot_in_hash(), - write_version_producer, - ) + 
self.write_accounts_to_cache(slot, accounts, txn_iter, write_version_producer) } StoreTo::Storage(storage) => { if accounts.has_hash_and_write_version() { @@ -6951,10 +6864,8 @@ impl AccountsDb { for index in 0..accounts.len() { let (pubkey, account) = (accounts.pubkey(index), accounts.account(index)); let hash = Self::hash_account( - slot, account, pubkey, - accounts.include_slot_in_hash(), ); hashes.push(hash); } @@ -7108,7 +7019,7 @@ impl AccountsDb { let hash_is_missing = loaded_hash == AccountHash(Hash::default()); if (config.check_hash || hash_is_missing) && !self.is_filler_account(pubkey) { let computed_hash = - loaded_account.compute_hash(*slot, pubkey, config.include_slot_in_hash); + loaded_account.compute_hash(pubkey); if hash_is_missing { loaded_hash = computed_hash; } @@ -7188,7 +7099,6 @@ impl AccountsDb { &EpochSchedule::default(), &RentCollector::default(), is_startup, - INCLUDE_SLOT_IN_HASH_TESTS, ) } @@ -7536,7 +7446,6 @@ impl AccountsDb { epoch_schedule: &EpochSchedule, rent_collector: &RentCollector, is_startup: bool, - include_slot_in_hash: IncludeSlotInHash, ) -> (AccountsHash, u64) { let check_hash = false; let (accounts_hash, total_lamports) = self @@ -7551,7 +7460,6 @@ impl AccountsDb { epoch_schedule, rent_collector, store_detailed_debug_info_on_failure: false, - include_slot_in_hash, }, expected_capitalization, ) @@ -7931,7 +7839,6 @@ impl AccountsDb { epoch_schedule: config.epoch_schedule, rent_collector: config.rent_collector, store_detailed_debug_info_on_failure: config.store_detailed_debug_info, - include_slot_in_hash: config.include_slot_in_hash, }; let hash_mismatch_is_error = !config.ignore_mismatch; @@ -8586,7 +8493,7 @@ impl AccountsDb { pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) { let storage = self.find_storage_candidate(slot, 1); self.store( - (slot, accounts, INCLUDE_SLOT_IN_HASH_TESTS), + (slot, accounts), &StoreTo::Storage(&storage), None, StoreReclaims::Default, @@ -9826,7 +9733,7 @@ impl AccountsDb { /// callers used to call store_uncached. But, this is not allowed anymore. pub fn store_for_tests(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) { self.store( - (slot, accounts, INCLUDE_SLOT_IN_HASH_TESTS), + (slot, accounts), &StoreTo::Cache, None, StoreReclaims::Default, @@ -9966,7 +9873,6 @@ impl<'a> VerifyAccountsHashAndLamportsConfig<'a> { ignore_mismatch: false, store_detailed_debug_info: false, use_bg_thread_pool: false, - include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS, } } } @@ -10076,7 +9982,6 @@ pub mod tests { bins: usize, bin_range: &Range, check_hash: bool, - include_slot_in_hash: IncludeSlotInHash, ) -> Result, AccountsHashVerificationError> { let temp_dir = TempDir::new().unwrap(); let accounts_hash_cache_path = temp_dir.path().to_path_buf(); @@ -10088,7 +9993,6 @@ pub mod tests { bin_range, &CalcAccountsHashConfig { check_hash, - include_slot_in_hash, ..CalcAccountsHashConfig::default() }, None, @@ -10106,32 +10010,6 @@ pub mod tests { } } - /// This impl exists until this feature is activated: - /// ignore slot when calculating an account hash #28420 - /// For now, all test code will continue to work thanks to this impl - /// Tests will use INCLUDE_SLOT_IN_HASH_TESTS for 'include_slot_in_hash' calls. 
- impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for (Slot, &'a [(&'a Pubkey, &'a T)]) { - fn pubkey(&self, index: usize) -> &Pubkey { - self.1[index].0 - } - fn account(&self, index: usize) -> &T { - self.1[index].1 - } - fn slot(&self, _index: usize) -> Slot { - // per-index slot is not unique per slot when per-account slot is not included in the source data - self.target_slot() - } - fn target_slot(&self) -> Slot { - self.0 - } - fn len(&self) -> usize { - self.1.len() - } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - INCLUDE_SLOT_IN_HASH_TESTS - } - } - /// this tuple contains slot info PER account impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for (Slot, &'a [(&'a Pubkey, &'a T, Slot)]) @@ -10162,9 +10040,6 @@ pub mod tests { false } } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - INCLUDE_SLOT_IN_HASH_TESTS - } } impl CurrentAncientAppendVec { @@ -10202,7 +10077,7 @@ pub mod tests { } let expected_accounts_data_len = data.last().unwrap().1.data().len(); let expected_alive_bytes = aligned_stored_size(expected_accounts_data_len); - let storable = (slot0, &data[..], INCLUDE_SLOT_IN_HASH_TESTS); + let storable = (slot0, &data[..]); let hashes = data .iter() .map(|_| AccountHash(Hash::default())) @@ -10480,14 +10355,7 @@ pub mod tests { let accounts_db = AccountsDb::new_single_for_tests(); accounts_db - .scan_snapshot_stores( - &empty_storages(), - &mut stats, - 2, - &bounds, - false, - INCLUDE_SLOT_IN_HASH_TESTS, - ) + .scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false) .unwrap(); } #[test] @@ -10500,14 +10368,7 @@ pub mod tests { let accounts_db = AccountsDb::new_single_for_tests(); accounts_db - .scan_snapshot_stores( - &empty_storages(), - &mut stats, - 2, - &bounds, - false, - INCLUDE_SLOT_IN_HASH_TESTS, - ) + .scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false) .unwrap(); } @@ -10521,21 +10382,13 @@ pub mod tests { let accounts_db = AccountsDb::new_single_for_tests(); accounts_db - .scan_snapshot_stores( - &empty_storages(), - &mut stats, - 2, - &bounds, - false, - INCLUDE_SLOT_IN_HASH_TESTS, - ) + .scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false) .unwrap(); } fn sample_storages_and_account_in_slot( slot: Slot, accounts: &AccountsDb, - include_slot_in_hash: IncludeSlotInHash, ) -> ( Vec>, Vec, @@ -10569,10 +10422,10 @@ pub mod tests { ]; let expected_hashes = [ - AccountHash(Hash::from_str("5K3NW73xFHwgTWVe4LyCg4QfQda8f88uZj2ypDx2kmmH").unwrap()), - AccountHash(Hash::from_str("84ozw83MZ8oeSF4hRAg7SeW1Tqs9LMXagX1BrDRjtZEx").unwrap()), - AccountHash(Hash::from_str("5XqtnEJ41CG2JWNp7MAg9nxkRUAnyjLxfsKsdrLxQUbC").unwrap()), - AccountHash(Hash::from_str("DpvwJcznzwULYh19Zu5CuAA4AT6WTBe4H6n15prATmqj").unwrap()), + AccountHash(Hash::from_str("EkyjPt4oL7KpRMEoAdygngnkhtVwCxqJ2MkwaGV4kUU4").unwrap()), + AccountHash(Hash::from_str("4N7T4C2MK3GbHudqhfGsCyi2GpUU3roN6nhwViA41LYL").unwrap()), + AccountHash(Hash::from_str("HzWMbUEnSfkrPiMdZeM6zSTdU5czEvGkvDcWBApToGC9").unwrap()), + AccountHash(Hash::from_str("AsWzo1HphgrrgQ6V2zFUVDssmfaBipx2XfwGZRqcJjir").unwrap()), ]; let mut raw_accounts = Vec::default(); @@ -10583,15 +10436,8 @@ pub mod tests { 1, AccountSharedData::default().owner(), )); - let hash = AccountsDb::hash_account( - slot, - &raw_accounts[i], - &raw_expected[i].pubkey, - include_slot_in_hash, - ); - if slot == 1 && matches!(include_slot_in_hash, IncludeSlotInHash::IncludeSlot) { - assert_eq!(hash, expected_hashes[i]); - } + let hash = 
AccountsDb::hash_account(&raw_accounts[i], &raw_expected[i].pubkey); + assert_eq!(hash, expected_hashes[i]); raw_expected[i].hash = hash.0; } @@ -10617,12 +10463,11 @@ pub mod tests { fn sample_storages_and_accounts( accounts: &AccountsDb, - include_slot_in_hash: IncludeSlotInHash, ) -> ( Vec>, Vec, ) { - sample_storages_and_account_in_slot(1, accounts, include_slot_in_hash) + sample_storages_and_account_in_slot(1, accounts) } fn get_storage_refs(input: &[Arc]) -> SortedStorages { @@ -10688,54 +10533,47 @@ pub mod tests { #[test] fn test_accountsdb_scan_snapshot_stores_hash_not_stored() { - solana_logger::setup(); - for include_slot_in_hash in [ - IncludeSlotInHash::IncludeSlot, - IncludeSlotInHash::RemoveSlot, - ] { - let accounts_db = AccountsDb::new_single_for_tests(); - let (storages, raw_expected) = - sample_storages_and_accounts(&accounts_db, include_slot_in_hash); - storages.iter().for_each(|storage| { - accounts_db.storage.remove(&storage.slot(), false); - }); + let accounts_db = AccountsDb::new_single_for_tests(); + let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); + storages.iter().for_each(|storage| { + accounts_db.storage.remove(&storage.slot(), false); + }); - let hash = AccountHash(Hash::default()); + let hash = AccountHash(Hash::default()); - // replace the sample storages, storing default hash values so that we rehash during scan - let storages = storages - .iter() - .map(|storage| { - let slot = storage.slot(); - let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test"); - let all_accounts = storage - .all_accounts() - .iter() - .map(|acct| (*acct.pubkey(), acct.to_account_shared_data())) - .collect::>(); - let accounts = all_accounts - .iter() - .map(|stored| (&stored.0, &stored.1)) - .collect::>(); - let slice = &accounts[..]; - let account_data = (slot, slice, include_slot_in_hash); - let hashes = (0..account_data.len()).map(|_| &hash).collect(); - let write_versions = (0..account_data.len()).map(|_| 0).collect(); - let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - hashes, - write_versions, - ); - copied_storage - .accounts - .append_accounts(&storable_accounts, 0); - copied_storage - }) - .collect::>(); + // replace the sample storages, storing default hash values so that we rehash during scan + let storages = storages + .iter() + .map(|storage| { + let slot = storage.slot(); + let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test"); + let all_accounts = storage + .all_accounts() + .iter() + .map(|acct| (*acct.pubkey(), acct.to_account_shared_data())) + .collect::>(); + let accounts = all_accounts + .iter() + .map(|stored| (&stored.0, &stored.1)) + .collect::>(); + let slice = &accounts[..]; + let account_data = (slot, slice); + let hashes = (0..account_data.len()).map(|_| &hash).collect(); + let write_versions = (0..account_data.len()).map(|_| 0).collect(); + let storable_accounts = + StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &account_data, + hashes, + write_versions, + ); + copied_storage + .accounts + .append_accounts(&storable_accounts, 0); + copied_storage + }) + .collect::>(); - assert_test_scan(accounts_db, storages, raw_expected, include_slot_in_hash); - } + assert_test_scan(accounts_db, storages, raw_expected); } #[test] @@ -10743,8 +10581,7 @@ pub mod tests { fn test_accountsdb_scan_snapshot_stores_check_hash() { solana_logger::setup(); let accounts_db = 
AccountsDb::new_single_for_tests(); - let (storages, _raw_expected) = - sample_storages_and_accounts(&accounts_db, INCLUDE_SLOT_IN_HASH_TESTS); + let (storages, _raw_expected) = sample_storages_and_accounts(&accounts_db); let max_slot = storages.iter().map(|storage| storage.slot()).max().unwrap(); let hash = @@ -10766,7 +10603,7 @@ pub mod tests { .map(|stored| (&stored.0, &stored.1)) .collect::>(); let slice = &accounts[..]; - let account_data = (slot, slice, INCLUDE_SLOT_IN_HASH_TESTS); + let account_data = (slot, slice); let hashes = (0..account_data.len()).map(|_| &hash).collect(); let write_versions = (0..account_data.len()).map(|_| 0).collect(); let storable_accounts = @@ -10795,7 +10632,6 @@ pub mod tests { end: bins, }, true, // checking hash here - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); } @@ -10804,22 +10640,15 @@ pub mod tests { fn test_accountsdb_scan_snapshot_stores() { solana_logger::setup(); let accounts_db = AccountsDb::new_single_for_tests(); - let (storages, raw_expected) = - sample_storages_and_accounts(&accounts_db, INCLUDE_SLOT_IN_HASH_TESTS); + let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); - assert_test_scan( - accounts_db, - storages, - raw_expected, - INCLUDE_SLOT_IN_HASH_TESTS, - ); + assert_test_scan(accounts_db, storages, raw_expected); } fn assert_test_scan( accounts_db: AccountsDb, storages: Vec>, raw_expected: Vec, - include_slot_in_hash: IncludeSlotInHash, ) { let bins = 1; let mut stats = HashStats::default(); @@ -10834,7 +10663,6 @@ pub mod tests { end: bins, }, true, // checking hash here - include_slot_in_hash, ) .unwrap(); assert_scan(result, vec![vec![raw_expected.clone()]], bins, 0, bins); @@ -10851,7 +10679,6 @@ pub mod tests { end: bins, }, false, - include_slot_in_hash, ) .unwrap(); let mut expected = vec![Vec::new(); bins]; @@ -10873,7 +10700,6 @@ pub mod tests { end: bins, }, false, - include_slot_in_hash, ) .unwrap(); let mut expected = vec![Vec::new(); bins]; @@ -10895,7 +10721,6 @@ pub mod tests { end: bins, }, false, - include_slot_in_hash, ) .unwrap(); let mut expected = vec![Vec::new(); bins]; @@ -10912,8 +10737,7 @@ pub mod tests { // enough stores to get to 2nd chunk let bins = 1; let slot = MAX_ITEMS_PER_CHUNK as Slot; - let (storages, raw_expected) = - sample_storages_and_account_in_slot(slot, &accounts_db, INCLUDE_SLOT_IN_HASH_TESTS); + let (storages, raw_expected) = sample_storages_and_account_in_slot(slot, &accounts_db); let storage_data = [(&storages[0], slot)]; let sorted_storages = @@ -10930,7 +10754,6 @@ pub mod tests { end: bins, }, false, - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); @@ -10941,8 +10764,7 @@ pub mod tests { fn test_accountsdb_scan_snapshot_stores_binning() { let mut stats = HashStats::default(); let accounts_db = AccountsDb::new_single_for_tests(); - let (storages, raw_expected) = - sample_storages_and_accounts(&accounts_db, INCLUDE_SLOT_IN_HASH_TESTS); + let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); // just the first bin of 2 let bins = 2; @@ -10957,7 +10779,6 @@ pub mod tests { end: half_bins, }, false, - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); let mut expected = vec![Vec::new(); half_bins]; @@ -10977,7 +10798,6 @@ pub mod tests { end: bins, }, false, - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); @@ -11002,7 +10822,6 @@ pub mod tests { end: bin + 1, }, false, - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); let mut expected = vec![Vec::new(); 1]; @@ -11025,7 +10844,6 @@ pub mod tests { end: bin + range, }, false, - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); let mut 
expected = vec![]; @@ -11051,8 +10869,7 @@ pub mod tests { // range is for only 1 bin out of 256. let bins = 256; let slot = MAX_ITEMS_PER_CHUNK as Slot; - let (storages, raw_expected) = - sample_storages_and_account_in_slot(slot, &accounts_db, INCLUDE_SLOT_IN_HASH_TESTS); + let (storages, raw_expected) = sample_storages_and_account_in_slot(slot, &accounts_db); let storage_data = [(&storages[0], slot)]; let sorted_storages = @@ -11071,7 +10888,6 @@ pub mod tests { end: start + range, }, false, - INCLUDE_SLOT_IN_HASH_TESTS, ) .unwrap(); assert_eq!(result.len(), 1); // 2 chunks, but 1 is empty so not included @@ -11106,8 +10922,7 @@ pub mod tests { solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); - let (storages, raw_expected) = - sample_storages_and_accounts(&db, INCLUDE_SLOT_IN_HASH_TESTS); + let (storages, raw_expected) = sample_storages_and_accounts(&db); let expected_hash = AccountsHasher::compute_merkle_root_loop(raw_expected.clone(), MERKLE_FANOUT, |item| { &item.hash @@ -12744,7 +12559,6 @@ pub mod tests { #[test] fn test_hash_stored_account() { // Number are just sequential. - let slot: Slot = 0x01_02_03_04_05_06_07_08; let meta = StoredMeta { write_version_obsolete: 0x09_0a_0b_0c_0d_0e_0f_10, data_len: 0x11_12_13_14_15_16_17_18, @@ -12784,25 +12598,15 @@ pub mod tests { let account = stored_account.to_account_shared_data(); let expected_account_hash = - AccountHash(Hash::from_str("6VeAL4x4PVkECKL1hD1avwPE1uMCRoWiZJzVMvVNYhTq").unwrap()); + AccountHash(Hash::from_str("4xuaE8UfH8EYsPyDZvJXUScoZSyxUJf2BpzVMLTFh497").unwrap()); assert_eq!( - AccountsDb::hash_account( - slot, - &stored_account, - stored_account.pubkey(), - INCLUDE_SLOT_IN_HASH_TESTS - ), + AccountsDb::hash_account(&stored_account, stored_account.pubkey(),), expected_account_hash, "StoredAccountMeta's data layout might be changed; update hashing if needed." ); assert_eq!( - AccountsDb::hash_account( - slot, - &account, - stored_account.pubkey(), - INCLUDE_SLOT_IN_HASH_TESTS - ), + AccountsDb::hash_account(&account, stored_account.pubkey(),), expected_account_hash, "Account-based hashing must be consistent with StoredAccountMeta-based one." ); @@ -12893,7 +12697,6 @@ pub mod tests { epoch_schedule: &EPOCH_SCHEDULE, rent_collector: &RENT_COLLECTOR, store_detailed_debug_info_on_failure: false, - include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS, } } } @@ -18241,9 +18044,9 @@ pub mod tests { // Calculate the expected full accounts hash here and ensure it matches. // Ensure the zero-lamport accounts are NOT included in the full accounts hash. - let full_account_hashes = [(2, 0), (3, 0), (4, 1)].into_iter().map(|(index, slot)| { + let full_account_hashes = [(2, 0), (3, 0), (4, 1)].into_iter().map(|(index, _slot)| { let (pubkey, account) = &accounts[index]; - AccountsDb::hash_account(slot, account, pubkey, INCLUDE_SLOT_IN_HASH_TESTS).0 + AccountsDb::hash_account(account, pubkey).0 }); let expected_accounts_hash = AccountsHash(compute_merkle_root(full_account_hashes)); assert_eq!(full_accounts_hash.0, expected_accounts_hash); @@ -18316,7 +18119,7 @@ pub mod tests { let incremental_account_hashes = [(2, 2), (3, 3), (5, 3), (6, 2), (7, 3)] .into_iter() - .map(|(index, slot)| { + .map(|(index, _slot)| { let (pubkey, account) = &accounts[index]; if account.is_zero_lamport() { // For incremental accounts hash, the hash of a zero lamport account is the hash of its pubkey. 
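// Why the hard-coded expected hashes in these tests changed: the per-account hash input
// no longer contains the slot. A minimal sketch (illustrative only) of the new buffer
// layout, following the hash_account_data() hunk above (the remaining fields are unchanged):
//
//     buffer.extend_from_slice(&lamports.to_le_bytes());
//     // the slot bytes previously appended here are gone
//     buffer.extend_from_slice(&rent_epoch.to_le_bytes());
//     // ...account data and the remaining fields, then the buffer is hashed;
//     // zero-lamport accounts still short-circuit to AccountHash(Hash::default())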
@@ -18324,8 +18127,7 @@ pub mod tests { let hash = blake3::hash(bytemuck::bytes_of(pubkey)); Hash::new_from_array(hash.into()) } else { - AccountsDb::hash_account(slot, account, pubkey, INCLUDE_SLOT_IN_HASH_TESTS) - .0 + AccountsDb::hash_account(account, pubkey).0 } }); let expected_accounts_hash = diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 315c732896..cd89ee1164 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1,6 +1,6 @@ use { crate::{ - accounts_db::{AccountStorageEntry, IncludeSlotInHash, PUBKEY_BINS_FOR_CALCULATING_HASHES}, + accounts_db::{AccountStorageEntry, PUBKEY_BINS_FOR_CALCULATING_HASHES}, active_stats::{ActiveStatItem, ActiveStats}, ancestors::Ancestors, pubkey_bins::PubkeyBinCalculator24, @@ -177,7 +177,6 @@ pub struct CalcAccountsHashConfig<'a> { pub rent_collector: &'a RentCollector, /// used for tracking down hash mismatches after the fact pub store_detailed_debug_info_on_failure: bool, - pub include_slot_in_hash: IncludeSlotInHash, } // smallest, 3 quartiles, largest, average diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 2fe48db026..a93669d186 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -9,7 +9,6 @@ use { accounts_db::{ AccountStorageEntry, AccountsDb, AliveAccounts, GetUniqueAccountsResult, ShrinkCollect, ShrinkCollectAliveSeparatedByRefs, ShrinkStatsSub, StoreReclaims, - INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, }, accounts_file::AccountsFile, accounts_hash::AccountHash, @@ -693,11 +692,7 @@ impl AccountsDb { bytes: bytes_total, accounts: accounts_to_write, } = packed; - let accounts_to_write = StorableAccountsBySlot::new( - target_slot, - accounts_to_write, - INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, - ); + let accounts_to_write = StorableAccountsBySlot::new(target_slot, accounts_to_write); self.shrink_ancient_stats .bytes_ancient_created @@ -953,7 +948,7 @@ pub mod tests { create_db_with_storages_and_index, create_storages_and_update_index, get_all_accounts, remove_account_for_tests, CAN_RANDOMLY_SHRINK_FALSE, }, - ShrinkCollectRefs, INCLUDE_SLOT_IN_HASH_TESTS, MAX_RECYCLE_STORES, + ShrinkCollectRefs, MAX_RECYCLE_STORES, }, accounts_index::UpsertReclaim, append_vec::{aligned_stored_size, AppendVec, AppendVecStoredAccountMeta}, @@ -2699,11 +2694,7 @@ pub mod tests { .collect::>(); let target_slot = slots.clone().nth(combine_into).unwrap_or(slots.start); - let accounts_to_write = StorableAccountsBySlot::new( - target_slot, - &accounts, - INCLUDE_SLOT_IN_HASH_TESTS, - ); + let accounts_to_write = StorableAccountsBySlot::new(target_slot, &accounts); let bytes = storages .iter() diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 8cc2a6f5b3..bd789aa309 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -648,7 +648,6 @@ impl AppendVec { pub mod tests { use { super::{test_utils::*, *}, - crate::accounts_db::INCLUDE_SLOT_IN_HASH_TESTS, assert_matches::assert_matches, memoffset::offset_of, rand::{thread_rng, Rng}, @@ -738,19 +737,18 @@ pub mod tests { #[should_panic(expected = "accounts.has_hash_and_write_version()")] fn test_storable_accounts_with_hashes_and_write_versions_new() { let account = AccountSharedData::default(); - // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) + // for (Slot, &'a [(&'a Pubkey, &'a T)]) let slot = 0 as Slot; let pubkey = Pubkey::default(); 
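// Note (illustrative): `(slot, &[(&pubkey, &account)][..])` is the plain two-element
// StorableAccounts tuple used by these stores now; since the source data carries no
// per-account slot, slot(index) simply returns the target slot, and the trailing
// IncludeSlotInHash element is gone.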
StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new(&( slot, &[(&pubkey, &account)][..], - INCLUDE_SLOT_IN_HASH_TESTS, )); } fn test_mismatch(correct_hashes: bool, correct_write_versions: bool) { let account = AccountSharedData::default(); - // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) + // for (Slot, &'a [(&'a Pubkey, &'a T)]) let slot = 0 as Slot; let pubkey = Pubkey::default(); // mismatch between lens of accounts, hashes, write_versions @@ -763,7 +761,7 @@ pub mod tests { write_versions.push(0); } StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &(slot, &[(&pubkey, &account)][..], INCLUDE_SLOT_IN_HASH_TESTS), + &(slot, &[(&pubkey, &account)][..]), hashes, write_versions, ); @@ -795,7 +793,7 @@ pub mod tests { #[test] fn test_storable_accounts_with_hashes_and_write_versions_empty() { - // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) + // for (Slot, &'a [(&'a Pubkey, &'a T)]) let account = AccountSharedData::default(); let slot = 0 as Slot; let pubkeys = [Pubkey::default()]; @@ -803,7 +801,7 @@ pub mod tests { let write_versions = Vec::default(); let mut accounts = vec![(&pubkeys[0], &account)]; accounts.clear(); - let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS); + let accounts2 = (slot, &accounts[..]); let storable = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &accounts2, @@ -816,7 +814,7 @@ pub mod tests { #[test] fn test_storable_accounts_with_hashes_and_write_versions_hash_and_write_version() { - // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) + // for (Slot, &'a [(&'a Pubkey, &'a T)]) let account = AccountSharedData::default(); let slot = 0 as Slot; let pubkeys = [Pubkey::from([5; 32]), Pubkey::from([6; 32])]; @@ -826,7 +824,7 @@ pub mod tests { ]; let write_versions = vec![42, 43]; let accounts = [(&pubkeys[0], &account), (&pubkeys[1], &account)]; - let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS); + let accounts2 = (slot, &accounts[..]); let storable = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &accounts2, @@ -851,13 +849,13 @@ pub mod tests { ..Account::default() } .to_account_shared_data(); - // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) + // for (Slot, &'a [(&'a Pubkey, &'a T)]) let slot = 0 as Slot; let pubkey = Pubkey::default(); let hashes = vec![AccountHash(Hash::default())]; let write_versions = vec![0]; let accounts = [(&pubkey, &account)]; - let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS); + let accounts2 = (slot, &accounts[..]); let storable = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &accounts2, @@ -874,9 +872,9 @@ pub mod tests { ..Account::default() } .to_account_shared_data(); - // for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) + // for (Slot, &'a [(&'a Pubkey, &'a T)]) let accounts = [(&pubkey, &account)]; - let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS); + let accounts2 = (slot, &accounts[..]); let storable = StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( &accounts2, diff --git a/accounts-db/src/stake_rewards.rs b/accounts-db/src/stake_rewards.rs index 349139c9a1..9918c84747 100644 --- a/accounts-db/src/stake_rewards.rs +++ b/accounts-db/src/stake_rewards.rs @@ -1,7 +1,7 @@ //! 
Code for stake and vote rewards use { - crate::{accounts_db::IncludeSlotInHash, storable_accounts::StorableAccounts}, + crate::storable_accounts::StorableAccounts, solana_sdk::{ account::AccountSharedData, clock::Slot, pubkey::Pubkey, reward_type::RewardType, }, @@ -32,7 +32,7 @@ impl StakeReward { } /// allow [StakeReward] to be passed to `StoreAccounts` directly without copies or vec construction -impl<'a> StorableAccounts<'a, AccountSharedData> for (Slot, &'a [StakeReward], IncludeSlotInHash) { +impl<'a> StorableAccounts<'a, AccountSharedData> for (Slot, &'a [StakeReward]) { fn pubkey(&self, index: usize) -> &Pubkey { &self.1[index].stake_pubkey } @@ -49,9 +49,6 @@ impl<'a> StorableAccounts<'a, AccountSharedData> for (Slot, &'a [StakeReward], I fn len(&self) -> usize { self.1.len() } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.2 - } } #[cfg(feature = "dev-context-only-utils")] diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 7e12063a05..7fbd31ee7d 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -1,9 +1,6 @@ //! trait for abstracting underlying storage of pubkey and account pairs to be written use { - crate::{ - account_storage::meta::StoredAccountMeta, accounts_db::IncludeSlotInHash, - accounts_hash::AccountHash, - }, + crate::{account_storage::meta::StoredAccountMeta, accounts_hash::AccountHash}, solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey}, }; @@ -37,8 +34,6 @@ pub trait StorableAccounts<'a, T: ReadableAccount + Sync>: Sync { fn contains_multiple_slots(&self) -> bool { false } - /// true iff hashing these accounts should include the slot - fn include_slot_in_hash(&self) -> IncludeSlotInHash; /// true iff the impl can provide hash and write_version /// Otherwise, hash and write_version have to be provided separately to store functions. @@ -71,8 +66,6 @@ pub struct StorableAccountsMovingSlots<'a, T: ReadableAccount + Sync> { pub target_slot: Slot, /// slot where accounts are currently stored pub old_slot: Slot, - /// This is temporarily here until feature activation. 
- pub include_slot_in_hash: IncludeSlotInHash, } impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for StorableAccountsMovingSlots<'a, T> { @@ -92,16 +85,9 @@ impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for StorableAccounts fn len(&self) -> usize { self.accounts.len() } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.include_slot_in_hash - } } -/// The last parameter exists until this feature is activated: -/// ignore slot when calculating an account hash #28420 -impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> - for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash) -{ +impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for (Slot, &'a [(&'a Pubkey, &'a T)]) { fn pubkey(&self, index: usize) -> &Pubkey { self.1[index].0 } @@ -118,17 +104,10 @@ impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> fn len(&self) -> usize { self.1.len() } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.2 - } } #[allow(dead_code)] -/// The last parameter exists until this feature is activated: -/// ignore slot when calculating an account hash #28420 -impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> - for (Slot, &'a [&'a (Pubkey, T)], IncludeSlotInHash) -{ +impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for (Slot, &'a [&'a (Pubkey, T)]) { fn pubkey(&self, index: usize) -> &Pubkey { &self.1[index].0 } @@ -145,16 +124,9 @@ impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> fn len(&self) -> usize { self.1.len() } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.2 - } } -/// The last parameter exists until this feature is activated: -/// ignore slot when calculating an account hash #28420 -impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> - for (Slot, &'a [&'a StoredAccountMeta<'a>], IncludeSlotInHash) -{ +impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for (Slot, &'a [&'a StoredAccountMeta<'a>]) { fn pubkey(&self, index: usize) -> &Pubkey { self.account(index).pubkey() } @@ -171,9 +143,6 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> fn len(&self) -> usize { self.1.len() } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.2 - } fn has_hash_and_write_version(&self) -> bool { true } @@ -190,7 +159,6 @@ pub struct StorableAccountsBySlot<'a> { target_slot: Slot, /// each element is (source slot, accounts moving FROM source slot) slots_and_accounts: &'a [(Slot, &'a [&'a StoredAccountMeta<'a>])], - include_slot_in_hash: IncludeSlotInHash, /// This is calculated based off slots_and_accounts. 
/// cumulative offset of all account slices prior to this one @@ -209,7 +177,6 @@ impl<'a> StorableAccountsBySlot<'a> { pub fn new( target_slot: Slot, slots_and_accounts: &'a [(Slot, &'a [&'a StoredAccountMeta<'a>])], - include_slot_in_hash: IncludeSlotInHash, ) -> Self { let mut cumulative_len = 0usize; let mut starting_offsets = Vec::with_capacity(slots_and_accounts.len()); @@ -227,7 +194,6 @@ impl<'a> StorableAccountsBySlot<'a> { target_slot, slots_and_accounts, starting_offsets, - include_slot_in_hash, contains_multiple_slots, len: cumulative_len, } @@ -252,8 +218,6 @@ impl<'a> StorableAccountsBySlot<'a> { } } -/// The last parameter exists until this feature is activated: -/// ignore slot when calculating an account hash #28420 impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for StorableAccountsBySlot<'a> { fn pubkey(&self, index: usize) -> &Pubkey { self.account(index).pubkey() @@ -275,9 +239,6 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for StorableAccountsBySlot< fn contains_multiple_slots(&self) -> bool { self.contains_multiple_slots } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.include_slot_in_hash - } fn has_hash_and_write_version(&self) -> bool { true } @@ -292,12 +253,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for StorableAccountsBySlot< /// this tuple contains a single different source slot that applies to all accounts /// accounts are StoredAccountMeta impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> - for ( - Slot, - &'a [&'a StoredAccountMeta<'a>], - IncludeSlotInHash, - Slot, - ) + for (Slot, &'a [&'a StoredAccountMeta<'a>], Slot) { fn pubkey(&self, index: usize) -> &Pubkey { self.account(index).pubkey() @@ -307,7 +263,7 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> } fn slot(&self, _index: usize) -> Slot { // same other slot for all accounts - self.3 + self.2 } fn target_slot(&self) -> Slot { self.0 @@ -315,9 +271,6 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> fn len(&self) -> usize { self.1.len() } - fn include_slot_in_hash(&self) -> IncludeSlotInHash { - self.2 - } fn has_hash_and_write_version(&self) -> bool { true } @@ -335,7 +288,6 @@ pub mod tests { super::*, crate::{ account_storage::meta::{AccountMeta, StoredAccountMeta, StoredMeta}, - accounts_db::INCLUDE_SLOT_IN_HASH_TESTS, append_vec::AppendVecStoredAccountMeta, }, solana_sdk::{ @@ -355,7 +307,6 @@ pub mod tests { assert_eq!(a.target_slot(), b.target_slot()); assert_eq!(a.len(), b.len()); assert_eq!(a.is_empty(), b.is_empty()); - assert_eq!(a.include_slot_in_hash(), b.include_slot_in_hash()); (0..a.len()).for_each(|i| { assert_eq!(a.pubkey(i), b.pubkey(i)); assert!(accounts_equal(a.account(i), b.account(i))); @@ -394,12 +345,7 @@ pub mod tests { hash: &hash, }); - let test3 = ( - slot, - &vec![&stored_account, &stored_account][..], - INCLUDE_SLOT_IN_HASH_TESTS, - slot, - ); + let test3 = (slot, &vec![&stored_account, &stored_account][..], slot); assert!(!test3.contains_multiple_slots()); } @@ -466,33 +412,19 @@ pub mod tests { three.push(raw2); four_pubkey_and_account_value.push(raw4); }); - let test2 = (target_slot, &two[..], INCLUDE_SLOT_IN_HASH_TESTS); - let test4 = ( - target_slot, - &four_pubkey_and_account_value[..], - INCLUDE_SLOT_IN_HASH_TESTS, - ); + let test2 = (target_slot, &two[..]); + let test4 = (target_slot, &four_pubkey_and_account_value[..]); let source_slot = starting_slot % max_slots; - let test3 = ( - target_slot, - &three[..], - INCLUDE_SLOT_IN_HASH_TESTS, - source_slot, - ); + let test3 = (target_slot, &three[..], 
source_slot); let old_slot = starting_slot; let test_moving_slots = StorableAccountsMovingSlots { accounts: &two[..], target_slot, old_slot, - include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS, }; let for_slice = [(old_slot, &three[..])]; - let test_moving_slots2 = StorableAccountsBySlot::new( - target_slot, - &for_slice, - INCLUDE_SLOT_IN_HASH_TESTS, - ); + let test_moving_slots2 = StorableAccountsBySlot::new(target_slot, &for_slice); compare(&test2, &test3); compare(&test2, &test4); compare(&test2, &test_moving_slots); @@ -594,11 +526,7 @@ pub mod tests { }) }) .collect::>(); - let storable = StorableAccountsBySlot::new( - 99, - &slots_and_accounts[..], - INCLUDE_SLOT_IN_HASH_TESTS, - ); + let storable = StorableAccountsBySlot::new(99, &slots_and_accounts[..]); assert!(storable.has_hash_and_write_version()); assert_eq!(99, storable.target_slot()); assert_eq!(entries0 != entries, storable.contains_multiple_slots()); diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index cb87cdc513..384567571b 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -400,7 +400,6 @@ impl AccountsHashVerifier { epoch_schedule: &accounts_package.epoch_schedule, rent_collector: &accounts_package.rent_collector, store_detailed_debug_info_on_failure: false, - include_slot_in_hash: accounts_package.include_slot_in_hash, }; let slot = accounts_package.slot; @@ -480,7 +479,6 @@ impl AccountsHashVerifier { epoch_schedule: &accounts_package.epoch_schedule, rent_collector: &accounts_package.rent_collector, store_detailed_debug_info_on_failure: false, - include_slot_in_hash: accounts_package.include_slot_in_hash, }; let (incremental_accounts_hash, measure_hash_us) = measure_us!( diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 2595361573..2ee63e6ef6 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -5,9 +5,7 @@ use { crate::snapshot_utils::create_tmp_accounts_dir_for_tests, log::*, solana_accounts_db::{ - accounts_db::{ - AccountShrinkThreshold, CalcAccountsHashDataSource, INCLUDE_SLOT_IN_HASH_TESTS, - }, + accounts_db::{AccountShrinkThreshold, CalcAccountsHashDataSource}, accounts_hash::CalcAccountsHashConfig, accounts_index::AccountSecondaryIndexes, epoch_accounts_hash::EpochAccountsHash, @@ -331,7 +329,6 @@ fn test_epoch_accounts_hash_basic(test_environment: TestEnvironment) { epoch_schedule: bank.epoch_schedule(), rent_collector: bank.rent_collector(), store_detailed_debug_info_on_failure: false, - include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS, }, ) .unwrap(); diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index fcd784a21e..4336166924 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -11,7 +11,7 @@ use { accounts::{AccountAddressFilter, Accounts}, accounts_db::{ test_utils::create_test_accounts, AccountShrinkThreshold, - VerifyAccountsHashAndLamportsConfig, INCLUDE_SLOT_IN_HASH_TESTS, + VerifyAccountsHashAndLamportsConfig, }, accounts_index::{AccountSecondaryIndexes, ScanConfig}, ancestors::Ancestors, @@ -118,7 +118,6 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { ignore_mismatch: false, store_detailed_debug_info: false, use_bg_thread_pool: false, - include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS, } )) }); diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index b826f8eddc..72a6f72f11 100644 --- a/runtime/src/accounts_background_service.rs +++ 
b/runtime/src/accounts_background_service.rs @@ -370,7 +370,6 @@ impl SnapshotRequestHandler { epoch_schedule: snapshot_root_bank.epoch_schedule(), rent_collector: snapshot_root_bank.rent_collector(), store_detailed_debug_info_on_failure: false, - include_slot_in_hash: snapshot_root_bank.include_slot_in_hash(), }, ) .unwrap(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index bd0786a9b2..66d9a37e5c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -76,7 +76,7 @@ use { }, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, - CalcAccountsHashDataSource, IncludeSlotInHash, VerifyAccountsHashAndLamportsConfig, + CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, accounts_hash::{AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash}, @@ -3305,7 +3305,6 @@ impl Bank { // because credits observed has changed let now = Instant::now(); let slot = self.slot(); - let include_slot_in_hash = self.include_slot_in_hash(); self.stakes_cache.update_stake_accounts( thread_pool, stake_rewards, @@ -3313,11 +3312,9 @@ impl Bank { ); assert!(!self.freeze_started()); thread_pool.install(|| { - stake_rewards.par_chunks(512).for_each(|chunk| { - self.rc - .accounts - .store_accounts_cached((slot, chunk, include_slot_in_hash)) - }) + stake_rewards + .par_chunks(512) + .for_each(|chunk| self.rc.accounts.store_accounts_cached((slot, chunk))) }); metrics .store_stake_accounts_us @@ -3350,7 +3347,7 @@ impl Bank { } } - self.store_accounts((self.slot(), stake_rewards, self.include_slot_in_hash())); + self.store_accounts((self.slot(), stake_rewards)); stake_rewards .iter() .map(|stake_reward| stake_reward.stake_reward_info.lamports) @@ -3373,7 +3370,7 @@ impl Bank { .enumerate() .map(|(i, account)| (&vote_account_rewards.rewards[i].0, account)) .collect::>(); - self.store_accounts((self.slot(), &to_store[..], self.include_slot_in_hash())); + self.store_accounts((self.slot(), &to_store[..])); }); metrics @@ -3838,16 +3835,6 @@ impl Bank { // Bootstrap validator collects fees until `new_from_parent` is called. self.fee_rate_governor = genesis_config.fee_rate_governor.clone(); - // Make sure to activate the account_hash_ignore_slot feature - // before calculating any account hashes. - if genesis_config - .accounts - .iter() - .any(|(pubkey, _)| pubkey == &feature_set::account_hash_ignore_slot::id()) - { - self.activate_feature(&feature_set::account_hash_ignore_slot::id()); - } - for (pubkey, account) in genesis_config.accounts.iter() { assert!( self.get_account(pubkey).is_none(), @@ -5594,7 +5581,6 @@ impl Bank { &self.rent_collector, &durable_nonce, lamports_per_signature, - self.include_slot_in_hash(), ); let rent_debits = self.collect_rent(&execution_results, loaded_txs); @@ -6081,11 +6067,8 @@ impl Bank { if !accounts_to_store.is_empty() { // TODO: Maybe do not call `store_accounts()` here. Instead return `accounts_to_store` // and have `collect_rent_in_partition()` perform all the stores. - let (_, measure) = measure!(self.store_accounts(( - self.slot(), - &accounts_to_store[..], - self.include_slot_in_hash() - ))); + let (_, measure) = + measure!(self.store_accounts((self.slot(), &accounts_to_store[..],))); time_storing_accounts_us += measure.as_us(); } @@ -6098,19 +6081,6 @@ impl Bank { } } - /// true if we should include the slot in account hash - /// This is governed by a feature. 
- pub(crate) fn include_slot_in_hash(&self) -> IncludeSlotInHash { - if self - .feature_set - .is_active(&feature_set::account_hash_ignore_slot::id()) - { - IncludeSlotInHash::RemoveSlot - } else { - IncludeSlotInHash::IncludeSlot - } - } - /// convert 'partition' to a pubkey range and 'collect_rent_in_range' fn collect_rent_in_partition(&self, partition: Partition, metrics: &RentMetrics) { let subrange_full = accounts_partition::pubkey_range_from_partition(partition); @@ -6620,11 +6590,7 @@ impl Bank { pubkey: &Pubkey, account: &T, ) { - self.store_accounts(( - self.slot(), - &[(pubkey, account)][..], - self.include_slot_in_hash(), - )) + self.store_accounts((self.slot(), &[(pubkey, account)][..])) } pub fn store_accounts<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( @@ -7243,7 +7209,6 @@ impl Bank { let cap = self.capitalization(); let epoch_schedule = self.epoch_schedule(); let rent_collector = self.rent_collector(); - let include_slot_in_hash = self.include_slot_in_hash(); if config.run_in_background { let ancestors = ancestors.clone(); let accounts = Arc::clone(accounts); @@ -7267,7 +7232,6 @@ impl Bank { ignore_mismatch: config.ignore_mismatch, store_detailed_debug_info: config.store_hash_raw_data_for_debug, use_bg_thread_pool: true, - include_slot_in_hash, }, ); accounts_ @@ -7293,7 +7257,6 @@ impl Bank { ignore_mismatch: config.ignore_mismatch, store_detailed_debug_info: config.store_hash_raw_data_for_debug, use_bg_thread_pool: false, // fg is waiting for this to run, so we can use the fg thread pool - include_slot_in_hash, }, ); self.set_initial_accounts_hash_verification_completed(); @@ -7426,7 +7389,6 @@ impl Bank { self.epoch_schedule(), &self.rent_collector, is_startup, - self.include_slot_in_hash(), ) .1 } @@ -7535,7 +7497,6 @@ impl Bank { self.epoch_schedule(), &self.rent_collector, is_startup, - self.include_slot_in_hash(), ); if total_lamports != self.capitalization() { datapoint_info!( @@ -7561,7 +7522,6 @@ impl Bank { self.epoch_schedule(), &self.rent_collector, is_startup, - self.include_slot_in_hash(), ); } @@ -7588,7 +7548,6 @@ impl Bank { epoch_schedule: &self.epoch_schedule, rent_collector: &self.rent_collector, store_detailed_debug_info_on_failure: false, - include_slot_in_hash: self.include_slot_in_hash(), }; let storages = self.get_snapshot_storages(Some(base_slot)); let sorted_storages = SortedStorages::new(&storages); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 773a971371..343e87975b 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -6555,25 +6555,25 @@ fn test_bank_hash_consistency() { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "3kzRo3M5q9j47Dxfdp9ZeEXfUTA5rxVud7jRKuttHxFz" + "3KE2bigpBiiMLGYNqmWkgbrQGSqMt5ccG6ED87CFCVpt" ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "bWPR5AQjsfhMypn1nLUjugmitbjHwV4rmnyTDFqCdv1" + "FpNDsd21HXznXf6tRpMNiWhFyhZ4aCCECQm3gL4jGV22" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "74hNYEVcvKU5JZwSNBYUcUWgf9Jw2Mag4b55967VPVjG" + "7gDCoXPfFtKPALi212akhhQHEuLdAqyf7DE3yUN4bR2p" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "BvYViztQiksU8vDvMqZYBo9Lc4cgjJEmijPpqktBRMkS" + "6FREbeHdTNYnEXg4zobL2mqGfevukg75frkQJqKpYnk4" ); break; } @@ -12922,7 +12922,7 @@ fn test_epoch_credit_rewards_and_history_update() { .map(|_| StakeReward::new_random()) .collect::>(); - bank.store_accounts((bank.slot(), &stake_rewards[..], bank.include_slot_in_hash())); + bank.store_accounts((bank.slot(), &stake_rewards[..])); // Simulate 
rewards let mut expected_rewards = 0; diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 1757c00a9a..62ac8285b1 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -2310,7 +2310,6 @@ mod tests { epoch_schedule: deserialized_bank.epoch_schedule(), rent_collector: deserialized_bank.rent_collector(), store_detailed_debug_info_on_failure: false, - include_slot_in_hash: bank.include_slot_in_hash(), }, &SortedStorages::new(&other_incremental_snapshot_storages), HashStats::default(), diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index dc1276b8de..37db9eea72 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -368,11 +368,7 @@ impl<'a> SnapshotMinimizer<'a> { shrink_in_progress = Some(self.accounts_db().get_store_for_shrink(slot, aligned_total)); let new_storage = shrink_in_progress.as_ref().unwrap().new_storage(); self.accounts_db().store_accounts_frozen( - ( - slot, - &accounts[..], - solana_accounts_db::accounts_db::INCLUDE_SLOT_IN_HASH_IRRELEVANT_APPEND_VEC_OPERATION, - ), + (slot, &accounts[..]), Some(hashes), new_storage, Some(Box::new(write_versions.into_iter())), diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index c86820f2ab..6685cd269d 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -8,7 +8,7 @@ use { log::*, solana_accounts_db::{ accounts::Accounts, - accounts_db::{AccountStorageEntry, IncludeSlotInHash, INCLUDE_SLOT_IN_HASH_TESTS}, + accounts_db::AccountStorageEntry, accounts_hash::{AccountsHash, AccountsHashKind}, epoch_accounts_hash::EpochAccountsHash, rent_collector::RentCollector, @@ -36,7 +36,6 @@ pub struct AccountsPackage { pub epoch_schedule: EpochSchedule, pub rent_collector: RentCollector, pub is_incremental_accounts_hash_feature_enabled: bool, - pub include_slot_in_hash: IncludeSlotInHash, /// Supplemental information needed for snapshots pub snapshot_info: Option, @@ -152,7 +151,6 @@ impl AccountsPackage { epoch_schedule: *bank.epoch_schedule(), rent_collector: bank.rent_collector().clone(), is_incremental_accounts_hash_feature_enabled, - include_slot_in_hash: bank.include_slot_in_hash(), snapshot_info, enqueued: Instant::now(), } @@ -172,7 +170,6 @@ impl AccountsPackage { epoch_schedule: EpochSchedule::default(), rent_collector: RentCollector::default(), is_incremental_accounts_hash_feature_enabled: bool::default(), - include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS, snapshot_info: Some(SupplementalSnapshotInfo { bank_snapshot_dir: PathBuf::default(), archive_format: ArchiveFormat::Tar, diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index 3ea26e8a82..8c07cf2a14 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -3,7 +3,7 @@ use { rand::{thread_rng, Rng}, rayon::prelude::*, solana_accounts_db::{ - accounts_db::{AccountsDb, LoadHint, INCLUDE_SLOT_IN_HASH_TESTS}, + accounts_db::{AccountsDb, LoadHint}, ancestors::Ancestors, }, solana_sdk::{ @@ -60,14 +60,7 @@ fn test_shrink_and_clean() { for (pubkey, account) in alive_accounts.iter_mut() { account.checked_sub_lamports(1).unwrap(); - accounts.store_cached( - ( - current_slot, - &[(&*pubkey, &*account)][..], - INCLUDE_SLOT_IN_HASH_TESTS, - ), - None, - ); + accounts.store_cached((current_slot, &[(&*pubkey, &*account)][..]), None); } accounts.add_root(current_slot); accounts.flush_accounts_cache(true, Some(current_slot)); @@ -133,16 +126,13 @@ fn test_bad_bank_hash() 
{ .iter() .map(|idx| (&accounts_keys[*idx].0, &accounts_keys[*idx].1)) .collect(); - db.store_cached( - (some_slot, &account_refs[..], INCLUDE_SLOT_IN_HASH_TESTS), - None, - ); + db.store_cached((some_slot, &account_refs[..]), None); for pass in 0..2 { for (key, account) in &account_refs { assert_eq!( db.load_account_hash(&ancestors, key, Some(some_slot), LoadHint::Unspecified) .unwrap(), - AccountsDb::hash_account(some_slot, *account, key, INCLUDE_SLOT_IN_HASH_TESTS) + AccountsDb::hash_account(*account, key) ); } if pass == 0 {