From 14055fc3e2f2034712de8875cc2adfc45553c9e0 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Wed, 11 Jan 2023 19:57:44 -0600
Subject: [PATCH] remove type SnapshotStorage and uses (#29661)

---
 runtime/src/accounts_db.rs          | 20 +++++++++-----------
 runtime/src/serde_snapshot.rs       | 12 ++++++------
 runtime/src/serde_snapshot/tests.rs |  2 +-
 runtime/src/snapshot_minimizer.rs   | 11 +++++++----
 runtime/src/sorted_storages.rs      |  9 ++++-----
 5 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index ab0fed762d..487e896392 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -609,11 +609,10 @@ impl<'a> MultiThreadProgress<'a> {
 /// An offset into the AccountsDb::storage vector
 pub type AtomicAppendVecId = AtomicU32;
 pub type AppendVecId = u32;
-pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
 pub type SnapshotStorageOne = Arc<AccountStorageEntry>;
-pub type SnapshotStorages = Vec<SnapshotStorage>;
+pub type SnapshotStorages = Vec<Vec<SnapshotStorageOne>>;
 /// exactly 1 append vec per slot
-pub type SnapshotStoragesOne = SnapshotStorage;
+pub type SnapshotStoragesOne = Vec<SnapshotStorageOne>;
 
 // Each slot has a set of storage entries.
 pub(crate) type SlotStores = Arc<RwLock<HashMap<AppendVecId, Arc<AccountStorageEntry>>>>;
@@ -1143,7 +1142,7 @@ impl RecycleStores {
         self.entries.iter()
     }
 
-    fn add_entries(&mut self, new_entries: SnapshotStorage) {
+    fn add_entries(&mut self, new_entries: Vec<SnapshotStorageOne>) {
         let now = Instant::now();
         for new_entry in new_entries {
             self.total_bytes += new_entry.total_bytes();
@@ -1151,7 +1150,7 @@ impl RecycleStores {
         }
     }
 
-    fn expire_old_entries(&mut self) -> SnapshotStorage {
+    fn expire_old_entries(&mut self) -> Vec<SnapshotStorageOne> {
         let mut expired = vec![];
         let now = Instant::now();
         let mut expired_bytes = 0;
@@ -3917,7 +3916,7 @@ impl AccountsDb {
         slot: Slot,
         add_dirty_stores: bool,
         shrink_in_progress: Option<ShrinkInProgress>,
-    ) -> SnapshotStorage {
+    ) -> Vec<SnapshotStorageOne> {
         let mut dead_storages = Vec::default();
 
         let mut not_retaining_store = |store: &Arc<AccountStorageEntry>| {
@@ -3945,7 +3944,7 @@ impl AccountsDb {
 
     pub(crate) fn drop_or_recycle_stores(
         &self,
-        dead_storages: SnapshotStorage,
+        dead_storages: Vec<SnapshotStorageOne>,
         stats: &ShrinkStats,
     ) {
         let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
@@ -4168,7 +4167,7 @@ impl AccountsDb {
     }
 
     #[cfg(test)]
-    fn get_storages_for_slot(&self, slot: Slot) -> Option<SnapshotStorage> {
+    fn get_storages_for_slot(&self, slot: Slot) -> Option<Vec<SnapshotStorageOne>> {
         self.storage
             .get_slot_storage_entry(slot)
             .map(|storage| vec![storage])
@@ -13877,7 +13876,7 @@ pub mod tests {
         }
     }
 
-    fn slot_stores(db: &AccountsDb, slot: Slot) -> SnapshotStorage {
+    fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec<SnapshotStorageOne> {
         db.get_storages_for_slot(slot).unwrap_or_default()
     }
 
@@ -14233,8 +14232,7 @@ pub mod tests {
 
     impl AccountsDb {
         fn get_and_assert_single_storage(&self, slot: Slot) -> Arc<AccountStorageEntry> {
-            let mut storage_maps: SnapshotStorage =
-                self.get_storages_for_slot(slot).unwrap_or_default();
+            let mut storage_maps = self.get_storages_for_slot(slot).unwrap_or_default();
             assert_eq!(storage_maps.len(), 1);
             storage_maps.pop().unwrap()
         }
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index 2fbc7b1f41..262c4d5dfb 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -3,7 +3,7 @@ use {
         accounts::Accounts,
         accounts_db::{
             AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, AppendVecId,
-            AtomicAppendVecId, BankHashInfo, IndexGenerationInfo, SnapshotStorage,
+            AtomicAppendVecId, BankHashInfo, IndexGenerationInfo, SnapshotStorageOne,
         },
         accounts_hash::AccountsHash,
         accounts_index::AccountSecondaryIndexes,
@@ -345,7 +345,7 @@ pub(crate) fn bank_to_stream<W>(
     serde_style: SerdeStyle,
     stream: &mut BufWriter<W>,
     bank: &Bank,
-    snapshot_storages: &[SnapshotStorage],
+    snapshot_storages: &[Vec<SnapshotStorageOne>],
 ) -> Result<(), Error>
 where
     W: Write,
@@ -367,7 +367,7 @@ pub(crate) fn bank_to_stream_no_extra_fields<W>(
     serde_style: SerdeStyle,
     stream: &mut BufWriter<W>,
     bank: &Bank,
-    snapshot_storages: &[SnapshotStorage],
+    snapshot_storages: &[Vec<SnapshotStorageOne>],
 ) -> Result<(), Error>
 where
     W: Write,
@@ -445,7 +445,7 @@ pub fn reserialize_bank_with_new_accounts_hash(
 
 struct SerializableBankAndStorage<'a, C> {
     bank: &'a Bank,
-    snapshot_storages: &'a [SnapshotStorage],
+    snapshot_storages: &'a [Vec<SnapshotStorageOne>],
     phantom: std::marker::PhantomData<C>,
 }
 
@@ -461,7 +461,7 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
 #[cfg(test)]
 struct SerializableBankAndStorageNoExtra<'a, C> {
     bank: &'a Bank,
-    snapshot_storages: &'a [SnapshotStorage],
+    snapshot_storages: &'a [Vec<SnapshotStorageOne>],
     phantom: std::marker::PhantomData<C>,
 }
 
@@ -494,7 +494,7 @@ impl<'a, C> From<SerializableBankAndStorageNoExtra<'a, C>> for SerializableBankA
 struct SerializableAccountsDb<'a, C> {
     accounts_db: &'a AccountsDb,
     slot: Slot,
-    account_storage_entries: &'a [SnapshotStorage],
+    account_storage_entries: &'a [Vec<SnapshotStorageOne>],
     phantom: std::marker::PhantomData<C>,
 }
 
diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs
index 14f07313bc..0c5ac2934c 100644
--- a/runtime/src/serde_snapshot/tests.rs
+++ b/runtime/src/serde_snapshot/tests.rs
@@ -148,7 +148,7 @@ fn accountsdb_to_stream<W>(
     stream: &mut W,
     accounts_db: &AccountsDb,
     slot: Slot,
-    account_storage_entries: &[SnapshotStorage],
+    account_storage_entries: &[Vec<SnapshotStorageOne>],
 ) -> Result<(), Error>
 where
     W: Write,
diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs
index de27747561..2cd0a20dd7 100644
--- a/runtime/src/snapshot_minimizer.rs
+++ b/runtime/src/snapshot_minimizer.rs
@@ -3,8 +3,7 @@
 use {
     crate::{
         accounts_db::{
-            AccountsDb, GetUniqueAccountsResult, PurgeStats, SnapshotStorage, SnapshotStorageOne,
-            StoreReclaims,
+            AccountsDb, GetUniqueAccountsResult, PurgeStats, SnapshotStorageOne, StoreReclaims,
         },
         bank::Bank,
         builtins, static_ids,
@@ -274,7 +273,7 @@ impl<'a> SnapshotMinimizer<'a> {
     fn process_snapshot_storages(
         &self,
         minimized_slot_set: DashSet<Slot>,
-    ) -> (Vec<Slot>, SnapshotStorage) {
+    ) -> (Vec<Slot>, Vec<SnapshotStorageOne>) {
         let snapshot_storages = self
             .accounts_db()
             .get_snapshot_storages(..=self.starting_slot, None)
@@ -300,7 +299,11 @@ impl<'a> SnapshotMinimizer<'a> {
     }
 
     /// Creates new storage replacing `storages` that contains only accounts in `minimized_account_set`.
-    fn filter_storage(&self, storage: &SnapshotStorageOne, dead_storages: &Mutex<SnapshotStorage>) {
+    fn filter_storage(
+        &self,
+        storage: &SnapshotStorageOne,
+        dead_storages: &Mutex<Vec<SnapshotStorageOne>>,
+    ) {
         let slot = storage.slot();
         let GetUniqueAccountsResult {
             stored_accounts, ..
diff --git a/runtime/src/sorted_storages.rs b/runtime/src/sorted_storages.rs
index 403ce203f1..3ea64fcba9 100644
--- a/runtime/src/sorted_storages.rs
+++ b/runtime/src/sorted_storages.rs
@@ -28,7 +28,7 @@ impl<'a> SortedStorages<'a> {
         }
     }
 
-    /// primary method of retrieving (Slot, SnapshotStorage)
+    /// primary method of retrieving (Slot, SnapshotStorageOne)
     pub fn iter_range<R>(&'a self, range: &R) -> SortedStoragesIter<'a>
     where
         R: RangeBounds<Slot>,
@@ -60,9 +60,8 @@ impl<'a> SortedStorages<'a> {
         self.storages.len()
     }
 
-    // assumptions:
-    // 1. each SnapshotStorage.!is_empty()
-    // 2. SnapshotStorage.first().unwrap().get_slot() is unique from all other SnapshotStorage items.
+    // assumption:
+    // source.slot() is unique from all other items in 'source'
     pub fn new(source: &'a [SnapshotStorageOne]) -> Self {
         let slots = source.iter().map(|storage| {
             storage.slot() // this must be unique. Will be enforced in new_with_slots
@@ -71,7 +70,7 @@ impl<'a> SortedStorages<'a> {
     }
 
     /// create `SortedStorages` from 'source' iterator.
-    /// 'source' contains a SnapshotStorage and its associated slot
+    /// 'source' contains a SnapshotStorageOne and its associated slot
     /// 'source' does not have to be sorted in any way, but is assumed to not have duplicate slot #s
     pub fn new_with_slots(
         source: impl Iterator<Item = (&'a SnapshotStorageOne, Slot)> + Clone,