diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs
index 140ea707da..e20ecfdd6c 100644
--- a/runtime/src/accounts.rs
+++ b/runtime/src/accounts.rs
@@ -535,15 +535,15 @@ impl Accounts {
         }
     }
 
-    pub fn bank_hash_at(&self, slot_id: Slot) -> Hash {
-        self.bank_hash_info_at(slot_id).hash
+    pub fn bank_hash_at(&self, slot: Slot) -> Hash {
+        self.bank_hash_info_at(slot).hash
     }
 
-    pub fn bank_hash_info_at(&self, slot_id: Slot) -> BankHashInfo {
-        let delta_hash = self.accounts_db.get_accounts_delta_hash(slot_id);
+    pub fn bank_hash_info_at(&self, slot: Slot) -> BankHashInfo {
+        let delta_hash = self.accounts_db.get_accounts_delta_hash(slot);
         let bank_hashes = self.accounts_db.bank_hashes.read().unwrap();
         let mut hash_info = bank_hashes
-            .get(&slot_id)
+            .get(&slot)
             .expect("No bank hash was found for this bank, that should not be possible")
             .clone();
         hash_info.hash = delta_hash;
diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 26e772480d..6e31336398 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -19,7 +19,7 @@
 //! commit for each slot entry would be indexed.
 
 use crate::{
-    accounts_index::AccountsIndex,
+    accounts_index::{AccountsIndex, SlotList, SlotSlice},
     append_vec::{AppendVec, StoredAccount, StoredMeta},
     bank::deserialize_from_snapshot,
 };
@@ -115,11 +115,11 @@ impl<'de> Visitor<'de> for AccountStorageVisitor {
         M: MapAccess<'de>,
     {
         let mut map = HashMap::new();
-        while let Some((slot_id, storage_entries)) = access.next_entry()? {
+        while let Some((slot, storage_entries)) = access.next_entry()? {
             let storage_entries: Vec<AccountStorageEntry> = storage_entries;
-            let storage_slot_map = map.entry(slot_id).or_insert_with(HashMap::new);
+            let storage_slot_map = map.entry(slot).or_insert_with(HashMap::new);
             for mut storage in storage_entries {
-                storage.slot_id = slot_id;
+                storage.slot = slot;
                 storage_slot_map.insert(storage.id, Arc::new(storage));
             }
         }
@@ -157,7 +157,7 @@ impl<'a> Serialize for AccountStorageSerialize<'a> {
         let mut count = 0;
         let mut serialize_account_storage_timer = Measure::start("serialize_account_storage_ms");
         for storage_entries in self.account_storage_entries {
-            map.serialize_entry(&storage_entries.first().unwrap().slot_id, storage_entries)?;
+            map.serialize_entry(&storage_entries.first().unwrap().slot, storage_entries)?;
             count += storage_entries.len();
         }
         serialize_account_storage_timer.stop();
@@ -201,7 +201,7 @@ pub struct AccountStorageEntry {
     id: AppendVecId,
 
     #[serde(skip)]
-    slot_id: Slot,
+    slot: Slot,
 
     /// storage holding the accounts
     accounts: AppendVec,
@@ -215,14 +215,14 @@
 }
 
 impl AccountStorageEntry {
-    pub fn new(path: &Path, slot_id: Slot, id: usize, file_size: u64) -> Self {
-        let tail = AppendVec::new_relative_path(slot_id, id);
+    pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self {
+        let tail = AppendVec::new_relative_path(slot, id);
         let path = Path::new(path).join(&tail);
         let accounts = AppendVec::new(&path, true, file_size as usize);
 
         AccountStorageEntry {
             id,
-            slot_id,
+            slot,
             accounts,
             count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
         }
@@ -261,8 +261,8 @@ impl AccountStorageEntry {
         self.count() > 0
     }
 
-    pub fn slot_id(&self) -> Slot {
-        self.slot_id
+    pub fn slot(&self) -> Slot {
+        self.slot
     }
 
     pub fn append_vec_id(&self) -> AppendVecId {
@@ -315,7 +315,7 @@ impl AccountStorageEntry {
         assert!(
             count > 0,
            "double remove of account in slot: {}/store: {}!!",
-            self.slot_id,
+            self.slot,
             self.id
         );
 
@@ -572,7 +572,7 @@ impl AccountsDB {
         let new_storage_map: Result<HashMap<Slot, HashMap<AppendVecId, Arc<AccountStorageEntry>>>, IOError> = storage
            .0
            .into_iter()
-            .map(|(slot_id, mut slot_storage)| {
+            .map(|(slot, mut slot_storage)| {
                 let mut new_slot_storage = HashMap::new();
                 for (id, storage_entry) in slot_storage.drain() {
                     let path_index = thread_rng().gen_range(0, self.paths.len());
@@ -583,7 +583,7 @@ impl AccountsDB {
                     // Move the corresponding AppendVec from the snapshot into the directory pointed
                     // at by `local_dir`
                     let append_vec_relative_path =
-                        AppendVec::new_relative_path(slot_id, storage_entry.id);
+                        AppendVec::new_relative_path(slot, storage_entry.id);
                     let append_vec_abs_path = stream_append_vecs_path
                         .as_ref()
                         .join(&append_vec_relative_path);
@@ -616,7 +616,7 @@ impl AccountsDB {
                         .map_err(|e| AccountsDB::get_io_error(&e.to_string()))?;
                     new_slot_storage.insert(id, Arc::new(u_storage_entry));
                 }
-                Ok((slot_id, new_slot_storage))
+                Ok((slot, new_slot_storage))
             })
             .collect();
@@ -626,7 +626,7 @@ impl AccountsDB {
         // discard any slots with no storage entries
         // this can happen if a non-root slot was serialized
         // but non-root stores should not be included in the snapshot
-        storage.0.retain(|_slot_id, stores| !stores.is_empty());
+        storage.0.retain(|_slot, stores| !stores.is_empty());
 
         let version: u64 = deserialize_from(&mut stream)
             .map_err(|_| AccountsDB::get_io_error("write version deserialize error"))?;
@@ -655,10 +655,10 @@ impl AccountsDB {
         Ok(())
     }
 
-    fn new_storage_entry(&self, slot_id: Slot, path: &Path, size: u64) -> AccountStorageEntry {
+    fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry {
         AccountStorageEntry::new(
             path,
-            slot_id,
+            slot,
             self.next_id.fetch_add(1, Ordering::Relaxed),
             size,
         )
@@ -761,8 +761,8 @@ impl AccountsDB {
         let mut store_counts: HashMap<AppendVecId, usize> = HashMap::new();
         let storage = self.storage.read().unwrap();
         for account_infos in purges.values() {
-            for (slot_id, account_info) in account_infos {
-                let slot_storage = storage.0.get(&slot_id).unwrap();
+            for (slot, account_info) in account_infos {
+                let slot_storage = storage.0.get(&slot).unwrap();
                 let store = slot_storage.get(&account_info.store_id).unwrap();
                 if let Some(store_count) = store_counts.get_mut(&account_info.store_id) {
                     *store_count -= 1;
@@ -777,14 +777,14 @@ impl AccountsDB {
         for account_infos in purges.values() {
             let mut no_delete = false;
-            for (_slot_id, account_info) in account_infos {
+            for (_slot, account_info) in account_infos {
                 if *store_counts.get(&account_info.store_id).unwrap() != 0 {
                     no_delete = true;
                     break;
                 }
             }
             if no_delete {
-                for (_slot_id, account_info) in account_infos {
+                for (_slot, account_info) in account_infos {
                     *store_counts.get_mut(&account_info.store_id).unwrap() += 1;
                 }
             }
@@ -796,7 +796,7 @@ impl AccountsDB {
         let mut purge_filter = Measure::start("purge_filter");
         purges.retain(|pubkey, account_infos| {
             let mut would_unref_count = 0;
-            for (_slot_id, account_info) in account_infos {
+            for (_slot, account_info) in account_infos {
                 if *store_counts.get(&account_info.store_id).unwrap() == 0 {
                     would_unref_count += 1;
                 } else {
@@ -838,7 +838,7 @@ impl AccountsDB {
         );
     }
 
-    fn handle_reclaims(&self, reclaims: &[(Slot, AccountInfo)]) {
+    fn handle_reclaims(&self, reclaims: SlotSlice<AccountInfo>) {
         let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts");
         let dead_slots = self.remove_dead_accounts(reclaims);
         dead_accounts.stop();
@@ -906,7 +906,7 @@ impl AccountsDB {
 
     /// Scan a specific slot through all the account storage in parallel with sequential read
     // PERF: Sequentially read each storage entry in parallel
-    pub fn scan_account_storage<F, B>(&self, slot_id: Slot, scan_func: F) -> Vec<B>
+    pub fn scan_account_storage<F, B>(&self, slot: Slot, scan_func: F) -> Vec<B>
     where
         F: Fn(&StoredAccount, AppendVecId, &mut B) -> () + Send + Sync,
         B: Send + Default,
     {
@@ -916,7 +916,7 @@ impl AccountsDB {
             .read()
             .unwrap()
             .0
-            .get(&slot_id)
+            .get(&slot)
             .unwrap_or(&HashMap::new())
             .values()
             .cloned()
@@ -984,11 +984,11 @@ impl AccountsDB {
         Self::load(&storage, ancestors, &accounts_index, pubkey)
     }
 
-    fn find_storage_candidate(&self, slot_id: Slot) -> Arc<AccountStorageEntry> {
+    fn find_storage_candidate(&self, slot: Slot) -> Arc<AccountStorageEntry> {
         let mut create_extra = false;
         let stores = self.storage.read().unwrap();
 
-        if let Some(slot_stores) = stores.0.get(&slot_id) {
+        if let Some(slot_stores) = stores.0.get(&slot) {
             if !slot_stores.is_empty() {
                 if slot_stores.len() <= self.min_num_stores {
                     let mut total_accounts = 0;
@@ -1010,7 +1010,7 @@ impl AccountsDB {
                         let ret = store.clone();
                         drop(stores);
                         if create_extra {
-                            self.create_and_insert_store(slot_id, self.file_size);
+                            self.create_and_insert_store(slot, self.file_size);
                         }
                         return ret;
                     }
@@ -1024,19 +1024,19 @@ impl AccountsDB {
 
         drop(stores);
 
-        let store = self.create_and_insert_store(slot_id, self.file_size);
+        let store = self.create_and_insert_store(slot, self.file_size);
         store.try_available();
         store
     }
 
-    fn create_and_insert_store(&self, slot_id: Slot, size: u64) -> Arc<AccountStorageEntry> {
+    fn create_and_insert_store(&self, slot: Slot, size: u64) -> Arc<AccountStorageEntry> {
         let path_index = thread_rng().gen_range(0, self.paths.len());
         let store =
-            Arc::new(self.new_storage_entry(slot_id, &Path::new(&self.paths[path_index]), size));
+            Arc::new(self.new_storage_entry(slot, &Path::new(&self.paths[path_index]), size));
         let store_for_index = store.clone();
 
         let mut stores = self.storage.write().unwrap();
-        let slot_storage = stores.0.entry(slot_id).or_insert_with(HashMap::new);
+        let slot_storage = stores.0.entry(slot).or_insert_with(HashMap::new);
         slot_storage.insert(store.id, store_for_index);
         store
     }
@@ -1137,7 +1137,7 @@ impl AccountsDB {
 
     fn store_accounts(
         &self,
-        slot_id: Slot,
+        slot: Slot,
         accounts: &[(&Pubkey, &Account)],
         hashes: &[Hash],
     ) -> Vec<AccountInfo> {
@@ -1164,7 +1164,7 @@ impl AccountsDB {
             .collect();
         let mut infos: Vec<AccountInfo> = Vec::with_capacity(with_meta.len());
         while infos.len() < with_meta.len() {
-            let storage = self.find_storage_candidate(slot_id);
+            let storage = self.find_storage_candidate(slot);
             let rvs = storage
                 .accounts
                 .append_accounts(&with_meta[infos.len()..], &hashes[infos.len()..]);
@@ -1174,7 +1174,7 @@ impl AccountsDB {
                 // See if an account overflows the default append vec size.
                let data_len = (with_meta[infos.len()].1.data.len() + 4096) as u64;
                 if data_len > self.file_size {
-                    self.create_and_insert_store(slot_id, data_len * 2);
+                    self.create_and_insert_store(slot, data_len * 2);
                 }
                 continue;
             }
@@ -1364,10 +1364,10 @@ impl AccountsDB {
         }
     }
 
-    pub fn get_accounts_delta_hash(&self, slot_id: Slot) -> Hash {
+    pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash {
         let mut scan = Measure::start("scan");
         let mut accumulator: Vec<HashMap<Pubkey, (u64, Hash)>> = self.scan_account_storage(
-            slot_id,
+            slot,
             |stored_account: &StoredAccount,
              _store_id: AppendVecId,
              accum: &mut HashMap<Pubkey, (u64, Hash)>| {
@@ -1397,11 +1397,11 @@ impl AccountsDB {
 
     fn update_index(
         &self,
-        slot_id: Slot,
+        slot: Slot,
         infos: Vec<AccountInfo>,
         accounts: &[(&Pubkey, &Account)],
-    ) -> Vec<(Slot, AccountInfo)> {
-        let mut reclaims: Vec<(Slot, AccountInfo)> = Vec::with_capacity(infos.len() * 2);
+    ) -> SlotList<AccountInfo> {
+        let mut reclaims = SlotList::<AccountInfo>::with_capacity(infos.len() * 2);
         let index = self.accounts_index.read().unwrap();
         let mut update_index_work = Measure::start("update_index_work");
         let inserts: Vec<_> = infos
@@ -1410,7 +1410,7 @@ impl AccountsDB {
             .filter_map(|(info, pubkey_account)| {
                 let pubkey = pubkey_account.0;
                 index
-                    .update(slot_id, pubkey, info, &mut reclaims)
+                    .update(slot, pubkey, info, &mut reclaims)
                     .map(|info| (pubkey, info))
             })
             .collect();
@@ -1419,26 +1419,26 @@ impl AccountsDB {
         if !inserts.is_empty() {
             let mut index = self.accounts_index.write().unwrap();
             for (pubkey, info) in inserts {
-                index.insert(slot_id, pubkey, info, &mut reclaims);
+                index.insert(slot, pubkey, info, &mut reclaims);
             }
         }
         update_index_work.stop();
         reclaims
     }
 
-    fn remove_dead_accounts(&self, reclaims: &[(Slot, AccountInfo)]) -> HashSet<Slot> {
+    fn remove_dead_accounts(&self, reclaims: SlotSlice<AccountInfo>) -> HashSet<Slot> {
         let storage = self.storage.read().unwrap();
         let mut dead_slots = HashSet::new();
-        for (slot_id, account_info) in reclaims {
-            if let Some(slot_storage) = storage.0.get(slot_id) {
+        for (slot, account_info) in reclaims {
+            if let Some(slot_storage) = storage.0.get(slot) {
                 if let Some(store) = slot_storage.get(&account_info.store_id) {
                     assert_eq!(
-                        *slot_id, store.slot_id,
+                        *slot, store.slot,
                         "AccountDB::accounts_index corrupted. Storage should only point to one slot"
                     );
                     let count = store.remove_account();
                     if count == 0 {
-                        dead_slots.insert(*slot_id);
+                        dead_slots.insert(*slot);
                     }
                 }
             }
@@ -1511,19 +1511,19 @@ impl AccountsDB {
         }
     }
 
-    fn hash_accounts(&self, slot_id: Slot, accounts: &[(&Pubkey, &Account)]) -> Vec<Hash> {
+    fn hash_accounts(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) -> Vec<Hash> {
         let mut stats = BankHashStats::default();
         let hashes: Vec<_> = accounts
             .iter()
             .map(|(pubkey, account)| {
                 stats.update(account);
-                Self::hash_account(slot_id, account, pubkey)
+                Self::hash_account(slot, account, pubkey)
             })
             .collect();
 
         let mut bank_hashes = self.bank_hashes.write().unwrap();
         let slot_info = bank_hashes
-            .entry(slot_id)
+            .entry(slot)
             .or_insert_with(BankHashInfo::default);
         slot_info.stats.merge(&stats);
 
@@ -1584,19 +1584,19 @@ impl AccountsDB {
     }
 
     /// Store the account update.
-    pub fn store(&self, slot_id: Slot, accounts: &[(&Pubkey, &Account)]) {
+    pub fn store(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) {
         self.assert_frozen_accounts(accounts);
-        let hashes = self.hash_accounts(slot_id, accounts);
-        self.store_with_hashes(slot_id, accounts, &hashes);
+        let hashes = self.hash_accounts(slot, accounts);
+        self.store_with_hashes(slot, accounts, &hashes);
     }
 
-    fn store_with_hashes(&self, slot_id: Slot, accounts: &[(&Pubkey, &Account)], hashes: &[Hash]) {
+    fn store_with_hashes(&self, slot: Slot, accounts: &[(&Pubkey, &Account)], hashes: &[Hash]) {
         let mut store_accounts = Measure::start("store::store_accounts");
-        let infos = self.store_accounts(slot_id, accounts, hashes);
+        let infos = self.store_accounts(slot, accounts, hashes);
         store_accounts.stop();
 
         let mut update_index = Measure::start("store::update_index");
-        let reclaims = self.update_index(slot_id, infos, accounts);
+        let reclaims = self.update_index(slot, infos, accounts);
         update_index.stop();
         trace!("reclaim: {}", reclaims.len());
 
@@ -1651,10 +1651,10 @@ impl AccountsDB {
         let mut slots: Vec<Slot> = storage.0.keys().cloned().collect();
         slots.sort();
         let mut accounts_index = self.accounts_index.write().unwrap();
-        for slot_id in slots.iter() {
+        for slot in slots.iter() {
             let accumulator: Vec<HashMap<Pubkey, Vec<(u64, AccountInfo)>>> = self
                 .scan_account_storage(
-                    *slot_id,
+                    *slot,
                     |stored_account: &StoredAccount,
                      store_id: AppendVecId,
                      accum: &mut HashMap<Pubkey, Vec<(u64, AccountInfo)>>| {
@@ -1687,19 +1687,14 @@ impl AccountsDB {
                 for (pubkey, account_infos) in accounts_map.iter_mut() {
                     account_infos.sort_by(|a, b| a.0.cmp(&b.0));
                     for (_, account_info) in account_infos {
-                        accounts_index.insert(
-                            *slot_id,
-                            pubkey,
-                            account_info.clone(),
-                            &mut _reclaims,
-                        );
+                        accounts_index.insert(*slot, pubkey, account_info.clone(), &mut _reclaims);
                     }
                 }
             }
         }
 
         // Need to add these last, otherwise older updates will be cleaned
-        for slot_id in slots {
-            accounts_index.add_root(slot_id);
+        for slot in slots {
+            accounts_index.add_root(slot);
         }
 
         let mut counts = HashMap::new();
@@ -2086,7 +2081,7 @@ pub mod tests {
             .values()
             .flat_map(|x| x.values())
         {
-            *append_vec_histogram.entry(storage.slot_id).or_insert(0) += 1;
+            *append_vec_histogram.entry(storage.slot).or_insert(0) += 1;
         }
         for count in append_vec_histogram.values() {
             assert!(*count >= 2);
         }
@@ -2806,7 +2801,7 @@ pub mod tests {
     #[test]
     #[ignore]
     fn test_store_account_stress() {
-        let slot_id = 42;
+        let slot = 42;
         let num_threads = 2;
 
         let min_file_bytes = std::mem::size_of::<StoredMeta>()
@@ -2814,7 +2809,7 @@ pub mod tests {
 
         let db = Arc::new(AccountsDB::new_sized(Vec::new(), min_file_bytes as u64));
 
-        db.add_root(slot_id);
+        db.add_root(slot);
         let thread_hdls: Vec<_> = (0..num_threads)
             .into_iter()
             .map(|_| {
@@ -2828,11 +2823,11 @@ pub mod tests {
                 loop {
                     let account_bal = thread_rng().gen_range(1, 99);
                     account.lamports = account_bal;
-                    db.store(slot_id, &[(&pubkey, &account)]);
-                    let (account, slot) = db.load_slow(&HashMap::new(), &pubkey).expect(
+                    db.store(slot, &[(&pubkey, &account)]);
+                    let (account, loaded_slot) = db.load_slow(&HashMap::new(), &pubkey).expect(
                         &format!("Could not fetch stored account {}, iter {}", pubkey, i),
                     );
-                    assert_eq!(slot, slot_id);
+                    assert_eq!(loaded_slot, slot);
                     assert_eq!(account.lamports, account_bal);
                     i += 1;
                 }
diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs
index 243f85531d..314e78fc76 100644
--- a/runtime/src/accounts_index.rs
+++ b/runtime/src/accounts_index.rs
@@ -1,12 +1,12 @@
-use solana_sdk::pubkey::Pubkey;
+use solana_sdk::{clock::Slot, pubkey::Pubkey};
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::{
     collections::{HashMap, HashSet},
     sync::{RwLock, RwLockReadGuard},
 };
 
-pub type Slot = u64;
-type SlotList<T> = Vec<(Slot, T)>;
+pub type SlotList<T> = Vec<(Slot, T)>;
+pub type SlotSlice<'s, T> = &'s [(Slot, T)];
 
 pub type RefCount = u64;
 type AccountMapEntry<T> = (AtomicU64, RwLock<SlotList<T>>);
@@ -32,35 +32,36 @@ impl<T: Clone> AccountsIndex<T> {
         }
     }
 
-    fn get_rooted_entries(&self, list: &[(Slot, T)]) -> Vec<(Slot, T)> {
-        list.iter()
+    fn get_rooted_entries(&self, slice: SlotSlice<T>) -> SlotList<T> {
+        slice
+            .iter()
             .filter(|(slot, _)| self.is_root(*slot))
             .cloned()
             .collect()
     }
 
-    pub fn would_purge(&self, pubkey: &Pubkey) -> Vec<(Slot, T)> {
+    pub fn would_purge(&self, pubkey: &Pubkey) -> SlotList<T> {
         let list = &self.account_maps.get(&pubkey).unwrap().1.read().unwrap();
         self.get_rooted_entries(&list)
     }
 
     // filter any rooted entries and return them along with a bool that indicates
     // if this account has no more entries.
-    pub fn purge(&self, pubkey: &Pubkey) -> (Vec<(Slot, T)>, bool) {
+    pub fn purge(&self, pubkey: &Pubkey) -> (SlotList<T>, bool) {
         let list = &mut self.account_maps.get(&pubkey).unwrap().1.write().unwrap();
         let reclaims = self.get_rooted_entries(&list);
         list.retain(|(slot, _)| !self.is_root(*slot));
         (reclaims, list.is_empty())
     }
 
-    // find the latest slot and T in a list for a given ancestor
-    // returns index into 'list' if found, None if not.
-    fn latest_slot(&self, ancestors: &HashMap<Slot, usize>, list: &[(Slot, T)]) -> Option<usize> {
+    // find the latest slot and T in a slice for a given ancestor
+    // returns index into 'slice' if found, None if not.
+    fn latest_slot(&self, ancestors: &HashMap<Slot, usize>, slice: SlotSlice<T>) -> Option<usize> {
         let mut max = 0;
         let mut rv = None;
-        for (i, (slot, _t)) in list.iter().rev().enumerate() {
+        for (i, (slot, _t)) in slice.iter().rev().enumerate() {
             if *slot >= max && (ancestors.contains_key(slot) || self.is_root(*slot)) {
-                rv = Some((list.len() - 1) - i);
+                rv = Some((slice.len() - 1) - i);
                 max = *slot;
             }
         }
@@ -82,9 +83,9 @@ impl<T: Clone> AccountsIndex<T> {
         })
     }
 
-    pub fn get_max_root(roots: &HashSet<Slot>, slot_vec: &[(Slot, T)]) -> Slot {
+    pub fn get_max_root(roots: &HashSet<Slot>, slice: SlotSlice<T>) -> Slot {
         let mut max_root = 0;
-        for (f, _) in slot_vec.iter() {
+        for (f, _) in slice.iter() {
             if *f > max_root && roots.contains(f) {
                 max_root = *f;
             }
@@ -97,12 +98,11 @@ impl<T: Clone> AccountsIndex<T> {
         slot: Slot,
         pubkey: &Pubkey,
         account_info: T,
-        reclaims: &mut Vec<(Slot, T)>,
+        reclaims: &mut SlotList<T>,
     ) {
-        let _slot_vec = self
-            .account_maps
+        self.account_maps
             .entry(*pubkey)
-            .or_insert_with(|| (AtomicU64::new(0), RwLock::new(Vec::with_capacity(32))));
+            .or_insert_with(|| (AtomicU64::new(0), RwLock::new(SlotList::with_capacity(32))));
         self.update(slot, pubkey, account_info, reclaims);
     }
@@ -115,18 +115,18 @@ impl<T: Clone> AccountsIndex<T> {
         slot: Slot,
         pubkey: &Pubkey,
         account_info: T,
-        reclaims: &mut Vec<(Slot, T)>,
+        reclaims: &mut SlotList<T>,
     ) -> Option<T> {
         if let Some(lock) = self.account_maps.get(pubkey) {
-            let mut slot_vec = &mut lock.1.write().unwrap();
+            let mut list = &mut lock.1.write().unwrap();
             // filter out other dirty entries
-            reclaims.extend(slot_vec.iter().filter(|(f, _)| *f == slot).cloned());
-            slot_vec.retain(|(f, _)| *f != slot);
+            reclaims.extend(list.iter().filter(|(f, _)| *f == slot).cloned());
+            list.retain(|(f, _)| *f != slot);
             lock.0.fetch_add(1, Ordering::Relaxed);
-            slot_vec.push((slot, account_info));
+            list.push((slot, account_info));
             // now, do lazy clean
-            self.purge_older_root_entries(&mut slot_vec, reclaims);
+            self.purge_older_root_entries(&mut list, reclaims);
             None
         } else {
@@ -135,43 +135,38 @@ impl<T: Clone> AccountsIndex<T> {
     }
 
     pub fn unref_from_storage(&self, pubkey: &Pubkey) {
-        let locked_slot_vec = self.account_maps.get(pubkey);
-        if let Some(slot_vec) = locked_slot_vec {
-            slot_vec.0.fetch_sub(1, Ordering::Relaxed);
+        let locked_entry = self.account_maps.get(pubkey);
+        if let Some(entry) = locked_entry {
+            entry.0.fetch_sub(1, Ordering::Relaxed);
         }
     }
 
     pub fn ref_count_from_storage(&self, pubkey: &Pubkey) -> RefCount {
-        let locked_slot_vec = self.account_maps.get(pubkey);
-        if let Some(slot_vec) = locked_slot_vec {
-            slot_vec.0.load(Ordering::Relaxed)
+        let locked_entry = self.account_maps.get(pubkey);
+        if let Some(entry) = locked_entry {
+            entry.0.load(Ordering::Relaxed)
         } else {
             0
         }
     }
 
-    fn purge_older_root_entries(
-        &self,
-        slot_vec: &mut Vec<(Slot, T)>,
-        reclaims: &mut Vec<(Slot, T)>,
-    ) {
+    fn purge_older_root_entries(&self, list: &mut SlotList<T>, reclaims: &mut SlotList<T>) {
         let roots = &self.roots;
 
-        let max_root = Self::get_max_root(roots, &slot_vec);
+        let max_root = Self::get_max_root(roots, &list);
 
         reclaims.extend(
-            slot_vec
-                .iter()
+            list.iter()
                 .filter(|(slot, _)| Self::can_purge(max_root, *slot))
                 .cloned(),
        );
-        slot_vec.retain(|(slot, _)| !Self::can_purge(max_root, *slot));
+        list.retain(|(slot, _)| !Self::can_purge(max_root, *slot));
     }
 
-    pub fn clean_rooted_entries(&self, pubkey: &Pubkey, reclaims: &mut Vec<(Slot, T)>) {
-        if let Some(lock) = self.account_maps.get(pubkey) {
-            let mut slot_vec = lock.1.write().unwrap();
-            self.purge_older_root_entries(&mut slot_vec, reclaims);
+    pub fn clean_rooted_entries(&self, pubkey: &Pubkey, reclaims: &mut SlotList<T>) {
+        if let Some(locked_entry) = self.account_maps.get(pubkey) {
+            let mut list = locked_entry.1.write().unwrap();
+            self.purge_older_root_entries(&mut list, reclaims);
         }
     }
diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs
index 2edf290e8b..fb14f1c1bf 100644
--- a/runtime/src/append_vec.rs
+++ b/runtime/src/append_vec.rs
@@ -1,7 +1,12 @@
 use bincode::{deserialize_from, serialize_into};
 use memmap::MmapMut;
 use serde::{Deserialize, Serialize};
-use solana_sdk::{account::Account, clock::Epoch, hash::Hash, pubkey::Pubkey};
+use solana_sdk::{
+    account::Account,
+    clock::{Epoch, Slot},
+    hash::Hash,
+    pubkey::Pubkey,
+};
 use std::{
     fmt,
     fs::{remove_file, OpenOptions},
@@ -223,8 +228,8 @@ impl AppendVec {
         append_vec_path.as_ref().file_name().map(PathBuf::from)
     }
 
-    pub fn new_relative_path(fork_id: u64, id: usize) -> PathBuf {
-        PathBuf::from(&format!("{}.{}", fork_id, id))
+    pub fn new_relative_path(slot: Slot, id: usize) -> PathBuf {
+        PathBuf::from(&format!("{}.{}", slot, id))
     }
 
     #[allow(clippy::mutex_atomic)]
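
Reviewer note (not part of the patch): besides the mechanical slot_id -> slot rename, the accounts_index.rs change moves `Slot` to `solana_sdk::clock` and replaces raw `Vec<(Slot, T)>` / `&[(Slot, T)]` signatures with the `SlotList<T>` / `SlotSlice<'_, T>` aliases. The snippet below is a minimal, self-contained sketch of how those aliases are meant to be used; the local `Slot` alias and the `rooted_entries` helper are illustrative stand-ins (so it compiles without solana_sdk), loosely mirroring `get_rooted_entries` from the patch.

// Illustrative only -- not part of the patch. In the tree, `Slot` is
// `solana_sdk::clock::Slot`; it is re-declared here so the example stands alone.
type Slot = u64;

// The aliases introduced in accounts_index.rs: an owned list of (slot, value)
// pairs, and a borrowed view over the same pairs.
pub type SlotList<T> = Vec<(Slot, T)>;
pub type SlotSlice<'s, T> = &'s [(Slot, T)];

// Hypothetical helper in the spirit of `get_rooted_entries`: read through a
// borrowed SlotSlice and return the rooted subset as a fresh SlotList.
fn rooted_entries<T: Clone>(roots: &[Slot], slice: SlotSlice<T>) -> SlotList<T> {
    slice
        .iter()
        .filter(|(slot, _)| roots.contains(slot))
        .cloned()
        .collect()
}

fn main() {
    let list: SlotList<u64> = vec![(1, 10), (2, 20), (3, 30)];
    // Callers keep ownership of the SlotList and hand readers a SlotSlice borrow.
    let rooted = rooted_entries(&[1, 3], &list);
    assert_eq!(rooted, vec![(1, 10), (3, 30)]);
}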