Finer grained AccountsIndex locking (#12787)

Co-authored-by: Carl Lin <carl@solana.com>
carllin 2020-10-21 17:05:27 -07:00 committed by GitHub
parent dd6cccaf7e
commit e6b821c392
9 changed files with 890 additions and 445 deletions
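In short, this commit removes the single RwLock<AccountsIndex<T>> that wrapped the whole index in AccountsDB and pushes locking inside AccountsIndex itself: the pubkey map, each account's slot list, and the root-tracking sets each get their own lock, and per-account access goes through guard types (ReadAccountMapEntry / WriteAccountMapEntry) built with the newly added ouroboros crate. As rough orientation before the diffs, here is a simplified restatement of the new shape; the generic bounds, the ouroboros guard types, and the real Pubkey/Slot/AccountInfo types are omitted, so this is a sketch, not the actual definitions:

use std::collections::{BTreeMap, HashSet};
use std::sync::{atomic::AtomicU64, Arc, RwLock};

type Slot = u64;
type SlotList<T> = Vec<(Slot, T)>;

// One entry per pubkey: its own ref count and its own slot-list lock.
struct AccountMapEntryInner<T> {
    ref_count: AtomicU64,
    slot_list: RwLock<SlotList<T>>,
}

// Root bookkeeping moves behind a single RwLock instead of living as bare
// fields on an index that itself needed an outer write lock.
#[derive(Default)]
struct RootsTracker {
    roots: HashSet<Slot>,
    uncleaned_roots: HashSet<Slot>,
    previous_uncleaned_roots: HashSet<Slot>,
}

// The index is now used through &self everywhere; only the map and the
// roots tracker are individually locked, so a writer touching one pubkey
// no longer blocks readers of a different pubkey.
struct AccountsIndex<T> {
    account_maps: RwLock<BTreeMap<[u8; 32], Arc<AccountMapEntryInner<T>>>>,
    roots_tracker: RwLock<RootsTracker>,
}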

Cargo.lock (generated)

@ -2325,6 +2325,28 @@ dependencies = [
"vcpkg",
]
[[package]]
name = "ouroboros"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "776469997c56d7130b691a5edea1cbe63d7fef495f98b1b0ced0f3a5571c37aa"
dependencies = [
"ouroboros_macro",
"stable_deref_trait",
]
[[package]]
name = "ouroboros_macro"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85165ba45bb44e86ddb86e33ad92640522d572229c5c326eb7eb81ef31b06ce7"
dependencies = [
"Inflector",
"proc-macro2 1.0.19",
"quote 1.0.6",
"syn 1.0.27",
]
[[package]]
name = "parity-ws"
version = "0.10.0"
@ -4410,6 +4432,7 @@ dependencies = [
"num-derive",
"num-traits",
"num_cpus",
"ouroboros",
"rand 0.7.3",
"rayon",
"regex",
@ -4948,6 +4971,12 @@ dependencies = [
"thiserror",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "standback"
version = "0.2.9"


@ -1,5 +1,15 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "Inflector"
version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"
dependencies = [
"lazy_static",
"regex",
]
[[package]]
name = "addr2line"
version = "0.12.1"
@ -1241,6 +1251,28 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
[[package]]
name = "ouroboros"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "776469997c56d7130b691a5edea1cbe63d7fef495f98b1b0ced0f3a5571c37aa"
dependencies = [
"ouroboros_macro",
"stable_deref_trait",
]
[[package]]
name = "ouroboros_macro"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85165ba45bb44e86ddb86e33ad92640522d572229c5c326eb7eb81ef31b06ce7"
dependencies = [
"Inflector",
"proc-macro2 1.0.19",
"quote 1.0.6",
"syn 1.0.27",
]
[[package]]
name = "parking_lot"
version = "0.9.0"
@ -2110,6 +2142,7 @@ dependencies = [
"num-derive 0.3.0",
"num-traits",
"num_cpus",
"ouroboros",
"rand",
"rayon",
"regex",
@ -2263,6 +2296,12 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "subtle"
version = "1.0.0"


@ -29,6 +29,7 @@ memmap = "0.7.0"
num-derive = { version = "0.3" }
num-traits = { version = "0.2" }
num_cpus = "1.13.0"
ouroboros = "0.4.0"
rand = "0.7.0"
rayon = "1.4.1"
regex = "1.3.9"


@ -149,14 +149,18 @@ fn bench_delete_dependencies(bencher: &mut Bencher) {
});
}
#[bench]
#[ignore]
fn bench_concurrent_read_write(bencher: &mut Bencher) {
fn store_accounts_with_possible_contention<F: 'static>(
bench_name: &str,
bencher: &mut Bencher,
reader_f: F,
) where
F: Fn(&Accounts, &[Pubkey]) + Send + Copy,
{
let num_readers = 5;
let accounts = Arc::new(Accounts::new(
vec![
PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()))
.join("concurrent_read_write"),
.join(bench_name),
],
&ClusterType::Development,
));
@ -180,11 +184,7 @@ fn bench_concurrent_read_write(bencher: &mut Bencher) {
Builder::new()
.name("readers".to_string())
.spawn(move || {
let mut rng = rand::thread_rng();
loop {
let i = rng.gen_range(0, num_keys);
test::black_box(accounts.load_slow(&HashMap::new(), &pubkeys[i]).unwrap());
}
reader_f(&accounts, &pubkeys);
})
.unwrap();
}
@ -203,6 +203,30 @@ fn bench_concurrent_read_write(bencher: &mut Bencher) {
})
}
#[bench]
#[ignore]
fn bench_concurrent_read_write(bencher: &mut Bencher) {
store_accounts_with_possible_contention(
"concurrent_read_write",
bencher,
|accounts, pubkeys| {
let mut rng = rand::thread_rng();
loop {
let i = rng.gen_range(0, pubkeys.len());
test::black_box(accounts.load_slow(&HashMap::new(), &pubkeys[i]).unwrap());
}
},
)
}
#[bench]
#[ignore]
fn bench_concurrent_scan_write(bencher: &mut Bencher) {
store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop {
test::black_box(accounts.load_by_program(&HashMap::new(), &Account::default().owner));
})
}
#[bench]
#[ignore]
fn bench_dashmap_single_reader_with_n_writers(bencher: &mut Bencher) {


@ -15,10 +15,10 @@ fn bench_accounts_index(bencher: &mut Bencher) {
const NUM_FORKS: u64 = 16;
let mut reclaims = vec![];
let mut index = AccountsIndex::<AccountInfo>::default();
let index = AccountsIndex::<AccountInfo>::default();
for f in 0..NUM_FORKS {
for pubkey in pubkeys.iter().take(NUM_PUBKEYS) {
index.insert(f, pubkey, AccountInfo::default(), &mut reclaims);
index.upsert(f, pubkey, AccountInfo::default(), &mut reclaims);
}
}
@ -27,7 +27,7 @@ fn bench_accounts_index(bencher: &mut Bencher) {
bencher.iter(|| {
for _p in 0..NUM_PUBKEYS {
let pubkey = thread_rng().gen_range(0, NUM_PUBKEYS);
index.insert(
index.upsert(
fork,
&pubkeys[pubkey],
AccountInfo::default(),


@ -306,9 +306,7 @@ impl Accounts {
rent_collector: &RentCollector,
feature_set: &FeatureSet,
) -> Vec<(Result<TransactionLoadResult>, Option<HashAgeKind>)> {
//PERF: hold the lock to scan for the references, but not to clone the accounts
//TODO: two locks usually leads to deadlocks, should this be one structure?
let accounts_index = self.accounts_db.accounts_index.read().unwrap();
let accounts_index = &self.accounts_db.accounts_index;
let fee_config = FeeConfig {
secp256k1_program_enabled: feature_set
@ -335,7 +333,7 @@ impl Accounts {
let load_res = self.load_tx_accounts(
&self.accounts_db.storage,
ancestors,
&accounts_index,
accounts_index,
tx,
fee,
error_counters,
@ -350,7 +348,7 @@ impl Accounts {
let load_res = Self::load_loaders(
&self.accounts_db.storage,
ancestors,
&accounts_index,
accounts_index,
tx,
error_counters,
);
@ -1519,12 +1517,11 @@ mod tests {
let mut error_counters = ErrorCounters::default();
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts_index = accounts.accounts_db.accounts_index.read().unwrap();
assert_eq!(
Accounts::load_executable_accounts(
&accounts.accounts_db.storage,
&ancestors,
&accounts_index,
&accounts.accounts_db.accounts_index,
&Pubkey::new_rand(),
&mut error_counters
),


@ -401,7 +401,7 @@ struct FrozenAccountInfo {
#[derive(Debug)]
pub struct AccountsDB {
/// Keeps tracks of index into AppendVec on a per slot basis
pub accounts_index: RwLock<AccountsIndex<AccountInfo>>,
pub accounts_index: AccountsIndex<AccountInfo>,
pub storage: AccountStorage,
@ -489,7 +489,7 @@ impl Default for AccountsDB {
let mut bank_hashes = HashMap::new();
bank_hashes.insert(0, BankHashInfo::default());
AccountsDB {
accounts_index: RwLock::new(AccountsIndex::default()),
accounts_index: AccountsIndex::default(),
storage: AccountStorage(DashMap::new()),
next_id: AtomicUsize::new(0),
shrink_candidate_slots: Mutex::new(Vec::new()),
@ -587,9 +587,12 @@ impl AccountsDB {
.par_chunks(INDEX_CLEAN_BULK_COUNT)
.map(|pubkeys: &[Pubkey]| {
let mut reclaims = Vec::new();
let accounts_index = self.accounts_index.read().unwrap();
for pubkey in pubkeys {
accounts_index.clean_rooted_entries(&pubkey, &mut reclaims, max_clean_root);
self.accounts_index.clean_rooted_entries(
&pubkey,
&mut reclaims,
max_clean_root,
);
}
reclaims
});
@ -611,11 +614,7 @@ impl AccountsDB {
candidates: &mut MutexGuard<Vec<Slot>>,
max_clean_root: Option<Slot>,
) {
let previous_roots = self
.accounts_index
.write()
.unwrap()
.reset_uncleaned_roots(max_clean_root);
let previous_roots = self.accounts_index.reset_uncleaned_roots(max_clean_root);
candidates.extend(previous_roots);
}
@ -699,9 +698,8 @@ impl AccountsDB {
let mut reclaims = Vec::new();
let mut dead_keys = Vec::new();
let accounts_index = self.accounts_index.read().unwrap();
for (pubkey, slots_set) in pubkey_to_slot_set {
let (new_reclaims, is_empty) = accounts_index.purge_exact(&pubkey, slots_set);
let (new_reclaims, is_empty) = self.accounts_index.purge_exact(&pubkey, slots_set);
if is_empty {
dead_keys.push(pubkey);
}
@ -724,8 +722,14 @@ impl AccountsDB {
self.report_store_stats();
let mut accounts_scan = Measure::start("accounts_scan");
let accounts_index = self.accounts_index.read().unwrap();
let pubkeys: Vec<Pubkey> = accounts_index.account_maps.keys().cloned().collect();
let pubkeys: Vec<Pubkey> = self
.accounts_index
.account_maps
.read()
.unwrap()
.keys()
.cloned()
.collect();
// parallel scan the index.
let (mut purges, purges_in_root) = pubkeys
.par_chunks(4096)
@ -733,19 +737,27 @@ impl AccountsDB {
let mut purges_in_root = Vec::new();
let mut purges = HashMap::new();
for pubkey in pubkeys {
if let Some((list, index)) = accounts_index.get(pubkey, None, max_clean_root) {
let (slot, account_info) = &list[index];
if let Some((locked_entry, index)) =
self.accounts_index.get(pubkey, None, max_clean_root)
{
let (slot, account_info) = &locked_entry.slot_list()[index];
if account_info.lamports == 0 {
debug!("purging zero lamport {}, slot: {}", pubkey, slot);
purges.insert(*pubkey, accounts_index.would_purge(pubkey));
purges.insert(
*pubkey,
self.accounts_index.roots_and_ref_count(&locked_entry),
);
}
if accounts_index.uncleaned_roots.contains(slot) {
// Release the lock
let slot = *slot;
drop(locked_entry);
if self.accounts_index.is_uncleaned_root(slot) {
// Assertion enforced by `accounts_index.get()`, the latest slot
// will not be greater than the given `max_clean_root`
if let Some(max_clean_root) = max_clean_root {
assert!(*slot <= max_clean_root);
assert!(slot <= max_clean_root);
}
debug!("purging uncleaned {}, slot: {}", pubkey, slot);
purges_in_root.push(*pubkey);
}
}
@ -761,8 +773,6 @@ impl AccountsDB {
m1
},
);
drop(accounts_index);
accounts_scan.stop();
let mut clean_old_rooted = Measure::start("clean_old_roots");
@ -778,11 +788,7 @@ impl AccountsDB {
let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new();
for (key, (account_infos, ref_count)) in purges.iter_mut() {
if purged_account_slots.contains_key(&key) {
*ref_count = self
.accounts_index
.read()
.unwrap()
.ref_count_from_storage(&key);
*ref_count = self.accounts_index.ref_count_from_storage(&key);
}
account_infos.retain(|(slot, account_info)| {
let was_slot_purged = purged_account_slots
@ -856,7 +862,7 @@ impl AccountsDB {
let (reclaims, dead_keys) = self.purge_keys_exact(pubkey_to_slot_set);
self.handle_dead_keys(dead_keys);
self.accounts_index.handle_dead_keys(&dead_keys);
self.handle_reclaims(&reclaims, None, false, None);
@ -871,19 +877,6 @@ impl AccountsDB {
);
}
fn handle_dead_keys(&self, dead_keys: Vec<Pubkey>) {
if !dead_keys.is_empty() {
let mut accounts_index = self.accounts_index.write().unwrap();
for key in &dead_keys {
if let Some((_ref_count, list)) = accounts_index.account_maps.get(key) {
if list.read().unwrap().is_empty() {
accounts_index.account_maps.remove(key);
}
}
}
}
}
// Removes the accounts in the input `reclaims` from the tracked "count" of
// their corresponding storage entries. Note this does not actually free
// the memory from the storage entries until all the storage entries for
@ -1026,7 +1019,6 @@ impl AccountsDB {
let mut index_read_elapsed = Measure::start("index_read_elapsed");
let alive_accounts: Vec<_> = {
let accounts_index = self.accounts_index.read().unwrap();
stored_accounts
.iter()
.filter(
@ -1038,8 +1030,11 @@ impl AccountsDB {
(store_id, offset),
_write_version,
)| {
if let Some((list, _)) = accounts_index.get(pubkey, None, None) {
list.iter()
if let Some((locked_entry, _)) = self.accounts_index.get(pubkey, None, None)
{
locked_entry
.slot_list()
.iter()
.any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset)
} else {
false
@ -1189,8 +1184,7 @@ impl AccountsDB {
}
fn all_root_slots_in_index(&self) -> Vec<Slot> {
let index = self.accounts_index.read().unwrap();
index.roots.iter().cloned().collect()
self.accounts_index.all_roots()
}
fn all_slots_in_storage(&self) -> Vec<Slot> {
@ -1235,13 +1229,13 @@ impl AccountsDB {
A: Default,
{
let mut collector = A::default();
let accounts_index = self.accounts_index.read().unwrap();
accounts_index.scan_accounts(ancestors, |pubkey, (account_info, slot)| {
let account_slot = self
.get_account_from_storage(slot, account_info)
.map(|account| (pubkey, account, slot));
scan_func(&mut collector, account_slot)
});
self.accounts_index
.scan_accounts(ancestors, |pubkey, (account_info, slot)| {
let account_slot = self
.get_account_from_storage(slot, account_info)
.map(|account| (pubkey, account, slot));
scan_func(&mut collector, account_slot)
});
collector
}
@ -1252,13 +1246,16 @@ impl AccountsDB {
R: RangeBounds<Pubkey>,
{
let mut collector = A::default();
let accounts_index = self.accounts_index.read().unwrap();
accounts_index.range_scan_accounts(ancestors, range, |pubkey, (account_info, slot)| {
let account_slot = self
.get_account_from_storage(slot, account_info)
.map(|account| (pubkey, account, slot));
scan_func(&mut collector, account_slot)
});
self.accounts_index.range_scan_accounts(
ancestors,
range,
|pubkey, (account_info, slot)| {
let account_slot = self
.get_account_from_storage(slot, account_info)
.map(|account| (pubkey, account, slot));
scan_func(&mut collector, account_slot)
},
);
collector
}
@ -1320,37 +1317,58 @@ impl AccountsDB {
accounts_index: &AccountsIndex<AccountInfo>,
pubkey: &Pubkey,
) -> Option<(Account, Slot)> {
let (lock, index) = accounts_index.get(pubkey, Some(ancestors), None)?;
let slot = lock[index].0;
let (slot, store_id, offset) = {
let (lock, index) = accounts_index.get(pubkey, Some(ancestors), None)?;
let slot_list = lock.slot_list();
let (
slot,
AccountInfo {
store_id, offset, ..
},
) = slot_list[index];
(slot, store_id, offset)
// `lock` released here
};
//TODO: thread this as a ref
storage
.get_account_storage_entry(slot, lock[index].1.store_id)
.get_account_storage_entry(slot, store_id)
.and_then(|store| {
let info = &lock[index].1;
store
.accounts
.get_account(info.offset)
.get_account(offset)
.map(|account| (account.0.clone_account(), slot))
})
}
#[cfg(test)]
fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash {
let accounts_index = self.accounts_index.read().unwrap();
let (lock, index) = accounts_index.get(pubkey, Some(ancestors), None).unwrap();
let slot = lock[index].0;
let info = &lock[index].1;
let (slot, store_id, offset) = {
let (lock, index) = self
.accounts_index
.get(pubkey, Some(ancestors), None)
.unwrap();
let slot_list = lock.slot_list();
let (
slot,
AccountInfo {
store_id, offset, ..
},
) = slot_list[index];
(slot, store_id, offset)
// lock released here
};
let entry = self
.storage
.get_account_storage_entry(slot, info.store_id)
.get_account_storage_entry(slot, store_id)
.unwrap();
let account = entry.accounts.get_account(info.offset);
let account = entry.accounts.get_account(offset);
*account.as_ref().unwrap().0.hash
}
pub fn load_slow(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> {
let accounts_index = self.accounts_index.read().unwrap();
Self::load(&self.storage, ancestors, &accounts_index, pubkey)
Self::load(&self.storage, ancestors, &self.accounts_index, pubkey)
}
fn get_account_from_storage(&self, slot: Slot, account_info: &AccountInfo) -> Option<Account> {
@ -1436,12 +1454,10 @@ impl AccountsDB {
fn purge_slots(&self, slots: &HashSet<Slot>) {
//add_root should be called first
let accounts_index = self.accounts_index.read().unwrap();
let non_roots: Vec<_> = slots
.iter()
.filter(|slot| !accounts_index.is_root(**slot))
.filter(|slot| !self.accounts_index.is_root(**slot))
.collect();
drop(accounts_index);
let mut all_removed_slot_storages = vec![];
let mut total_removed_storage_entries = 0;
let mut total_removed_bytes = 0;
@ -1493,7 +1509,7 @@ impl AccountsDB {
}
pub fn remove_unrooted_slot(&self, remove_slot: Slot) {
if self.accounts_index.read().unwrap().is_root(remove_slot) {
if self.accounts_index.is_root(remove_slot) {
panic!("Trying to remove accounts for rooted slot {}", remove_slot);
}
@ -1508,10 +1524,12 @@ impl AccountsDB {
let mut reclaims = vec![];
{
let pubkeys = pubkey_sets.iter().flatten();
let accounts_index = self.accounts_index.read().unwrap();
for pubkey in pubkeys {
accounts_index.clean_unrooted_entries_by_slot(remove_slot, pubkey, &mut reclaims);
self.accounts_index.clean_unrooted_entries_by_slot(
remove_slot,
pubkey,
&mut reclaims,
);
}
}
@ -1965,15 +1983,22 @@ impl AccountsDB {
) -> Result<(Hash, u64), BankHashVerificationError> {
use BankHashVerificationError::*;
let mut scan = Measure::start("scan");
let accounts_index = self.accounts_index.read().unwrap();
let keys: Vec<_> = accounts_index.account_maps.keys().collect();
let keys: Vec<_> = self
.accounts_index
.account_maps
.read()
.unwrap()
.keys()
.cloned()
.collect();
let mismatch_found = AtomicU64::new(0);
let hashes: Vec<_> = keys
let hashes: Vec<(Pubkey, Hash, u64)> = keys
.par_iter()
.filter_map(|pubkey| {
if let Some((list, index)) = accounts_index.get(pubkey, Some(ancestors), Some(slot))
if let Some((lock, index)) =
self.accounts_index.get(pubkey, Some(ancestors), Some(slot))
{
let (slot, account_info) = &list[index];
let (slot, account_info) = &lock.slot_list()[index];
if account_info.lamports != 0 {
self.storage
.get_account_storage_entry(*slot, account_info.store_id)
@ -1999,7 +2024,7 @@ impl AccountsDB {
}
}
Some((**pubkey, *account.hash, balance))
Some((*pubkey, *account.hash, balance))
})
} else {
None
@ -2131,24 +2156,10 @@ impl AccountsDB {
accounts: &[(&Pubkey, &Account)],
) -> SlotList<AccountInfo> {
let mut reclaims = SlotList::<AccountInfo>::with_capacity(infos.len() * 2);
let index = self.accounts_index.read().unwrap();
let inserts: Vec<_> = infos
.into_iter()
.zip(accounts.iter())
.filter_map(|(info, pubkey_account)| {
let pubkey = pubkey_account.0;
index
.update(slot, pubkey, info, &mut reclaims)
.map(|info| (pubkey, info))
})
.collect();
drop(index);
if !inserts.is_empty() {
let mut index = self.accounts_index.write().unwrap();
for (pubkey, info) in inserts {
index.insert(slot, pubkey, info, &mut reclaims);
}
for (info, pubkey_account) in infos.into_iter().zip(accounts.iter()) {
let pubkey = pubkey_account.0;
self.accounts_index
.upsert(slot, pubkey, info, &mut reclaims);
}
reclaims
}
@ -2205,49 +2216,44 @@ impl AccountsDB {
dead_slots: &HashSet<Slot>,
mut purged_account_slots: Option<&mut AccountSlots>,
) {
{
let mut measure = Measure::start("clean_dead_slots-ms");
let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
for slot in dead_slots.iter() {
if let Some(slot_storage) = self.storage.get_slot_stores(*slot) {
for store in slot_storage.read().unwrap().values() {
stores.push(store.clone());
}
let mut measure = Measure::start("clean_dead_slots-ms");
let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
for slot in dead_slots.iter() {
if let Some(slot_storage) = self.storage.get_slot_stores(*slot) {
for store in slot_storage.read().unwrap().values() {
stores.push(store.clone());
}
}
datapoint_debug!("clean_dead_slots", ("stores", stores.len(), i64));
let slot_pubkeys: HashSet<(Slot, Pubkey)> = {
self.thread_pool_clean.install(|| {
stores
.into_par_iter()
.map(|store| {
let accounts = store.accounts.accounts(0);
accounts
.into_iter()
.map(|account| (store.slot, account.meta.pubkey))
.collect::<HashSet<(Slot, Pubkey)>>()
})
.reduce(HashSet::new, |mut reduced, store_pubkeys| {
reduced.extend(store_pubkeys);
reduced
})
})
};
let index = self.accounts_index.read().unwrap();
for (slot, pubkey) in slot_pubkeys {
if let Some(ref mut purged_account_slots) = purged_account_slots {
purged_account_slots.entry(pubkey).or_default().insert(slot);
}
index.unref_from_storage(&pubkey);
}
datapoint_debug!("clean_dead_slots", ("stores", stores.len(), i64));
let slot_pubkeys: HashSet<(Slot, Pubkey)> = {
self.thread_pool_clean.install(|| {
stores
.into_par_iter()
.map(|store| {
let accounts = store.accounts.accounts(0);
accounts
.into_iter()
.map(|account| (store.slot, account.meta.pubkey))
.collect::<HashSet<(Slot, Pubkey)>>()
})
.reduce(HashSet::new, |mut reduced, store_pubkeys| {
reduced.extend(store_pubkeys);
reduced
})
})
};
for (slot, pubkey) in slot_pubkeys {
if let Some(ref mut purged_account_slots) = purged_account_slots {
purged_account_slots.entry(pubkey).or_default().insert(slot);
}
drop(index);
measure.stop();
inc_new_counter_info!("clean_dead_slots-unref-ms", measure.as_ms() as usize);
self.accounts_index.unref_from_storage(&pubkey);
}
measure.stop();
inc_new_counter_info!("clean_dead_slots-unref-ms", measure.as_ms() as usize);
let mut index = self.accounts_index.write().unwrap();
for slot in dead_slots.iter() {
index.clean_dead_slot(*slot);
}
for slot in dead_slots.iter() {
self.accounts_index.clean_dead_slot(*slot);
}
{
let mut bank_hashes = self.bank_hashes.write().unwrap();
@ -2445,17 +2451,16 @@ impl AccountsDB {
}
pub fn add_root(&self, slot: Slot) {
self.accounts_index.write().unwrap().add_root(slot)
self.accounts_index.add_root(slot)
}
pub fn get_snapshot_storages(&self, snapshot_slot: Slot) -> SnapshotStorages {
let accounts_index = self.accounts_index.read().unwrap();
self.storage
.0
.iter()
.filter(|iter_item| {
let slot = *iter_item.key();
slot <= snapshot_slot && accounts_index.is_root(slot)
slot <= snapshot_slot && self.accounts_index.is_root(slot)
})
.map(|iter_item| {
iter_item
@ -2486,7 +2491,6 @@ impl AccountsDB {
}
pub fn generate_index(&self) {
let mut accounts_index = self.accounts_index.write().unwrap();
let mut slots = self.storage.all_slots();
slots.sort();
@ -2533,19 +2537,24 @@ impl AccountsDB {
for (pubkey, account_infos) in accounts_map.iter_mut() {
account_infos.sort_by(|a, b| a.0.cmp(&b.0));
for (_, account_info) in account_infos {
accounts_index.insert(*slot, pubkey, account_info.clone(), &mut _reclaims);
self.accounts_index.upsert(
*slot,
pubkey,
account_info.clone(),
&mut _reclaims,
);
}
}
}
}
// Need to add these last, otherwise older updates will be cleaned
for slot in slots {
accounts_index.add_root(slot);
self.accounts_index.add_root(slot);
}
let mut counts = HashMap::new();
for slot_list in accounts_index.account_maps.values() {
for (_slot, account_entry) in slot_list.1.read().unwrap().iter() {
for account_entry in self.accounts_index.account_maps.read().unwrap().values() {
for (_slot, account_entry) in account_entry.slot_list.read().unwrap().iter() {
*counts.entry(account_entry.store_id).or_insert(0) += 1;
}
}
@ -2576,19 +2585,15 @@ impl AccountsDB {
}
fn print_index(&self, label: &'static str) {
let mut roots: Vec<_> = self
.accounts_index
.read()
.unwrap()
.roots
.iter()
.cloned()
.collect();
let mut roots: Vec<_> = self.accounts_index.all_roots();
roots.sort();
info!("{}: accounts_index roots: {:?}", label, roots,);
for (pubkey, list) in &self.accounts_index.read().unwrap().account_maps {
for (pubkey, account_entry) in self.accounts_index.account_maps.read().unwrap().iter() {
info!(" key: {}", pubkey);
info!(" slots: {:?}", *list.1.read().unwrap());
info!(
" slots: {:?}",
*account_entry.slot_list.read().unwrap()
);
}
}
@ -2619,9 +2624,8 @@ impl AccountsDB {
#[cfg(test)]
pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> {
let ancestors = vec![(slot, 1)].into_iter().collect();
let accounts_index = self.accounts_index.read().unwrap();
let result = accounts_index.get(&pubkey, Some(&ancestors), None);
result.map(|(list, index)| list[index].1.store_id)
let result = self.accounts_index.get(&pubkey, Some(&ancestors), None);
result.map(|(list, index)| list.slot_list()[index].1.store_id)
}
}
@ -2870,8 +2874,6 @@ pub mod tests {
.insert(unrooted_slot, BankHashInfo::default());
assert!(db
.accounts_index
.read()
.unwrap()
.get(&key, Some(&ancestors), None)
.is_some());
assert_load_account(&db, unrooted_slot, key, 1);
@ -2883,16 +2885,11 @@ pub mod tests {
assert!(db.storage.0.get(&unrooted_slot).is_none());
assert!(db
.accounts_index
.read()
.unwrap()
.account_maps
.get(&key)
.map(|pubkey_entry| pubkey_entry.1.read().unwrap().is_empty())
.get_account_read_entry(&key)
.map(|locked_entry| locked_entry.slot_list().is_empty())
.unwrap_or(true));
assert!(db
.accounts_index
.read()
.unwrap()
.get(&key, Some(&ancestors), None)
.is_none());
@ -3208,9 +3205,11 @@ pub mod tests {
accounts.store(0, &[(&pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let id = {
let index = accounts.accounts_index.read().unwrap();
let (list, idx) = index.get(&pubkey, Some(&ancestors), None).unwrap();
list[idx].1.store_id
let (lock, idx) = accounts
.accounts_index
.get(&pubkey, Some(&ancestors), None)
.unwrap();
lock.slot_list()[idx].1.store_id
};
accounts.add_root(1);
@ -3272,14 +3271,7 @@ pub mod tests {
}
fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
self.accounts_index
.read()
.unwrap()
.ref_count_from_storage(&pubkey)
}
fn uncleaned_root_count(&self) -> usize {
self.accounts_index.read().unwrap().uncleaned_roots.len()
self.accounts_index.ref_count_from_storage(&pubkey)
}
}
@ -3303,17 +3295,13 @@ pub mod tests {
let ancestors: HashMap<Slot, usize> = vec![(0, 1)].into_iter().collect();
let (slot1, account_info1) = accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey1, Some(&ancestors), None)
.map(|(account_list1, index1)| account_list1[index1].clone())
.map(|(account_list1, index1)| account_list1.slot_list()[index1].clone())
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey2, Some(&ancestors), None)
.map(|(account_list2, index2)| account_list2[index2].clone())
.map(|(account_list2, index2)| account_list2.slot_list()[index2].clone())
.unwrap();
assert_eq!(slot1, 0);
assert_eq!(slot1, slot2);
@ -3379,12 +3367,7 @@ pub mod tests {
// zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey, None, None)
.is_none());
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
@ -3482,12 +3465,7 @@ pub mod tests {
// Pubkey 1, a zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey1, None, None)
.is_none());
assert!(accounts.accounts_index.get(&pubkey1, None, None).is_none());
}
#[test]
@ -3515,12 +3493,7 @@ pub mod tests {
accounts.clean_accounts(Some(0));
assert_eq!(accounts.alive_account_count_in_store(0), 1);
assert_eq!(accounts.alive_account_count_in_store(1), 1);
assert!(accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey, None, None)
.is_some());
assert!(accounts.accounts_index.get(&pubkey, None, None).is_some());
// Now the account can be cleaned up
accounts.clean_accounts(Some(1));
@ -3529,12 +3502,7 @@ pub mod tests {
// The zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey, None, None)
.is_none());
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
@ -3546,15 +3514,15 @@ pub mod tests {
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store(0, &[(&pubkey, &account)]);
assert_eq!(accounts.uncleaned_root_count(), 0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.uncleaned_root_count(), 1);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None);
assert_eq!(accounts.uncleaned_root_count(), 0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
@ -3563,15 +3531,15 @@ pub mod tests {
let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development);
assert_eq!(accounts.uncleaned_root_count(), 0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.uncleaned_root_count(), 1);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None);
assert_eq!(accounts.uncleaned_root_count(), 0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
@ -3736,17 +3704,13 @@ pub mod tests {
accounts.add_root(current_slot);
let (slot1, account_info1) = accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey, None, None)
.map(|(account_list1, index1)| account_list1[index1].clone())
.map(|(account_list1, index1)| account_list1.slot_list()[index1].clone())
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.read()
.unwrap()
.get(&pubkey2, None, None)
.map(|(account_list2, index2)| account_list2[index2].clone())
.map(|(account_list2, index2)| account_list2.slot_list()[index2].clone())
.unwrap();
assert_eq!(slot1, current_slot);
assert_eq!(slot1, slot2);
@ -3771,9 +3735,11 @@ pub mod tests {
// The earlier entry for pubkey in the account index is purged,
let (slot_list_len, index_slot) = {
let index = accounts.accounts_index.read().unwrap();
let account_entry = index.account_maps.get(&pubkey).unwrap();
let slot_list = account_entry.1.read().unwrap();
let account_entry = accounts
.accounts_index
.get_account_read_entry(&pubkey)
.unwrap();
let slot_list = account_entry.slot_list();
(slot_list.len(), slot_list[0].0)
};
assert_eq!(slot_list_len, 1);
@ -3840,10 +3806,7 @@ pub mod tests {
// Make sure the index is for pubkey cleared
assert!(accounts
.accounts_index
.read()
.unwrap()
.account_maps
.get(&pubkey)
.get_account_read_entry(&pubkey)
.is_none());
// slot 1 & 2 should not have any stores
@ -4088,7 +4051,7 @@ pub mod tests {
let account2 = Account::new(3, 0, &key);
db.store(2, &[(&key1, &account2)]);
db.handle_dead_keys(dead_keys);
db.accounts_index.handle_dead_keys(&dead_keys);
db.print_accounts_stats("post");
let ancestors = vec![(2, 0)].into_iter().collect();
@ -4852,7 +4815,7 @@ pub mod tests {
actual_slots.sort();
assert_eq!(actual_slots, vec![0, 1, 2]);
accounts.accounts_index.write().unwrap().roots.clear();
accounts.accounts_index.clear_roots();
let mut actual_slots = (0..5)
.map(|_| accounts.next_shrink_slot())
.collect::<Vec<_>>();
@ -4982,7 +4945,7 @@ pub mod tests {
#[test]
fn test_delete_dependencies() {
solana_logger::setup();
let mut accounts_index = AccountsIndex::default();
let accounts_index = AccountsIndex::default();
let key0 = Pubkey::new_from_array([0u8; 32]);
let key1 = Pubkey::new_from_array([1u8; 32]);
let key2 = Pubkey::new_from_array([2u8; 32]);
@ -5007,20 +4970,23 @@ pub mod tests {
lamports: 0,
};
let mut reclaims = vec![];
accounts_index.insert(0, &key0, info0, &mut reclaims);
accounts_index.insert(1, &key0, info1.clone(), &mut reclaims);
accounts_index.insert(1, &key1, info1, &mut reclaims);
accounts_index.insert(2, &key1, info2.clone(), &mut reclaims);
accounts_index.insert(2, &key2, info2, &mut reclaims);
accounts_index.insert(3, &key2, info3, &mut reclaims);
accounts_index.upsert(0, &key0, info0, &mut reclaims);
accounts_index.upsert(1, &key0, info1.clone(), &mut reclaims);
accounts_index.upsert(1, &key1, info1, &mut reclaims);
accounts_index.upsert(2, &key1, info2.clone(), &mut reclaims);
accounts_index.upsert(2, &key2, info2, &mut reclaims);
accounts_index.upsert(3, &key2, info3, &mut reclaims);
accounts_index.add_root(0);
accounts_index.add_root(1);
accounts_index.add_root(2);
accounts_index.add_root(3);
let mut purges = HashMap::new();
purges.insert(key0, accounts_index.would_purge(&key0));
purges.insert(key1, accounts_index.would_purge(&key1));
purges.insert(key2, accounts_index.would_purge(&key2));
let (key0_entry, _) = accounts_index.get(&key0, None, None).unwrap();
purges.insert(key0, accounts_index.roots_and_ref_count(&key0_entry));
let (key1_entry, _) = accounts_index.get(&key1, None, None).unwrap();
purges.insert(key1, accounts_index.roots_and_ref_count(&key1_entry));
let (key2_entry, _) = accounts_index.get(&key2, None, None).unwrap();
purges.insert(key2, accounts_index.roots_and_ref_count(&key2_entry));
for (key, (list, ref_count)) in &purges {
info!(" purge {} ref_count {} =>", key, ref_count);
for x in list {


@ -1,38 +1,262 @@
use ouroboros::self_referencing;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::ops::{
Bound,
Bound::{Excluded, Included, Unbounded},
};
use std::sync::atomic::{AtomicU64, Ordering};
use std::{
collections::{BTreeMap, HashMap, HashSet},
ops::RangeBounds,
sync::{RwLock, RwLockReadGuard},
collections::{
btree_map::{self, BTreeMap},
HashMap, HashSet,
},
ops::{Range, RangeBounds},
sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard},
};
const ITER_BATCH_SIZE: usize = 1000;
pub type SlotList<T> = Vec<(Slot, T)>;
pub type SlotSlice<'s, T> = &'s [(Slot, T)];
pub type Ancestors = HashMap<Slot, usize>;
pub type RefCount = u64;
type AccountMapEntry<T> = (AtomicU64, RwLock<SlotList<T>>);
pub type AccountMap<K, V> = BTreeMap<K, V>;
type AccountMapEntry<T> = Arc<AccountMapEntryInner<T>>;
#[derive(Debug)]
pub struct AccountMapEntryInner<T> {
ref_count: AtomicU64,
pub slot_list: RwLock<SlotList<T>>,
}
#[self_referencing]
pub struct ReadAccountMapEntry<T: 'static> {
pub owned_entry: AccountMapEntry<T>,
#[borrows(owned_entry)]
pub slot_list_guard: RwLockReadGuard<'this, SlotList<T>>,
}
impl<T: Clone> ReadAccountMapEntry<T> {
pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self {
ReadAccountMapEntryBuilder {
owned_entry: account_map_entry,
slot_list_guard_builder: |lock| lock.slot_list.read().unwrap(),
}
.build()
}
pub fn slot_list(&self) -> &SlotList<T> {
&*self.slot_list_guard
}
pub fn ref_count(&self) -> &AtomicU64 {
&self.owned_entry.ref_count
}
}
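A hedged usage sketch (illustrative, not part of the commit's diff): callers such as AccountsDB::load in the accounts_db.rs hunks above now receive this owned guard from AccountsIndex::get (defined further down in this file) and read the slot list through it, instead of first taking a read lock on the whole index. AccountsIndex<bool> is used only because the tests below do the same:

fn example_read(index: &AccountsIndex<bool>, pubkey: &Pubkey, ancestors: &Ancestors) {
    if let Some((locked_entry, i)) = index.get(pubkey, Some(ancestors), None) {
        // Only this account's slot-list read lock is held here.
        let (slot, value) = locked_entry.slot_list()[i];
        let _ = (slot, value);
    } // lock released when `locked_entry` is dropped
}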
#[self_referencing]
pub struct WriteAccountMapEntry<T: 'static> {
pub owned_entry: AccountMapEntry<T>,
#[borrows(owned_entry)]
pub slot_list_guard: RwLockWriteGuard<'this, SlotList<T>>,
}
impl<T: 'static + Clone> WriteAccountMapEntry<T> {
pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self {
WriteAccountMapEntryBuilder {
owned_entry: account_map_entry,
slot_list_guard_builder: |lock| lock.slot_list.write().unwrap(),
}
.build()
}
pub fn slot_list(&mut self) -> &SlotList<T> {
&*self.slot_list_guard
}
pub fn slot_list_mut(&mut self) -> &mut SlotList<T> {
&mut *self.slot_list_guard
}
pub fn ref_count(&self) -> &AtomicU64 {
&self.owned_entry.ref_count
}
// Try to update an item in the slot list for the given `slot`. If an item for the slot
// already exists in the list, remove the older item, add it to `reclaims`, and insert
// the new item.
pub fn update(&mut self, slot: Slot, account_info: T, reclaims: &mut SlotList<T>) {
// filter out other dirty entries from the same slot
let mut same_slot_previous_updates: Vec<(usize, &(Slot, T))> = self
.slot_list()
.iter()
.enumerate()
.filter(|(_, (s, _))| *s == slot)
.collect();
assert!(same_slot_previous_updates.len() <= 1);
if let Some((list_index, (s, previous_update_value))) = same_slot_previous_updates.pop() {
reclaims.push((*s, previous_update_value.clone()));
self.slot_list_mut().remove(list_index);
} else {
// Only increment ref count if the account was not previously updated in this slot
self.ref_count().fetch_add(1, Ordering::Relaxed);
}
self.slot_list_mut().push((slot, account_info));
}
}
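What this buys the caller, as a hedged sketch mirroring test_update_last_wins further down: a second write to the same slot reclaims the previous value instead of growing the list, and the ref count is bumped only on the first write for that slot.

fn example_same_slot_update(index: &AccountsIndex<bool>, pubkey: &Pubkey) {
    let mut reclaims = vec![];
    index.upsert(0, pubkey, true, &mut reclaims); // first write in slot 0
    index.upsert(0, pubkey, false, &mut reclaims); // same slot: earlier value reclaimed
    assert_eq!(reclaims, vec![(0, true)]);
}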
#[derive(Debug, Default)]
pub struct RootsTracker {
roots: HashSet<Slot>,
uncleaned_roots: HashSet<Slot>,
previous_uncleaned_roots: HashSet<Slot>,
}
pub struct AccountsIndexIterator<'a, T> {
account_maps: &'a RwLock<AccountMap<Pubkey, AccountMapEntry<T>>>,
start_bound: Bound<Pubkey>,
end_bound: Bound<Pubkey>,
is_finished: bool,
}
impl<'a, T> AccountsIndexIterator<'a, T> {
fn clone_bound(bound: Bound<&Pubkey>) -> Bound<Pubkey> {
match bound {
Unbounded => Unbounded,
Included(k) => Included(*k),
Excluded(k) => Excluded(*k),
}
}
pub fn new<R>(
account_maps: &'a RwLock<AccountMap<Pubkey, AccountMapEntry<T>>>,
range: Option<R>,
) -> Self
where
R: RangeBounds<Pubkey>,
{
Self {
start_bound: range
.as_ref()
.map(|r| Self::clone_bound(r.start_bound()))
.unwrap_or(Unbounded),
end_bound: range
.as_ref()
.map(|r| Self::clone_bound(r.end_bound()))
.unwrap_or(Unbounded),
account_maps,
is_finished: false,
}
}
}
impl<'a, T: 'static + Clone> Iterator for AccountsIndexIterator<'a, T> {
type Item = Vec<(Pubkey, AccountMapEntry<T>)>;
fn next(&mut self) -> Option<Self::Item> {
if self.is_finished {
return None;
}
let chunk: Vec<(Pubkey, AccountMapEntry<T>)> = self
.account_maps
.read()
.unwrap()
.range((self.start_bound, self.end_bound))
.map(|(pubkey, account_map_entry)| (*pubkey, account_map_entry.clone()))
.take(ITER_BATCH_SIZE)
.collect();
if chunk.is_empty() {
self.is_finished = true;
return None;
}
self.start_bound = Excluded(chunk.last().unwrap().0);
Some(chunk)
}
}
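One point worth spelling out (an observation about the code above, not text from the commit): each call to next() takes the account_maps read lock only while cloning one batch of up to ITER_BATCH_SIZE Arc handles, so writers can interleave between batches; once the iterator reports it is finished it stays finished even if keys are inserted afterwards, which test_accounts_iter_finished below checks. A module-internal usage sketch (iter is not public):

fn example_iterate(index: &AccountsIndex<bool>) {
    for chunk in index.iter(None::<Range<Pubkey>>) {
        // The map lock is NOT held here; only the cloned Arc entries are.
        for (_pubkey, entry) in chunk {
            let _len = entry.slot_list.read().unwrap().len();
        }
    }
}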
#[derive(Debug, Default)]
pub struct AccountsIndex<T> {
pub account_maps: AccountMap<Pubkey, AccountMapEntry<T>>,
pub roots: HashSet<Slot>,
pub uncleaned_roots: HashSet<Slot>,
pub previous_uncleaned_roots: HashSet<Slot>,
pub account_maps: RwLock<AccountMap<Pubkey, AccountMapEntry<T>>>,
roots_tracker: RwLock<RootsTracker>,
}
impl<'a, T: 'a + Clone> AccountsIndex<T> {
fn do_scan_accounts<F, I>(&self, ancestors: &Ancestors, mut func: F, iter: I)
impl<T: 'static + Clone> AccountsIndex<T> {
fn iter<R>(&self, range: Option<R>) -> AccountsIndexIterator<T>
where
R: RangeBounds<Pubkey>,
{
AccountsIndexIterator::new(&self.account_maps, range)
}
fn do_scan_accounts<'a, F, R>(&'a self, ancestors: &Ancestors, mut func: F, range: Option<R>)
where
F: FnMut(&Pubkey, (&T, Slot)),
I: Iterator<Item = (&'a Pubkey, &'a AccountMapEntry<T>)>,
R: RangeBounds<Pubkey>,
{
for (pubkey, list) in iter {
let list_r = &list.1.read().unwrap();
if let Some(index) = self.latest_slot(Some(ancestors), &list_r, None) {
func(pubkey, (&list_r[index].1, list_r[index].0));
for pubkey_list in self.iter(range) {
for (pubkey, list) in pubkey_list {
let list_r = &list.slot_list.read().unwrap();
if let Some(index) = self.latest_slot(Some(ancestors), &list_r, None) {
func(&pubkey, (&list_r[index].1, list_r[index].0));
}
}
}
}
pub fn get_account_read_entry(&self, pubkey: &Pubkey) -> Option<ReadAccountMapEntry<T>> {
self.account_maps
.read()
.unwrap()
.get(pubkey)
.cloned()
.map(ReadAccountMapEntry::from_account_map_entry)
}
fn get_account_write_entry(&self, pubkey: &Pubkey) -> Option<WriteAccountMapEntry<T>> {
self.account_maps
.read()
.unwrap()
.get(pubkey)
.cloned()
.map(WriteAccountMapEntry::from_account_map_entry)
}
fn get_account_write_entry_else_create(
&self,
pubkey: &Pubkey,
) -> (WriteAccountMapEntry<T>, bool) {
let mut w_account_entry = self.get_account_write_entry(pubkey);
let mut is_newly_inserted = false;
if w_account_entry.is_none() {
let new_entry = Arc::new(AccountMapEntryInner {
ref_count: AtomicU64::new(0),
slot_list: RwLock::new(SlotList::with_capacity(32)),
});
let mut w_account_maps = self.account_maps.write().unwrap();
let account_entry = w_account_maps.entry(*pubkey).or_insert_with(|| {
is_newly_inserted = true;
new_entry
});
w_account_entry = Some(WriteAccountMapEntry::from_account_map_entry(
account_entry.clone(),
));
}
(w_account_entry.unwrap(), is_newly_inserted)
}
pub fn handle_dead_keys(&self, dead_keys: &[Pubkey]) {
if !dead_keys.is_empty() {
for key in dead_keys.iter() {
let mut w_index = self.account_maps.write().unwrap();
if let btree_map::Entry::Occupied(index_entry) = w_index.entry(*key) {
if index_entry.get().slot_list.read().unwrap().is_empty() {
index_entry.remove();
}
}
}
}
}
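For context, a hedged sketch of the cleanup flow this takes over from the removed AccountsDB::handle_dead_keys (see the accounts_db.rs diff above): once a purge empties a key's slot list, the key still occupies the map until this method removes it under the map write lock. The sketch assumes the pubkey was previously upserted at slot 0, since purge_exact below unwraps the entry:

fn example_dead_key_cleanup(index: &AccountsIndex<bool>, pubkey: &Pubkey) {
    let (_reclaims, is_empty) = index.purge_exact(pubkey, vec![0].into_iter().collect());
    if is_empty {
        index.handle_dead_keys(&[*pubkey]);
        assert!(index.get_account_read_entry(pubkey).is_none());
    }
}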
@ -42,7 +266,7 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
where
F: FnMut(&Pubkey, (&T, Slot)),
{
self.do_scan_accounts(ancestors, func, self.account_maps.iter());
self.do_scan_accounts(ancestors, func, None::<Range<Pubkey>>);
}
/// call func with every pubkey and index visible from a given set of ancestors with range
@ -51,10 +275,10 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
F: FnMut(&Pubkey, (&T, Slot)),
R: RangeBounds<Pubkey>,
{
self.do_scan_accounts(ancestors, func, self.account_maps.range(range));
self.do_scan_accounts(ancestors, func, Some(range));
}
fn get_rooted_entries(&self, slice: SlotSlice<T>) -> SlotList<T> {
pub fn get_rooted_entries(&self, slice: SlotSlice<T>) -> SlotList<T> {
slice
.iter()
.filter(|(slot, _)| self.is_root(*slot))
@ -63,33 +287,36 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
}
// returns the rooted entries and the storage ref count
pub fn would_purge(&self, pubkey: &Pubkey) -> (SlotList<T>, RefCount) {
let (ref_count, slots_list) = self.account_maps.get(&pubkey).unwrap();
let slots_list_r = &slots_list.read().unwrap();
pub fn roots_and_ref_count(
&self,
locked_account_entry: &ReadAccountMapEntry<T>,
) -> (SlotList<T>, RefCount) {
(
self.get_rooted_entries(&slots_list_r),
ref_count.load(Ordering::Relaxed),
self.get_rooted_entries(&locked_account_entry.slot_list()),
locked_account_entry.ref_count().load(Ordering::Relaxed),
)
}
// filter any rooted entries and return them along with a bool that indicates
// if this account has no more entries.
pub fn purge(&self, pubkey: &Pubkey) -> (SlotList<T>, bool) {
let list = &mut self.account_maps.get(&pubkey).unwrap().1.write().unwrap();
let reclaims = self.get_rooted_entries(&list);
list.retain(|(slot, _)| !self.is_root(*slot));
(reclaims, list.is_empty())
let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap();
let slot_list = write_account_map_entry.slot_list_mut();
let reclaims = self.get_rooted_entries(&slot_list);
slot_list.retain(|(slot, _)| !self.is_root(*slot));
(reclaims, slot_list.is_empty())
}
pub fn purge_exact(&self, pubkey: &Pubkey, slots: HashSet<Slot>) -> (SlotList<T>, bool) {
let list = &mut self.account_maps.get(&pubkey).unwrap().1.write().unwrap();
let reclaims = list
let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap();
let slot_list = write_account_map_entry.slot_list_mut();
let reclaims = slot_list
.iter()
.filter(|(slot, _)| slots.contains(&slot))
.cloned()
.collect();
list.retain(|(slot, _)| !slots.contains(slot));
(reclaims, list.is_empty())
slot_list.retain(|(slot, _)| !slots.contains(slot));
(reclaims, slot_list.is_empty())
}
// Given a SlotSlice `L`, a list of ancestors and a maximum slot, find the latest element
@ -131,13 +358,13 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
pubkey: &Pubkey,
ancestors: Option<&Ancestors>,
max_root: Option<Slot>,
) -> Option<(RwLockReadGuard<SlotList<T>>, usize)> {
self.account_maps.get(pubkey).and_then(|list| {
let list_r = list.1.read().unwrap();
let lock = &list_r;
let found_index = self.latest_slot(ancestors, &lock, max_root)?;
Some((list_r, found_index))
})
) -> Option<(ReadAccountMapEntry<T>, usize)> {
self.get_account_read_entry(pubkey)
.and_then(|locked_entry| {
let found_index =
self.latest_slot(ancestors, &locked_entry.slot_list(), max_root)?;
Some((locked_entry, found_index))
})
}
// Get the maximum root <= `max_allowed_root` from the given `slice`
@ -160,72 +387,31 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
max_root
}
pub fn insert(
&mut self,
slot: Slot,
pubkey: &Pubkey,
account_info: T,
reclaims: &mut SlotList<T>,
) {
self.account_maps
.entry(*pubkey)
.or_insert_with(|| (AtomicU64::new(0), RwLock::new(SlotList::with_capacity(32))));
self.update(slot, pubkey, account_info, reclaims);
}
// Try to update an item in account_maps. If the account is not
// already present, then the function will return back Some(account_info) which
// the caller can then take the write lock and do an 'insert' with the item.
// It returns None if the item is already present and thus successfully updated.
pub fn update(
// Updates the given pubkey at the given slot with the new account information.
// Returns true if the pubkey was newly inserted into the index, otherwise, if the
// pubkey updates an existing entry in the index, returns false.
pub fn upsert(
&self,
slot: Slot,
pubkey: &Pubkey,
account_info: T,
reclaims: &mut SlotList<T>,
) -> Option<T> {
if let Some(lock) = self.account_maps.get(pubkey) {
let list = &mut lock.1.write().unwrap();
// filter out other dirty entries from the same slot
let mut same_slot_previous_updates: Vec<(usize, (Slot, &T))> = list
.iter()
.enumerate()
.filter_map(|(i, (s, value))| {
if *s == slot {
Some((i, (*s, value)))
} else {
None
}
})
.collect();
assert!(same_slot_previous_updates.len() <= 1);
if let Some((list_index, (s, previous_update_value))) = same_slot_previous_updates.pop()
{
reclaims.push((s, previous_update_value.clone()));
list.remove(list_index);
} else {
// Only increment ref count if the account was not previously updated in this slot
lock.0.fetch_add(1, Ordering::Relaxed);
}
list.push((slot, account_info));
None
} else {
Some(account_info)
}
) -> bool {
let (mut w_account_entry, is_newly_inserted) =
self.get_account_write_entry_else_create(pubkey);
w_account_entry.update(slot, account_info, reclaims);
is_newly_inserted
}
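A hedged usage sketch of the new single-call API (compare test_purge below): the old insert/update pair made the caller take the outer write lock on a miss, as the old update_index in accounts_db.rs above shows, whereas upsert handles both cases and only reports whether the key was new.

fn example_upsert(index: &AccountsIndex<u64>, pubkey: &Pubkey) {
    let mut reclaims = vec![];
    assert!(index.upsert(1, pubkey, 12, &mut reclaims)); // new key: entry created
    assert!(!index.upsert(1, pubkey, 10, &mut reclaims)); // existing key: updated in place
}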
pub fn unref_from_storage(&self, pubkey: &Pubkey) {
let locked_entry = self.account_maps.get(pubkey);
if let Some(entry) = locked_entry {
entry.0.fetch_sub(1, Ordering::Relaxed);
if let Some(locked_entry) = self.get_account_read_entry(pubkey) {
locked_entry.ref_count().fetch_sub(1, Ordering::Relaxed);
}
}
pub fn ref_count_from_storage(&self, pubkey: &Pubkey) -> RefCount {
let locked_entry = self.account_maps.get(pubkey);
if let Some(entry) = locked_entry {
entry.0.load(Ordering::Relaxed)
if let Some(locked_entry) = self.get_account_read_entry(pubkey) {
locked_entry.ref_count().load(Ordering::Relaxed)
} else {
0
}
@ -237,9 +423,9 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
reclaims: &mut SlotList<T>,
max_clean_root: Option<Slot>,
) {
let roots = &self.roots;
let roots_traker = &self.roots_tracker.read().unwrap();
let max_root = Self::get_max_root(roots, &list, max_clean_root);
let max_root = Self::get_max_root(&roots_traker.roots, &list, max_clean_root);
reclaims.extend(
list.iter()
@ -255,9 +441,8 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
reclaims: &mut SlotList<T>,
max_clean_root: Option<Slot>,
) {
if let Some(locked_entry) = self.account_maps.get(pubkey) {
let mut list = locked_entry.1.write().unwrap();
self.purge_older_root_entries(&mut list, reclaims, max_clean_root);
if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) {
self.purge_older_root_entries(locked_entry.slot_list_mut(), reclaims, max_clean_root);
}
}
@ -267,9 +452,9 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
pubkey: &Pubkey,
reclaims: &mut SlotList<T>,
) {
if let Some(entry) = self.account_maps.get(pubkey) {
let mut list = entry.1.write().unwrap();
list.retain(|(slot, entry)| {
if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) {
let slot_list = locked_entry.slot_list_mut();
slot_list.retain(|(slot, entry)| {
if *slot == purge_slot {
reclaims.push((*slot, entry.clone()));
}
@ -278,37 +463,32 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
}
}
pub fn add_index(&mut self, slot: Slot, pubkey: &Pubkey, account_info: T) {
let entry = self
.account_maps
.entry(*pubkey)
.or_insert_with(|| (AtomicU64::new(1), RwLock::new(vec![])));
entry.1.write().unwrap().push((slot, account_info));
}
pub fn can_purge(max_root: Slot, slot: Slot) -> bool {
slot < max_root
}
pub fn is_root(&self, slot: Slot) -> bool {
self.roots.contains(&slot)
self.roots_tracker.read().unwrap().roots.contains(&slot)
}
pub fn add_root(&mut self, slot: Slot) {
self.roots.insert(slot);
self.uncleaned_roots.insert(slot);
pub fn add_root(&self, slot: Slot) {
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
w_roots_tracker.roots.insert(slot);
w_roots_tracker.uncleaned_roots.insert(slot);
}
/// Remove the slot when the storage for the slot is freed
/// Accounts no longer reference this slot.
pub fn clean_dead_slot(&mut self, slot: Slot) {
self.roots.remove(&slot);
self.uncleaned_roots.remove(&slot);
self.previous_uncleaned_roots.remove(&slot);
pub fn clean_dead_slot(&self, slot: Slot) {
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
w_roots_tracker.roots.remove(&slot);
w_roots_tracker.uncleaned_roots.remove(&slot);
w_roots_tracker.previous_uncleaned_roots.remove(&slot);
}
pub fn reset_uncleaned_roots(&mut self, max_clean_root: Option<Slot>) -> HashSet<Slot> {
pub fn reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> {
let mut cleaned_roots = HashSet::new();
self.uncleaned_roots.retain(|root| {
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
w_roots_tracker.uncleaned_roots.retain(|root| {
let is_cleaned = max_clean_root
.map(|max_clean_root| *root <= max_clean_root)
.unwrap_or(true);
@ -318,7 +498,35 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
// Only keep the slots that have yet to be cleaned
!is_cleaned
});
std::mem::replace(&mut self.previous_uncleaned_roots, cleaned_roots)
std::mem::replace(&mut w_roots_tracker.previous_uncleaned_roots, cleaned_roots)
}
pub fn is_uncleaned_root(&self, slot: Slot) -> bool {
self.roots_tracker
.read()
.unwrap()
.uncleaned_roots
.contains(&slot)
}
pub fn all_roots(&self) -> Vec<Slot> {
self.roots_tracker
.read()
.unwrap()
.roots
.iter()
.cloned()
.collect()
}
#[cfg(test)]
pub fn clear_roots(&self) {
self.roots_tracker.write().unwrap().roots.clear()
}
#[cfg(test)]
pub fn uncleaned_roots_len(&self) -> usize {
self.roots_tracker.read().unwrap().uncleaned_roots.len()
}
}
@ -343,9 +551,9 @@ mod tests {
#[test]
fn test_insert_no_ancestors() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = HashMap::new();
@ -360,9 +568,9 @@ mod tests {
#[test]
fn test_insert_wrong_ancestors() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = vec![(1, 1)].into_iter().collect();
@ -376,14 +584,14 @@ mod tests {
#[test]
fn test_insert_with_ancestors() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = vec![(0, 0)].into_iter().collect();
let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
assert_eq!(list[idx], (0, true));
assert_eq!(list.slot_list()[idx], (0, true));
let mut num = 0;
let mut found_key = false;
@ -397,9 +605,150 @@ mod tests {
assert!(found_key);
}
fn setup_accounts_index_keys(num_pubkeys: usize) -> (AccountsIndex<bool>, Vec<Pubkey>) {
let index = AccountsIndex::<bool>::default();
let root_slot = 0;
let mut pubkeys: Vec<Pubkey> = std::iter::repeat_with(|| {
let new_pubkey = Pubkey::new_rand();
index.upsert(root_slot, &new_pubkey, true, &mut vec![]);
new_pubkey
})
.take(num_pubkeys.saturating_sub(1))
.collect();
if num_pubkeys != 0 {
pubkeys.push(Pubkey::default());
index.upsert(root_slot, &Pubkey::default(), true, &mut vec![]);
}
index.add_root(root_slot);
(index, pubkeys)
}
fn run_test_range(
index: &AccountsIndex<bool>,
pubkeys: &[Pubkey],
start_bound: Bound<usize>,
end_bound: Bound<usize>,
) {
// Exclusive `index_start`
let (pubkey_start, index_start) = match start_bound {
Unbounded => (Unbounded, 0),
Included(i) => (Included(pubkeys[i]), i),
Excluded(i) => (Excluded(pubkeys[i]), i + 1),
};
// Exclusive `index_end`
let (pubkey_end, index_end) = match end_bound {
Unbounded => (Unbounded, pubkeys.len()),
Included(i) => (Included(pubkeys[i]), i + 1),
Excluded(i) => (Excluded(pubkeys[i]), i),
};
let pubkey_range = (pubkey_start, pubkey_end);
let ancestors: Ancestors = HashMap::new();
let mut scanned_keys = HashSet::new();
index.range_scan_accounts(&ancestors, pubkey_range, |pubkey, _index| {
scanned_keys.insert(*pubkey);
});
let mut expected_len = 0;
for key in &pubkeys[index_start..index_end] {
expected_len += 1;
assert!(scanned_keys.contains(key));
}
assert_eq!(scanned_keys.len(), expected_len);
}
fn run_test_range_indexes(
index: &AccountsIndex<bool>,
pubkeys: &[Pubkey],
start: Option<usize>,
end: Option<usize>,
) {
let start_options = start
.map(|i| vec![Included(i), Excluded(i)])
.unwrap_or_else(|| vec![Unbounded]);
let end_options = end
.map(|i| vec![Included(i), Excluded(i)])
.unwrap_or_else(|| vec![Unbounded]);
for start in &start_options {
for end in &end_options {
run_test_range(index, pubkeys, *start, *end);
}
}
}
#[test]
fn test_range_scan_accounts() {
let (index, mut pubkeys) = setup_accounts_index_keys(3 * ITER_BATCH_SIZE);
pubkeys.sort();
run_test_range_indexes(&index, &pubkeys, None, None);
run_test_range_indexes(&index, &pubkeys, Some(ITER_BATCH_SIZE), None);
run_test_range_indexes(&index, &pubkeys, None, Some(2 * ITER_BATCH_SIZE as usize));
run_test_range_indexes(
&index,
&pubkeys,
Some(ITER_BATCH_SIZE as usize),
Some(2 * ITER_BATCH_SIZE as usize),
);
run_test_range_indexes(
&index,
&pubkeys,
Some(ITER_BATCH_SIZE as usize),
Some(2 * ITER_BATCH_SIZE as usize - 1),
);
run_test_range_indexes(
&index,
&pubkeys,
Some(ITER_BATCH_SIZE - 1 as usize),
Some(2 * ITER_BATCH_SIZE as usize + 1),
);
}
fn run_test_scan_accounts(num_pubkeys: usize) {
let (index, _) = setup_accounts_index_keys(num_pubkeys);
let ancestors: Ancestors = HashMap::new();
let mut scanned_keys = HashSet::new();
index.scan_accounts(&ancestors, |pubkey, _index| {
scanned_keys.insert(*pubkey);
});
assert_eq!(scanned_keys.len(), num_pubkeys);
}
#[test]
fn test_scan_accounts() {
run_test_scan_accounts(0);
run_test_scan_accounts(1);
run_test_scan_accounts(ITER_BATCH_SIZE * 10);
run_test_scan_accounts(ITER_BATCH_SIZE * 10 - 1);
run_test_scan_accounts(ITER_BATCH_SIZE * 10 + 1);
}
#[test]
fn test_accounts_iter_finished() {
let (index, _) = setup_accounts_index_keys(0);
let mut iter = index.iter(None::<Range<Pubkey>>);
assert!(iter.next().is_none());
let mut gc = vec![];
index.upsert(0, &Pubkey::new_rand(), true, &mut gc);
assert!(iter.next().is_none());
}
#[test]
fn test_is_root() {
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
assert!(!index.is_root(0));
index.add_root(0);
assert!(index.is_root(0));
@ -408,19 +757,19 @@ mod tests {
#[test]
fn test_insert_with_root() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
index.add_root(0);
let (list, idx) = index.get(&key.pubkey(), None, None).unwrap();
assert_eq!(list[idx], (0, true));
assert_eq!(list.slot_list()[idx], (0, true));
}
#[test]
fn test_clean_first() {
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
index.add_root(0);
index.add_root(1);
index.clean_dead_slot(0);
@ -431,7 +780,7 @@ mod tests {
#[test]
fn test_clean_last() {
//this behavior might be undefined, clean up should only occur on older slots
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
index.add_root(0);
index.add_root(1);
index.clean_dead_slot(1);
@ -441,92 +790,132 @@ mod tests {
#[test]
fn test_clean_and_unclean_slot() {
let mut index = AccountsIndex::<bool>::default();
assert_eq!(0, index.uncleaned_roots.len());
let index = AccountsIndex::<bool>::default();
assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len());
index.add_root(0);
index.add_root(1);
assert_eq!(2, index.uncleaned_roots.len());
assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
assert_eq!(0, index.previous_uncleaned_roots.len());
assert_eq!(
0,
index
.roots_tracker
.read()
.unwrap()
.previous_uncleaned_roots
.len()
);
index.reset_uncleaned_roots(None);
assert_eq!(2, index.roots.len());
assert_eq!(0, index.uncleaned_roots.len());
assert_eq!(2, index.previous_uncleaned_roots.len());
assert_eq!(2, index.roots_tracker.read().unwrap().roots.len());
assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len());
assert_eq!(
2,
index
.roots_tracker
.read()
.unwrap()
.previous_uncleaned_roots
.len()
);
index.add_root(2);
index.add_root(3);
assert_eq!(4, index.roots.len());
assert_eq!(2, index.uncleaned_roots.len());
assert_eq!(2, index.previous_uncleaned_roots.len());
assert_eq!(4, index.roots_tracker.read().unwrap().roots.len());
assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
assert_eq!(
2,
index
.roots_tracker
.read()
.unwrap()
.previous_uncleaned_roots
.len()
);
index.clean_dead_slot(1);
assert_eq!(3, index.roots.len());
assert_eq!(2, index.uncleaned_roots.len());
assert_eq!(1, index.previous_uncleaned_roots.len());
assert_eq!(3, index.roots_tracker.read().unwrap().roots.len());
assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
assert_eq!(
1,
index
.roots_tracker
.read()
.unwrap()
.previous_uncleaned_roots
.len()
);
index.clean_dead_slot(2);
assert_eq!(2, index.roots.len());
assert_eq!(1, index.uncleaned_roots.len());
assert_eq!(1, index.previous_uncleaned_roots.len());
assert_eq!(2, index.roots_tracker.read().unwrap().roots.len());
assert_eq!(1, index.roots_tracker.read().unwrap().uncleaned_roots.len());
assert_eq!(
1,
index
.roots_tracker
.read()
.unwrap()
.previous_uncleaned_roots
.len()
);
}
#[test]
fn test_update_last_wins() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let ancestors = vec![(0, 0)].into_iter().collect();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
assert_eq!(list[idx], (0, true));
assert_eq!(list.slot_list()[idx], (0, true));
drop(list);
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), false, &mut gc);
index.upsert(0, &key.pubkey(), false, &mut gc);
assert_eq!(gc, vec![(0, true)]);
let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
assert_eq!(list[idx], (0, false));
assert_eq!(list.slot_list()[idx], (0, false));
}
#[test]
fn test_update_new_slot() {
solana_logger::setup();
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let ancestors = vec![(0, 0)].into_iter().collect();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
index.insert(1, &key.pubkey(), false, &mut gc);
index.upsert(1, &key.pubkey(), false, &mut gc);
assert!(gc.is_empty());
let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
assert_eq!(list[idx], (0, true));
assert_eq!(list.slot_list()[idx], (0, true));
let ancestors = vec![(1, 0)].into_iter().collect();
let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
assert_eq!(list[idx], (1, false));
assert_eq!(list.slot_list()[idx], (1, false));
}
#[test]
fn test_update_gc_purged_slot() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
index.upsert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
index.insert(1, &key.pubkey(), false, &mut gc);
index.insert(2, &key.pubkey(), true, &mut gc);
index.insert(3, &key.pubkey(), true, &mut gc);
index.upsert(1, &key.pubkey(), false, &mut gc);
index.upsert(2, &key.pubkey(), true, &mut gc);
index.upsert(3, &key.pubkey(), true, &mut gc);
index.add_root(0);
index.add_root(1);
index.add_root(3);
index.insert(4, &key.pubkey(), true, &mut gc);
index.upsert(4, &key.pubkey(), true, &mut gc);
// Updating index should not purge older roots, only purges
// previous updates within the same slot
assert_eq!(gc, vec![]);
let (list, idx) = index.get(&key.pubkey(), None, None).unwrap();
assert_eq!(list[idx], (3, true));
assert_eq!(list.slot_list()[idx], (3, true));
let mut num = 0;
let mut found_key = false;
@ -544,13 +933,11 @@ mod tests {
#[test]
fn test_purge() {
let key = Keypair::new();
let mut index = AccountsIndex::<u64>::default();
let index = AccountsIndex::<u64>::default();
let mut gc = Vec::new();
assert_eq!(Some(12), index.update(1, &key.pubkey(), 12, &mut gc));
assert!(index.upsert(1, &key.pubkey(), 12, &mut gc));
index.insert(1, &key.pubkey(), 12, &mut gc);
assert_eq!(None, index.update(1, &key.pubkey(), 10, &mut gc));
assert!(!index.upsert(1, &key.pubkey(), 10, &mut gc));
let purges = index.purge(&key.pubkey());
assert_eq!(purges, (vec![], false));
@ -559,13 +946,13 @@ mod tests {
let purges = index.purge(&key.pubkey());
assert_eq!(purges, (vec![(1, 10)], true));
assert_eq!(None, index.update(1, &key.pubkey(), 9, &mut gc));
assert!(!index.upsert(1, &key.pubkey(), 9, &mut gc));
}
#[test]
fn test_latest_slot() {
let slot_slice = vec![(0, true), (5, true), (3, true), (7, true)];
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
// No ancestors, no root, should return None
assert!(index.latest_slot(None, &slot_slice, None).is_none());
@ -615,7 +1002,7 @@ mod tests {
#[test]
fn test_purge_older_root_entries() {
// No roots, should be no reclaims
let mut index = AccountsIndex::<bool>::default();
let index = AccountsIndex::<bool>::default();
let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
let mut reclaims = vec![];
index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);


@ -5629,9 +5629,15 @@ mod tests {
impl Bank {
fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec<Slot> {
let accounts_index = self.rc.accounts.accounts_db.accounts_index.read().unwrap();
let (accounts, _) = accounts_index.get(&pubkey, Some(&ancestors), None).unwrap();
accounts
let (locked_entry, _) = self
.rc
.accounts
.accounts_db
.accounts_index
.get(&pubkey, Some(&ancestors), None)
.unwrap();
locked_entry
.slot_list()
.iter()
.map(|(slot, _)| *slot)
.collect::<Vec<Slot>>()
@ -5744,16 +5750,12 @@ mod tests {
.accounts
.accounts_db
.accounts_index
.write()
.unwrap()
.add_root(genesis_bank1.slot() + 1);
bank1_without_zero
.rc
.accounts
.accounts_db
.accounts_index
.write()
.unwrap()
.purge(&zero_lamport_pubkey);
let some_slot = 1000;