Handle cleaning of zero-lamport accounts w.r.t. Incremental Snapshots (#18870)
* Handle cleaning zero-lamport accounts in slots higher than the last full snapshot slot. This is part of the Incremental Snapshot work. Fixes #18825

This commit is contained in:
parent 5085f10b4c
commit 5fb6b341c4
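The heart of the change is a new `last_full_snapshot_slot: Option<Slot>` parameter threaded through `AccountsDb::clean_accounts()` and its callers (`shrink_all_slots()`, `Bank::clean_accounts()`, `exhaustively_free_unused_resource()`, `verify_snapshot_bank()`, and the snapshot archive paths). Below is a minimal sketch, not part of the diff, of the purge rule the parameter enforces; `can_purge_zero_lamport` is an invented illustration:

type Slot = u64;

// Hypothetical helper showing the rule this commit enforces: a zero-lamport
// account last updated in `slot` may only be purged once a full snapshot at
// or beyond that slot exists (or full snapshots are not in play at all).
fn can_purge_zero_lamport(slot: Slot, last_full_snapshot_slot: Option<Slot>) -> bool {
    match last_full_snapshot_slot {
        None => true,               // snapshots disabled: nothing to protect
        Some(full) => slot <= full, // covered by the full snapshot: safe to purge
    }
}

fn main() {
    assert!(can_purge_zero_lamport(10, None)); // no full snapshot taken
    assert!(can_purge_zero_lamport(10, Some(10))); // exactly covered
    assert!(!can_purge_zero_lamport(10, Some(9))); // newer than the snapshot: keep alive
}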
@@ -102,7 +102,7 @@ fn main() {
     for x in 0..iterations {
         if clean {
             let mut time = Measure::start("clean");
-            accounts.accounts_db.clean_accounts(None, false);
+            accounts.accounts_db.clean_accounts(None, false, None);
             time.stop();
             println!("{}", time);
             for slot in 0..num_slots {

@@ -1155,7 +1155,7 @@ fn load_frozen_forks(
             // Must be called after `squash()`, so that AccountsDb knows what
             // the roots are for the cache flushing in exhaustively_free_unused_resource().
             // This could take few secs; so update last_free later
-            new_root_bank.exhaustively_free_unused_resource();
+            new_root_bank.exhaustively_free_unused_resource(None);
            last_free = Instant::now();
        }

@@ -176,7 +176,7 @@ fn bench_delete_dependencies(bencher: &mut Bencher) {
         accounts.add_root(i);
     }
     bencher.iter(|| {
-        accounts.accounts_db.clean_accounts(None, false);
+        accounts.accounts_db.clean_accounts(None, false, None);
     });
 }

@@ -2078,7 +2078,7 @@ mod tests {
             }
         }
         info!("done..cleaning..");
-        accounts.accounts_db.clean_accounts(None, false);
+        accounts.accounts_db.clean_accounts(None, false, None);
     }

     fn load_accounts_no_store(accounts: &Accounts, tx: Transaction) -> Vec<TransactionLoadResult> {

@@ -160,7 +160,7 @@ impl SnapshotRequestHandler {
                     // accounts that were included in the bank delta hash when the bank was frozen,
                     // and if we clean them here, the newly created snapshot's hash may not match
                     // the frozen hash.
-                    snapshot_root_bank.clean_accounts(true, false);
+                    snapshot_root_bank.clean_accounts(true, false, None);
                     clean_time.stop();

                     if accounts_db_caching_enabled {

@@ -399,7 +399,7 @@ impl AccountsBackgroundService {
                     // slots >= bank.slot()
                     bank.force_flush_accounts_cache();
                 }
-                bank.clean_accounts(true, false);
+                bank.clean_accounts(true, false, None);
                 last_cleaned_block_height = bank.block_height();
             }
         }

@@ -24,7 +24,8 @@ use crate::{
     accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats, PreviousPass},
     accounts_index::{
         AccountIndexGetResult, AccountSecondaryIndexes, AccountsIndex, AccountsIndexRootsStats,
-        IndexKey, IsCached, ScanResult, SlotList, SlotSlice, ZeroLamport, BINS_FOR_TESTING,
+        IndexKey, IsCached, RefCount, ScanResult, SlotList, SlotSlice, ZeroLamport,
+        BINS_FOR_TESTING,
     },
     ancestors::Ancestors,
     append_vec::{AppendVec, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion},

@@ -977,6 +978,10 @@ pub struct AccountsDb {
     /// such that potentially a 0-lamport account update could be present which
     /// means we can remove the account from the index entirely.
     dirty_stores: DashMap<(Slot, AppendVecId), Arc<AccountStorageEntry>>,
+
+    /// Zero-lamport accounts that are *not* purged during clean because they need to stay alive
+    /// for incremental snapshot support.
+    zero_lamport_accounts_to_purge_after_full_snapshot: DashSet<(Slot, Pubkey)>,
 }

 #[derive(Debug, Default)]

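A minimal single-threaded sketch of this field's lifecycle, with invented stand-ins (`DeferredPurges`, `defer`, `take_ready`, and a plain `HashSet` in place of the `DashSet`): clean defers zero-lamport purges that post-date the last full snapshot, and a later clean re-queues them once a newer full snapshot covers them, mirroring the `retain` added to `construct_candidate_clean_keys` below.

use std::collections::HashSet;

type Slot = u64;
type Pubkey = [u8; 32]; // stand-in for solana_sdk::pubkey::Pubkey

struct DeferredPurges {
    pending: HashSet<(Slot, Pubkey)>,
}

impl DeferredPurges {
    // During clean: a zero-lamport account newer than the full snapshot stays alive.
    fn defer(&mut self, slot: Slot, pubkey: Pubkey) {
        self.pending.insert((slot, pubkey));
    }

    // During a later clean: entries covered by the new full snapshot become
    // clean candidates again and are dropped from the deferred set.
    fn take_ready(&mut self, last_full_snapshot_slot: Slot) -> Vec<Pubkey> {
        let mut ready = Vec::new();
        self.pending.retain(|&(slot, pubkey)| {
            let is_candidate = slot <= last_full_snapshot_slot;
            if is_candidate {
                ready.push(pubkey);
            }
            !is_candidate
        });
        ready
    }
}

fn main() {
    let mut deferred = DeferredPurges { pending: HashSet::new() };
    deferred.defer(150, [1; 32]); // zero-lamport update at slot 150; full snapshot was at 100
    assert!(deferred.take_ready(100).is_empty()); // not covered yet
    assert_eq!(deferred.take_ready(150).len(), 1); // covered by the next full snapshot
    assert!(deferred.pending.is_empty());
}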
@@ -1410,6 +1415,7 @@ impl AccountsDb {
             remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization::default(),
             shrink_ratio: AccountShrinkThreshold::default(),
             dirty_stores: DashMap::default(),
+            zero_lamport_accounts_to_purge_after_full_snapshot: DashSet::default(),
         }
     }

@@ -1735,6 +1741,7 @@ impl AccountsDb {
     fn construct_candidate_clean_keys(
         &self,
         max_clean_root: Option<Slot>,
+        last_full_snapshot_slot: Option<Slot>,
         timings: &mut CleanKeyTimings,
     ) -> Vec<Pubkey> {
         let mut dirty_store_processing_time = Measure::start("dirty_store_processing");

@@ -1783,10 +1790,28 @@
         timings.delta_key_count = pubkeys.len() as u64;

         let mut hashset_to_vec = Measure::start("flat_map");
-        let pubkeys: Vec<Pubkey> = pubkeys.into_iter().collect();
+        let mut pubkeys: Vec<Pubkey> = pubkeys.into_iter().collect();
         hashset_to_vec.stop();
         timings.hashset_to_vec_us += hashset_to_vec.as_us();

+        // Check if we should purge any of the zero_lamport_accounts_to_purge_later, based on the
+        // last_full_snapshot_slot.
+        assert!(
+            last_full_snapshot_slot.is_some() || self.zero_lamport_accounts_to_purge_after_full_snapshot.is_empty(),
+            "if snapshots are disabled, then zero_lamport_accounts_to_purge_later should always be empty"
+        );
+        if let Some(last_full_snapshot_slot) = last_full_snapshot_slot {
+            self.zero_lamport_accounts_to_purge_after_full_snapshot
+                .retain(|(slot, pubkey)| {
+                    let is_candidate_for_clean =
+                        max_slot >= *slot && last_full_snapshot_slot >= *slot;
+                    if is_candidate_for_clean {
+                        pubkeys.push(*pubkey);
+                    }
+                    !is_candidate_for_clean
+                });
+        }
+
         pubkeys
     }

@@ -1794,7 +1819,12 @@
     // collection
     // Only remove those accounts where the entire rooted history of the account
     // can be purged because there are no live append vecs in the ancestors
-    pub fn clean_accounts(&self, max_clean_root: Option<Slot>, is_startup: bool) {
+    pub fn clean_accounts(
+        &self,
+        max_clean_root: Option<Slot>,
+        is_startup: bool,
+        last_full_snapshot_slot: Option<Slot>,
+    ) {
         let max_clean_root = self.max_clean_root(max_clean_root);

         // hold a lock to prevent slot shrinking from running because it might modify some rooted

@@ -1804,7 +1834,11 @@
         self.report_store_stats();

         let mut key_timings = CleanKeyTimings::default();
-        let pubkeys = self.construct_candidate_clean_keys(max_clean_root, &mut key_timings);
+        let pubkeys = self.construct_candidate_clean_keys(
+            max_clean_root,
+            last_full_snapshot_slot,
+            &mut key_timings,
+        );

         let total_keys_count = pubkeys.len();
         let mut accounts_scan = Measure::start("accounts_scan");

@@ -1938,17 +1972,13 @@
         Self::calc_delete_dependencies(&purges_zero_lamports, &mut store_counts);
         calc_deps_time.stop();

-        // Only keep purges_zero_lamports where the entire history of the account in the root set
-        // can be purged. All AppendVecs for those updates are dead.
         let mut purge_filter = Measure::start("purge_filter");
-        purges_zero_lamports.retain(|_pubkey, (account_infos, _ref_count)| {
-            for (_slot, account_info) in account_infos.iter() {
-                if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
-                    return false;
-                }
-            }
-            true
-        });
+        self.filter_zero_lamport_clean_for_incremental_snapshots(
+            max_clean_root,
+            last_full_snapshot_slot,
+            &store_counts,
+            &mut purges_zero_lamports,
+        );
         purge_filter.stop();

         let mut reclaims_time = Measure::start("reclaims");

@@ -2077,6 +2107,76 @@ impl AccountsDb {
         }
     }

+    /// During clean, some zero-lamport accounts that are marked for purge should *not* actually
+    /// get purged. Filter out those accounts here.
+    ///
+    /// When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher
+    /// than the last full snapshot slot. This is to protect against the following scenario:
+    ///
+    /// ```text
+    /// A full snapshot is taken, and it contains an account with a non-zero balance. Later,
+    /// that account's balance goes to zero. Eventually, cleaning runs, and before this change,
+    /// the account would be cleaned up. Finally, an incremental snapshot is taken.
+    ///
+    /// Later, the incremental (and full) snapshot is used to rebuild the bank and accounts
+    /// database (e.g. if the node restarts). The full snapshot _does_ contain the account (from
+    /// above) and its balance is non-zero; however, since the account was cleaned up in a later
+    /// slot, the incremental snapshot does not contain any info about this account. Thus, the
+    /// accounts database will contain the old info from this account, which has its old non-zero
+    /// balance. Very bad!
+    /// ```
+    ///
+    /// This filtering step can be skipped if there is no `last_full_snapshot_slot`, or if the
+    /// `max_clean_root` is less-than-or-equal-to the `last_full_snapshot_slot`.
+    fn filter_zero_lamport_clean_for_incremental_snapshots(
+        &self,
+        max_clean_root: Option<Slot>,
+        last_full_snapshot_slot: Option<Slot>,
+        store_counts: &HashMap<AppendVecId, (usize, HashSet<Pubkey>)>,
+        purges_zero_lamports: &mut HashMap<Pubkey, (SlotList<AccountInfo>, RefCount)>,
+    ) {
+        let should_filter_for_incremental_snapshots =
+            max_clean_root.unwrap_or(Slot::MAX) > last_full_snapshot_slot.unwrap_or(Slot::MAX);
+        assert!(
+            last_full_snapshot_slot.is_some() || !should_filter_for_incremental_snapshots,
+            "if filtering for incremental snapshots, then snapshots should be enabled",
+        );
+
+        purges_zero_lamports.retain(|pubkey, (slot_account_infos, _ref_count)| {
+            // Only keep purges_zero_lamports where the entire history of the account in the root set
+            // can be purged. All AppendVecs for those updates are dead.
+            for (_slot, account_info) in slot_account_infos.iter() {
+                if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
+                    return false;
+                }
+            }
+
+            // Exit early if not filtering more for incremental snapshots
+            if !should_filter_for_incremental_snapshots {
+                return true;
+            }
+
+            let slot_account_info_at_highest_slot = slot_account_infos
+                .iter()
+                .max_by_key(|(slot, _account_info)| slot);
+
+            slot_account_info_at_highest_slot.map_or(true, |(slot, account_info)| {
+                // Do *not* purge zero-lamport accounts if the slot is greater than the last full
+                // snapshot slot. Since we're `retain`ing the accounts-to-purge, I felt creating
+                // the `cannot_purge` variable made this easier to understand. Accounts that do
+                // not get purged here are added to a list so they can be considered for purging
+                // later (i.e. after the next full snapshot).
+                assert!(account_info.is_zero_lamport());
+                let cannot_purge = *slot > last_full_snapshot_slot.unwrap();
+                if cannot_purge {
+                    self.zero_lamport_accounts_to_purge_after_full_snapshot
+                        .insert((*slot, *pubkey));
+                }
+                !cannot_purge
+            })
+        });
+    }
+
     // Must be kept private!, does sensitive cleanup that should only be called from
     // supported pipelines in AccountsDb
     fn process_dead_slots(

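The gating predicate in the new function is compact; here is a standalone sketch of its truth table, with `should_filter` as an invented free-function stand-in for the inlined expression:

type Slot = u64;

// Filtering kicks in only when cleaning may proceed past the last full
// snapshot slot; `None` for max_clean_root means "no upper bound" (Slot::MAX).
fn should_filter(max_clean_root: Option<Slot>, last_full_snapshot_slot: Option<Slot>) -> bool {
    max_clean_root.unwrap_or(Slot::MAX) > last_full_snapshot_slot.unwrap_or(Slot::MAX)
}

fn main() {
    assert!(!should_filter(Some(100), Some(100))); // clean stops at the snapshot: no filtering
    assert!(should_filter(Some(101), Some(100))); // clean passes the snapshot: filter
    assert!(should_filter(None, Some(100))); // unbounded clean: filter
    assert!(!should_filter(None, None)); // snapshots disabled: no filtering
}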
@@ -2482,7 +2582,7 @@ impl AccountsDb {
         num_candidates
     }

-    pub fn shrink_all_slots(&self, is_startup: bool) {
+    pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option<Slot>) {
         const DIRTY_STORES_CLEANING_THRESHOLD: usize = 10_000;
         const OUTER_CHUNK_SIZE: usize = 2000;
         const INNER_CHUNK_SIZE: usize = OUTER_CHUNK_SIZE / 8;

@@ -2496,7 +2596,7 @@ impl AccountsDb {
                 }
             });
             if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
-                self.clean_accounts(None, is_startup);
+                self.clean_accounts(None, is_startup, last_full_snapshot_slot);
             }
         });
     } else {

@@ -2507,7 +2607,7 @@ impl AccountsDb {
                 self.do_shrink_slot_forced_v1(slot);
             }
             if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
-                self.clean_accounts(None, is_startup);
+                self.clean_accounts(None, is_startup, last_full_snapshot_slot);
             }
         }
     }

@@ -7220,7 +7320,7 @@ pub mod tests {
         // overwrite old rooted account version; only the r_slot_0_stores.count() should be
         // decremented
         db.store_uncached(2, &[(&pubkeys[0], &account)]);
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         {
             let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
             let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();

@@ -7659,7 +7759,7 @@ pub mod tests {

         //slot is gone
         accounts.print_accounts_stats("pre-clean");
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         assert!(accounts.storage.0.get(&0).is_none());

         //new value is there

@@ -7742,7 +7842,7 @@ pub mod tests {

         // Slot 1 should be removed, slot 0 cannot be removed because it still has
         // the latest update for pubkey 2
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         assert!(accounts.storage.get_slot_stores(0).is_some());
         assert!(accounts.storage.get_slot_stores(1).is_none());

@@ -7777,7 +7877,7 @@ pub mod tests {
         assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3);
         assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1);

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         // Slots 0 and 1 should each have been cleaned because all of their
         // accounts are zero lamports
         assert!(accounts.storage.get_slot_stores(0).is_none());

@@ -7791,7 +7891,7 @@ pub mod tests {
         assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1);
         assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0);

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         // Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0
         assert!(accounts.storage.get_slot_stores(2).is_none());
         assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);

@@ -7818,7 +7918,7 @@ pub mod tests {

         // Slot 0 should be removed, and
         // zero-lamport account should be cleaned
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         assert!(accounts.storage.get_slot_stores(0).is_none());
         assert!(accounts.storage.get_slot_stores(1).is_none());

@@ -7858,7 +7958,7 @@ pub mod tests {
         assert_eq!(accounts.alive_account_count_in_slot(0), 1);
         assert_eq!(accounts.alive_account_count_in_slot(1), 1);

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         //now old state is cleaned up
         assert_eq!(accounts.alive_account_count_in_slot(0), 0);

@@ -7892,7 +7992,7 @@ pub mod tests {

         accounts.print_accounts_stats("");

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         //Old state behind zero-lamport account is cleaned up
         assert_eq!(accounts.alive_account_count_in_slot(0), 0);

@@ -8002,7 +8102,7 @@ pub mod tests {
             accounts.account_indexes.keys = None;
         }

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         //both zero lamport and normal accounts are cleaned up
         assert_eq!(accounts.alive_account_count_in_slot(0), 0);

@@ -8052,13 +8152,13 @@ pub mod tests {
         // updates in later slots in slot 1
         assert_eq!(accounts.alive_account_count_in_slot(0), 1);
         assert_eq!(accounts.alive_account_count_in_slot(1), 1);
-        accounts.clean_accounts(Some(0), false);
+        accounts.clean_accounts(Some(0), false, None);
         assert_eq!(accounts.alive_account_count_in_slot(0), 1);
         assert_eq!(accounts.alive_account_count_in_slot(1), 1);
         assert!(accounts.accounts_index.get(&pubkey, None, None).is_some());

         // Now the account can be cleaned up
-        accounts.clean_accounts(Some(1), false);
+        accounts.clean_accounts(Some(1), false, None);
         assert_eq!(accounts.alive_account_count_in_slot(0), 0);
         assert_eq!(accounts.alive_account_count_in_slot(1), 0);

@@ -8083,7 +8183,7 @@ pub mod tests {
         assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);

         //now uncleaned roots are cleaned up
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
     }

@@ -8100,7 +8200,7 @@ pub mod tests {
         assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);

         //now uncleaned roots are cleaned up
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
     }

@@ -8112,7 +8212,7 @@ pub mod tests {

         // Create 100 accounts in slot 0
         create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         check_accounts(&accounts, &pubkeys, 0, 100, 1);

         // do some updates to those accounts and re-check

@@ -8148,7 +8248,7 @@ pub mod tests {

         // Modify first 20 of the accounts from slot 0 in slot 2
         modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4);
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         // Overwrite account 31 from slot 0 with lamports=0 into slot 2.
         // Slot 2 should now have 20 + 1 = 21 accounts
         let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());

@@ -8162,7 +8262,7 @@ pub mod tests {
         accounts.add_root(latest_slot);
         assert!(check_storage(&accounts, 2, 31));

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         // The first 20 accounts of slot 0 have been updated in slot 2, as well as
         // accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and
         // slot 2 respectively), so only 78 accounts are left in slot 0's storage entries.

@@ -8298,7 +8398,7 @@ pub mod tests {

         accounts.print_accounts_stats("pre_purge");

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         accounts.print_accounts_stats("post_purge");

@@ -8363,7 +8463,7 @@ pub mod tests {
         info!("ancestors: {:?}", ancestors);
         let hash = accounts.update_accounts_hash_test(current_slot, &ancestors);

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         assert_eq!(
             accounts.update_accounts_hash_test(current_slot, &ancestors),

@@ -8430,7 +8530,7 @@ pub mod tests {

         accounts.print_accounts_stats("accounts");

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         accounts.print_accounts_stats("accounts_post_purge");
         let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);

@@ -8503,7 +8603,7 @@ pub mod tests {
     fn test_accounts_purge_chained_purge_before_snapshot_restore() {
         solana_logger::setup();
         with_chained_zero_lamport_accounts(|accounts, current_slot| {
-            accounts.clean_accounts(None, false);
+            accounts.clean_accounts(None, false, None);
             reconstruct_accounts_db_via_serialization(&accounts, current_slot)
         });
     }

@@ -8514,7 +8614,7 @@ pub mod tests {
         with_chained_zero_lamport_accounts(|accounts, current_slot| {
             let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
             accounts.print_accounts_stats("after_reconstruct");
-            accounts.clean_accounts(None, false);
+            accounts.clean_accounts(None, false, None);
             reconstruct_accounts_db_via_serialization(&accounts, current_slot)
         });
     }

@@ -9265,7 +9365,7 @@ pub mod tests {
         accounts.print_count_and_status("before reconstruct");
         let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
         accounts.print_count_and_status("before purge zero");
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         accounts.print_count_and_status("after purge zero");

         assert_load_account(&accounts, current_slot, pubkey, old_lamport);

@@ -9326,7 +9426,7 @@ pub mod tests {

         accounts.print_accounts_stats("Post-B pre-clean");

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         info!("post B");
         accounts.print_accounts_stats("Post-B");

@@ -9366,7 +9466,7 @@ pub mod tests {

         accounts.get_accounts_delta_hash(current_slot);
         accounts.add_root(current_slot);
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         accounts.print_accounts_stats("Post-D clean");

@@ -9456,7 +9556,7 @@ pub mod tests {
         current_slot += 1;
         assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
         accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         assert_eq!(
             // Removed one reference from the dead slot (reference only counted once

@@ -9481,9 +9581,9 @@ pub mod tests {
         // If step C and step D should be purged, snapshot restore would cause
         // pubkey1 to be revived as the state of step A.
         // So, prevent that from happening by introducing refcount
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         info!("pubkey: {}", pubkey1);
         accounts.print_accounts_stats("pre_clean");

@@ -9498,10 +9598,10 @@ pub mod tests {
         accounts.add_root(current_slot);

         // Do clean
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         // 2nd clean needed to clean-up pubkey1
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         // Ensure pubkey2 is cleaned from the index finally
         assert_not_load_account(&accounts, current_slot, pubkey1);

@@ -9526,7 +9626,7 @@ pub mod tests {
             accounts.shrink_candidate_slots();
         }

-        accounts.shrink_all_slots(*startup);
+        accounts.shrink_all_slots(*startup, None);
     }
 }

@@ -9642,13 +9742,13 @@ pub mod tests {
         accounts.get_accounts_delta_hash(current_slot);
         accounts.add_root(current_slot);

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         assert_eq!(
             pubkey_count,
             accounts.all_account_count_in_append_vec(shrink_slot)
         );
-        accounts.shrink_all_slots(*startup);
+        accounts.shrink_all_slots(*startup, None);
         assert_eq!(
             pubkey_count_after_shrink,
             accounts.all_account_count_in_append_vec(shrink_slot)

@@ -9666,7 +9766,7 @@ pub mod tests {
             .unwrap();

         // repeating should be no-op
-        accounts.shrink_all_slots(*startup);
+        accounts.shrink_all_slots(*startup, None);
         assert_eq!(
             pubkey_count_after_shrink,
             accounts.all_account_count_in_append_vec(shrink_slot)

@@ -9710,7 +9810,7 @@ pub mod tests {
         }
         accounts.get_accounts_delta_hash(current_slot);
         accounts.add_root(current_slot);
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         assert_eq!(
             pubkey_count,

@@ -9726,7 +9826,7 @@ pub mod tests {
         );

         // Now, do full-shrink.
-        accounts.shrink_all_slots(false);
+        accounts.shrink_all_slots(false, None);
         assert_eq!(
             pubkey_count_after_shrink,
             accounts.all_account_count_in_append_vec(shrink_slot)

@@ -9855,7 +9955,7 @@ pub mod tests {
         accounts.get_accounts_delta_hash(current_slot);
         accounts.add_root(current_slot);

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         assert_eq!(
             pubkey_count,

@@ -9870,7 +9970,7 @@ pub mod tests {
         );

         // Now, do full-shrink.
-        accounts.shrink_all_slots(false);
+        accounts.shrink_all_slots(false, None);
         assert_eq!(
             pubkey_count_after_shrink,
             accounts.all_account_count_in_append_vec(shrink_slot)

@@ -10084,7 +10184,7 @@ pub mod tests {
         accounts.flush_accounts_cache(true, None);

         // clear out the dirty keys
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         // flush 1
         accounts.get_accounts_delta_hash(1);

@@ -10096,11 +10196,11 @@ pub mod tests {
         // clean to remove pubkey1 from 0,
         // shrink to shrink pubkey1 from 0
         // then another clean to remove pubkey1 from slot 1
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         accounts.shrink_candidate_slots();

-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);

         accounts.print_accounts_stats("post-clean");
         assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);

@@ -10128,12 +10228,12 @@ pub mod tests {
             accounts.store_uncached(1, &[(key, &account)]);
         }
         accounts.add_root(1);
-        accounts.clean_accounts(None, false);
-        accounts.shrink_all_slots(false);
+        accounts.clean_accounts(None, false, None);
+        accounts.shrink_all_slots(false, None);

         // Clean again to flush the dirty stores
         // and allow them to be recycled in the next step
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         accounts.print_accounts_stats("post-shrink");
         let num_stores = accounts.recycle_stores.read().unwrap().entry_count();
         assert!(num_stores > 0);

@@ -10180,7 +10280,7 @@ pub mod tests {
         db.add_root(1);

         // Only clean zero lamport accounts up to slot 0
-        db.clean_accounts(Some(0), false);
+        db.clean_accounts(Some(0), false, None);

         // Should still be able to find zero lamport account in slot 1
         assert_eq!(

@@ -10388,9 +10488,9 @@ pub mod tests {

         db.add_root(0);
         db.add_root(1);
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         db.flush_accounts_cache(true, None);
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         db.add_root(2);

         assert_eq!(db.read_only_accounts_cache.cache_len(), 0);

@@ -10438,7 +10538,7 @@ pub mod tests {
         db.add_root(1);

         // Clean should not remove anything yet as nothing has been flushed
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         let account = db
             .do_load(
                 &Ancestors::default(),

@@ -10454,7 +10554,7 @@ pub mod tests {
         // Flush, then clean again. Should not need another root to initiate the cleaning
         // because `accounts_index.uncleaned_roots` should be correct
         db.flush_accounts_cache(true, None);
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         assert!(db
             .do_load(
                 &Ancestors::default(),

@@ -10510,7 +10610,7 @@ pub mod tests {
         // Flush, then clean. Should not need another root to initiate the cleaning
         // because `accounts_index.uncleaned_roots` should be correct
         db.flush_accounts_cache(true, None);
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);

         // The `zero_lamport_account_key` is still alive in slot 1, so refcount for the
         // pubkey should be 2

@@ -10669,7 +10769,7 @@ pub mod tests {

         // Run clean, unrooted slot 1 should not be purged, and still readable from the cache,
         // because we're still doing a scan on it.
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         let account = db
             .do_load(
                 &scan_ancestors,

@@ -10683,7 +10783,7 @@ pub mod tests {
         // When the scan is over, clean should not panic and should not purge something
         // still in the cache.
         scan_tracker.exit().unwrap();
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         let account = db
             .do_load(
                 &scan_ancestors,

@@ -11205,7 +11305,7 @@ pub mod tests {
         db.get_accounts_delta_hash(1);

         // Clean to remove outdated entry from slot 0
-        db.clean_accounts(Some(1), false);
+        db.clean_accounts(Some(1), false, None);

         // Shrink Slot 0
         let mut slot0_stores = db.storage.get_slot_storage_entries(0).unwrap();

@@ -11230,7 +11330,7 @@ pub mod tests {
         // Should be one store before clean for slot 0
         assert_eq!(db.storage.get_slot_storage_entries(0).unwrap().len(), 1);
         db.get_accounts_delta_hash(2);
-        db.clean_accounts(Some(2), false);
+        db.clean_accounts(Some(2), false, None);

         // No stores should exist for slot 0 after clean
         assert!(db.storage.get_slot_storage_entries(0).is_none());

@@ -11276,7 +11376,7 @@ pub mod tests {
         // Checking that the uncleaned_pubkeys are not pre-maturely removed
         // such that when the slots are rooted, and can actually be cleaned, then the
         // delta keys are still there.
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);

         db.print_accounts_stats("post-clean1");
         // Check stores > 0

@@ -11291,12 +11391,12 @@ pub mod tests {
         db.store_uncached(2, &[(&account_key1, &account3)]);
         db.get_accounts_delta_hash(2);

-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);
         db.print_accounts_stats("post-clean2");

         // root slots 1
         db.add_root(1);
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);

         db.print_accounts_stats("post-clean3");

@@ -11305,7 +11405,7 @@ pub mod tests {
         db.add_root(3);

         // Check that we can clean where max_root=3 and slot=2 is not rooted
-        db.clean_accounts(None, false);
+        db.clean_accounts(None, false, None);

         assert!(db.uncleaned_pubkeys.is_empty());

@@ -12106,7 +12206,7 @@ pub mod tests {

         // The later rooted zero-lamport update to `shared_key` cannot be cleaned
         // because it is kept alive by the unrooted slot.
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         assert!(accounts
             .accounts_index
             .get_account_read_entry(&shared_key)

@@ -12117,11 +12217,196 @@ pub mod tests {
         accounts.purge_slot(slot0, 0, is_from_abs);

         // Now clean should clean up the remaining key
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         assert!(accounts
             .accounts_index
             .get_account_read_entry(&shared_key)
             .is_none());
         assert!(accounts.storage.get_slot_storage_entries(slot0).is_none());
     }

+    /// Test to make sure `clean_accounts()` works properly with the `last_full_snapshot_slot`
+    /// parameter. Basically:
+    ///
+    /// - slot 1: set Account1's balance to non-zero
+    /// - slot 2: set Account1's balance to a different non-zero amount
+    /// - slot 3: set Account1's balance to zero
+    /// - call `clean_accounts()` with `max_clean_root` set to 2
+    ///     - ensure Account1 has *not* been purged
+    ///     - ensure the store from slot 1 is cleaned up
+    /// - call `clean_accounts()` with `last_full_snapshot_slot` set to 2
+    ///     - ensure Account1 has *not* been purged
+    /// - call `clean_accounts()` with `last_full_snapshot_slot` set to 3
+    ///     - ensure Account1 *has* been purged
+    #[test]
+    fn test_clean_accounts_with_last_full_snapshot_slot() {
+        solana_logger::setup();
+        let accounts_db = AccountsDb::new_single_for_tests();
+        let pubkey = solana_sdk::pubkey::new_rand();
+        let owner = solana_sdk::pubkey::new_rand();
+        let space = 0;
+
+        let slot1 = 1;
+        let account = AccountSharedData::new(111, space, &owner);
+        accounts_db.store_cached(slot1, &[(&pubkey, &account)]);
+        accounts_db.get_accounts_delta_hash(slot1);
+        accounts_db.add_root(slot1);
+
+        let slot2 = 2;
+        let account = AccountSharedData::new(222, space, &owner);
+        accounts_db.store_cached(slot2, &[(&pubkey, &account)]);
+        accounts_db.get_accounts_delta_hash(slot2);
+        accounts_db.add_root(slot2);
+
+        let slot3 = 3;
+        let account = AccountSharedData::new(0, space, &owner);
+        accounts_db.store_cached(slot3, &[(&pubkey, &account)]);
+        accounts_db.get_accounts_delta_hash(slot3);
+        accounts_db.add_root(slot3);
+
+        assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3);
+
+        accounts_db.clean_accounts(Some(slot2), false, Some(slot2));
+        assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 2);
+
+        accounts_db.clean_accounts(None, false, Some(slot2));
+        assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 1);
+
+        accounts_db.clean_accounts(None, false, Some(slot3));
+        assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 0);
+    }
+
+    #[test]
+    fn test_filter_zero_lamport_clean_for_incremental_snapshots() {
+        solana_logger::setup();
+        let slot = 10;
+
+        struct TestParameters {
+            last_full_snapshot_slot: Option<Slot>,
+            max_clean_root: Option<Slot>,
+            should_contain: bool,
+        }
+
+        let do_test = |test_params: TestParameters| {
+            let account_info = AccountInfo {
+                store_id: 42,
+                offset: 123,
+                stored_size: 234,
+                lamports: 0,
+            };
+            let pubkey = solana_sdk::pubkey::new_rand();
+            let mut key_set = HashSet::default();
+            key_set.insert(pubkey);
+            let store_count = 0;
+            let mut store_counts = HashMap::default();
+            store_counts.insert(account_info.store_id, (store_count, key_set));
+            let mut purges_zero_lamports = HashMap::default();
+            purges_zero_lamports.insert(pubkey, (vec![(slot, account_info)], 1));
+
+            let accounts_db = AccountsDb::new_single_for_tests();
+            accounts_db.filter_zero_lamport_clean_for_incremental_snapshots(
+                test_params.max_clean_root,
+                test_params.last_full_snapshot_slot,
+                &store_counts,
+                &mut purges_zero_lamports,
+            );
+
+            assert_eq!(
+                purges_zero_lamports.contains_key(&pubkey),
+                test_params.should_contain
+            );
+        };
+
+        // Scenario 1: last full snapshot is NONE
+        // In this scenario incremental snapshots are OFF, so always purge
+        {
+            let last_full_snapshot_slot = None;
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: Some(slot),
+                should_contain: true,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: None,
+                should_contain: true,
+            });
+        }
+
+        // Scenario 2: last full snapshot is GREATER THAN zero lamport account slot
+        // In this scenario always purge, and just test the various permutations of
+        // `should_filter_for_incremental_snapshots` based on `max_clean_root`.
+        {
+            let last_full_snapshot_slot = Some(slot + 1);
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: last_full_snapshot_slot,
+                should_contain: true,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: last_full_snapshot_slot.map(|s| s + 1),
+                should_contain: true,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: None,
+                should_contain: true,
+            });
+        }
+
+        // Scenario 3: last full snapshot is EQUAL TO zero lamport account slot
+        // In this scenario always purge, as it's the same as Scenario 2.
+        {
+            let last_full_snapshot_slot = Some(slot);
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: last_full_snapshot_slot,
+                should_contain: true,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: last_full_snapshot_slot.map(|s| s + 1),
+                should_contain: true,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: None,
+                should_contain: true,
+            });
+        }
+
+        // Scenario 4: last full snapshot is LESS THAN zero lamport account slot
+        // In this scenario do *not* purge, except when `should_filter_for_incremental_snapshots`
+        // is false
+        {
+            let last_full_snapshot_slot = Some(slot - 1);
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: last_full_snapshot_slot,
+                should_contain: true,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: last_full_snapshot_slot.map(|s| s + 1),
+                should_contain: false,
+            });
+
+            do_test(TestParameters {
+                last_full_snapshot_slot,
+                max_clean_root: None,
+                should_contain: false,
+            });
+        }
+    }
 }

@@ -2441,7 +2441,7 @@ impl Bank {

     // Should not be called outside of startup, will race with
     // concurrent cleaning logic in AccountsBackgroundService
-    pub fn exhaustively_free_unused_resource(&self) {
+    pub fn exhaustively_free_unused_resource(&self, last_full_snapshot_slot: Option<Slot>) {
         let mut flush = Measure::start("flush");
         // Flush all the rooted accounts. Must be called after `squash()`,
         // so that AccountsDb knows what the roots are.

@@ -2453,11 +2453,11 @@ impl Bank {
         // accounts that were included in the bank delta hash when the bank was frozen,
         // and if we clean them here, any newly created snapshot's hash for this bank
         // may not match the frozen hash.
-        self.clean_accounts(true, false);
+        self.clean_accounts(true, false, last_full_snapshot_slot);
         clean.stop();

         let mut shrink = Measure::start("shrink");
-        self.shrink_all_slots(false);
+        self.shrink_all_slots(false, last_full_snapshot_slot);
         shrink.stop();

         info!(

@@ -4986,18 +4986,19 @@ impl Bank {
         &self,
         test_hash_calculation: bool,
         accounts_db_skip_shrink: bool,
+        last_full_snapshot_slot: Option<Slot>,
     ) -> bool {
         info!("cleaning..");
         let mut clean_time = Measure::start("clean");
         if self.slot() > 0 {
-            self.clean_accounts(true, true);
+            self.clean_accounts(true, true, last_full_snapshot_slot);
         }
         clean_time.stop();

         let mut shrink_all_slots_time = Measure::start("shrink_all_slots");
         if !accounts_db_skip_shrink && self.slot() > 0 {
             info!("shrinking..");
-            self.shrink_all_slots(true);
+            self.shrink_all_slots(true, last_full_snapshot_slot);
         }
         shrink_all_slots_time.stop();

@@ -5268,24 +5269,33 @@ impl Bank {
             .add_program(program_id, process_instruction_with_context);
     }

-    pub fn clean_accounts(&self, skip_last: bool, is_startup: bool) {
-        let max_clean_slot = if skip_last {
-            // Don't clean the slot we're snapshotting because it may have zero-lamport
-            // accounts that were included in the bank delta hash when the bank was frozen,
-            // and if we clean them here, any newly created snapshot's hash for this bank
-            // may not match the frozen hash.
-            Some(self.slot().saturating_sub(1))
-        } else {
-            None
-        };
+    pub fn clean_accounts(
+        &self,
+        skip_last: bool,
+        is_startup: bool,
+        last_full_snapshot_slot: Option<Slot>,
+    ) {
+        // Don't clean the slot we're snapshotting because it may have zero-lamport
+        // accounts that were included in the bank delta hash when the bank was frozen,
+        // and if we clean them here, any newly created snapshot's hash for this bank
+        // may not match the frozen hash.
+        //
+        // So when we're snapshotting, set `skip_last` to true so the highest slot to clean is
+        // lowered by one.
+        let highest_slot_to_clean = skip_last.then(|| self.slot().saturating_sub(1));
+
+        self.rc.accounts.accounts_db.clean_accounts(
+            highest_slot_to_clean,
+            is_startup,
+            last_full_snapshot_slot,
+        );
+    }
+
+    pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option<Slot>) {
         self.rc
             .accounts
             .accounts_db
-            .clean_accounts(max_clean_slot, is_startup);
-    }
-
-    pub fn shrink_all_slots(&self, is_startup: bool) {
-        self.rc.accounts.accounts_db.shrink_all_slots(is_startup);
+            .shrink_all_slots(is_startup, last_full_snapshot_slot);
     }

     pub fn print_accounts_stats(&self) {

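The rewritten `Bank::clean_accounts()` swaps the old `if skip_last { Some(...) } else { None }` for `bool::then`, which is equivalent; a tiny standalone check of that identity, not part of the diff:

fn main() {
    let slot: u64 = 5;
    // `bool::then(f)` yields Some(f()) when true and None when false,
    // exactly matching the old if/else that computed `max_clean_slot`.
    assert_eq!(true.then(|| slot.saturating_sub(1)), Some(4));
    assert_eq!(false.then(|| slot.saturating_sub(1)), None);
}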
@@ -7866,7 +7876,7 @@ pub(crate) mod tests {
         bank.squash();
         bank.force_flush_accounts_cache();
         let hash = bank.update_accounts_hash();
-        bank.clean_accounts(false, false);
+        bank.clean_accounts(false, false, None);
         assert_eq!(bank.update_accounts_hash(), hash);

         let bank0 = Arc::new(new_from_parent(&bank));

@@ -7886,14 +7896,14 @@ pub(crate) mod tests {

         info!("bank0 purge");
         let hash = bank0.update_accounts_hash();
-        bank0.clean_accounts(false, false);
+        bank0.clean_accounts(false, false, None);
         assert_eq!(bank0.update_accounts_hash(), hash);

         assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports(), 10);
         assert_eq!(bank1.get_account(&keypair.pubkey()), None);

         info!("bank1 purge");
-        bank1.clean_accounts(false, false);
+        bank1.clean_accounts(false, false, None);

         assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports(), 10);
         assert_eq!(bank1.get_account(&keypair.pubkey()), None);

@@ -7914,7 +7924,7 @@ pub(crate) mod tests {
         assert_eq!(bank0.get_account(&keypair.pubkey()), None);
         assert_eq!(bank1.get_account(&keypair.pubkey()), None);
         bank1.force_flush_accounts_cache();
-        bank1.clean_accounts(false, false);
+        bank1.clean_accounts(false, false, None);

         assert!(bank1.verify_bank_hash(true));
     }

@@ -8779,11 +8789,11 @@ pub(crate) mod tests {
         bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
         bank.freeze();
         bank.update_accounts_hash();
-        assert!(bank.verify_snapshot_bank(true, false));
+        assert!(bank.verify_snapshot_bank(true, false, None));

         // tamper the bank after freeze!
         bank.increment_signature_count(1);
-        assert!(!bank.verify_snapshot_bank(true, false));
+        assert!(!bank.verify_snapshot_bank(true, false, None));
     }

     // Test that two bank forks with the same accounts should not hash to the same value.

@@ -11213,7 +11223,7 @@ pub(crate) mod tests {

         // Clean accounts, which should add earlier slots to the shrink
         // candidate set
-        bank2.clean_accounts(false, false);
+        bank2.clean_accounts(false, false, None);

         // Slots 0 and 1 should be candidates for shrinking, but slot 2
         // shouldn't because none of its accounts are outdated by a later

@@ -11269,7 +11279,7 @@ pub(crate) mod tests {
         goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank).unwrap());

         bank.squash();
-        bank.clean_accounts(false, false);
+        bank.clean_accounts(false, false, None);
         let force_to_return_alive_account = 0;
         assert_eq!(
             bank.process_stale_slot_with_budget(22, force_to_return_alive_account),

@@ -12897,7 +12907,7 @@ pub(crate) mod tests {
         current_major_fork_bank.squash();
         // Try to get cache flush/clean to overlap with the scan
         current_major_fork_bank.force_flush_accounts_cache();
-        current_major_fork_bank.clean_accounts(false, false);
+        current_major_fork_bank.clean_accounts(false, false, None);
         // Move purge here so that Bank::drop()->purge_slots() doesn't race
         // with clean. Simulates the call from AccountsBackgroundService
         let is_abs_service = true;

@@ -12947,7 +12957,7 @@ pub(crate) mod tests {
         current_bank.squash();
         if current_bank.slot() % 2 == 0 {
             current_bank.force_flush_accounts_cache();
-            current_bank.clean_accounts(true, false);
+            current_bank.clean_accounts(true, false, None);
         }
         prev_bank = current_bank.clone();
         current_bank = Arc::new(Bank::new_from_parent(

@@ -13912,7 +13922,7 @@ pub(crate) mod tests {
         bank2.squash();

         drop(bank1);
-        bank2.clean_accounts(false, false);
+        bank2.clean_accounts(false, false, None);

         let expected_ref_count_for_cleaned_up_keys = 0;
         let expected_ref_count_for_keys_in_both_slot1_and_slot2 = 1;

@@ -796,8 +796,11 @@ pub fn bank_from_snapshot_archives(
     info!("{}", measure_rebuild);

     let mut measure_verify = Measure::start("verify");
-    if !bank.verify_snapshot_bank(test_hash_calculation, accounts_db_skip_shrink)
-        && limit_load_slot_count_from_snapshot.is_none()
+    if !bank.verify_snapshot_bank(
+        test_hash_calculation,
+        accounts_db_skip_shrink,
+        Some(full_snapshot_archive_info.slot()),
+    ) && limit_load_slot_count_from_snapshot.is_none()
     {
         panic!("Snapshot bank for slot {} failed to verify", bank.slot());
     }

@@ -1582,7 +1585,7 @@ pub fn bank_to_full_snapshot_archive(
     assert!(bank.is_complete());
     bank.squash(); // Bank may not be a root
     bank.force_flush_accounts_cache();
-    bank.clean_accounts(true, false);
+    bank.clean_accounts(true, false, Some(bank.slot()));
     bank.update_accounts_hash();
     bank.rehash(); // Bank accounts may have been manually modified by the caller

@@ -1625,7 +1628,7 @@ pub fn bank_to_incremental_snapshot_archive(
     assert!(bank.slot() > full_snapshot_slot);
     bank.squash(); // Bank may not be a root
     bank.force_flush_accounts_cache();
-    bank.clean_accounts(true, false);
+    bank.clean_accounts(true, false, Some(full_snapshot_slot));
     bank.update_accounts_hash();
     bank.rehash(); // Bank accounts may have been manually modified by the caller

@@ -1820,6 +1823,7 @@ mod tests {
     use solana_sdk::{
         genesis_config::create_genesis_config,
         signature::{Keypair, Signer},
+        system_transaction,
     };
     use std::mem::size_of;

@@ -2813,4 +2817,213 @@ mod tests {

         assert_eq!(deserialized_bank, *bank4);
     }

+    /// Test that cleaning works well in the edge cases of zero-lamport accounts and snapshots.
+    /// Here's the scenario:
+    ///
+    /// slot 1:
+    ///     - send some lamports to Account1 (from Account2) to bring it to life
+    ///     - take a full snapshot
+    /// slot 2:
+    ///     - make Account1 have zero lamports (send back to Account2)
+    ///     - take an incremental snapshot
+    ///     - ensure deserializing from this snapshot is equal to this bank
+    /// slot 3:
+    ///     - remove Account2's reference back to slot 2 by transferring from the mint to Account2
+    /// slot 4:
+    ///     - ensure `clean_accounts()` has run and that Account1 is gone
+    ///     - take another incremental snapshot
+    ///     - ensure deserializing from this snapshot is equal to this bank
+    ///     - ensure Account1 hasn't come back from the dead
+    ///
+    /// The check at slot 4 will fail with the pre-incremental-snapshot cleaning logic. Because
+    /// of the cleaning/purging at slot 4, the incremental snapshot at slot 4 will no longer have
+    /// information about Account1, but the full snapshot _does_ have info for Account1, which is
+    /// no longer correct!
+    #[test]
+    fn test_incremental_snapshots_handle_zero_lamport_accounts() {
+        solana_logger::setup();
+
+        let collector = Pubkey::new_unique();
+        let key1 = Keypair::new();
+        let key2 = Keypair::new();
+
+        let accounts_dir = tempfile::TempDir::new().unwrap();
+        let snapshots_dir = tempfile::TempDir::new().unwrap();
+        let snapshot_archives_dir = tempfile::TempDir::new().unwrap();
+        let snapshot_archive_format = ArchiveFormat::Tar;
+
+        let (genesis_config, mint_keypair) = create_genesis_config(1_000_000);
+
+        let lamports_to_transfer = 123_456;
+        let bank0 = Arc::new(Bank::new_with_paths_for_tests(
+            &genesis_config,
+            vec![accounts_dir.path().to_path_buf()],
+            &[],
+            None,
+            None,
+            AccountSecondaryIndexes::default(),
+            false,
+            AccountShrinkThreshold::default(),
+            false,
+        ));
+        bank0
+            .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey())
+            .unwrap();
+        while !bank0.is_complete() {
+            bank0.register_tick(&Hash::new_unique());
+        }
+
+        let slot = 1;
+        let bank1 = Arc::new(Bank::new_from_parent(&bank0, &collector, slot));
+        bank1
+            .transfer(lamports_to_transfer, &key2, &key1.pubkey())
+            .unwrap();
+        while !bank1.is_complete() {
+            bank1.register_tick(&Hash::new_unique());
+        }
+
+        let full_snapshot_slot = slot;
+        let full_snapshot_archive_info = bank_to_full_snapshot_archive(
+            snapshots_dir.path(),
+            &bank1,
+            None,
+            snapshot_archives_dir.path(),
+            snapshot_archive_format,
+            None,
+            DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
+        )
+        .unwrap();
+
+        let slot = slot + 1;
+        let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot));
+        let tx = system_transaction::transfer(
+            &key1,
+            &key2.pubkey(),
+            lamports_to_transfer,
+            bank2.last_blockhash(),
+        );
+        let (_blockhash, fee_calculator) = bank2.last_blockhash_with_fee_calculator();
+        let fee = fee_calculator.calculate_fee(tx.message());
+        let tx = system_transaction::transfer(
+            &key1,
+            &key2.pubkey(),
+            lamports_to_transfer - fee,
+            bank2.last_blockhash(),
+        );
+        bank2.process_transaction(&tx).unwrap();
+        assert_eq!(
+            bank2.get_balance(&key1.pubkey()),
+            0,
+            "Ensure Account1's balance is zero"
+        );
+        while !bank2.is_complete() {
+            bank2.register_tick(&Hash::new_unique());
+        }
+
+        // Take an incremental snapshot and then do a roundtrip on the bank and ensure it
+        // deserializes correctly.
+        let incremental_snapshot_archive_info = bank_to_incremental_snapshot_archive(
+            snapshots_dir.path(),
+            &bank2,
+            full_snapshot_slot,
+            None,
+            snapshot_archives_dir.path(),
+            snapshot_archive_format,
+            None,
+            DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
+        )
+        .unwrap();
+        let (deserialized_bank, _) = bank_from_snapshot_archives(
+            &[accounts_dir.path().to_path_buf()],
+            &[],
+            snapshots_dir.path(),
+            &full_snapshot_archive_info,
+            Some(&incremental_snapshot_archive_info),
+            &genesis_config,
+            None,
+            None,
+            AccountSecondaryIndexes::default(),
+            false,
+            None,
+            AccountShrinkThreshold::default(),
+            false,
+            false,
+            false,
+            crate::accounts_index::BINS_FOR_TESTING,
+        )
+        .unwrap();
+        assert_eq!(
+            deserialized_bank, *bank2,
+            "Ensure rebuilding from an incremental snapshot works"
+        );
+
+        let slot = slot + 1;
+        let bank3 = Arc::new(Bank::new_from_parent(&bank2, &collector, slot));
+        // Update Account2 so that it no longer holds a reference to slot2
+        bank3
+            .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey())
+            .unwrap();
+        while !bank3.is_complete() {
+            bank3.register_tick(&Hash::new_unique());
+        }
+
+        let slot = slot + 1;
+        let bank4 = Arc::new(Bank::new_from_parent(&bank3, &collector, slot));
+        while !bank4.is_complete() {
+            bank4.register_tick(&Hash::new_unique());
+        }
+
+        // Ensure account1 has been cleaned/purged from everywhere
+        bank4.squash();
+        bank4.clean_accounts(true, false, Some(full_snapshot_slot));
+        assert!(
+            bank4.get_account_modified_slot(&key1.pubkey()).is_none(),
+            "Ensure Account1 has been cleaned and purged from AccountsDb"
+        );
+
+        // Take an incremental snapshot and then do a roundtrip on the bank and ensure it
+        // deserializes correctly
+        let incremental_snapshot_archive_info = bank_to_incremental_snapshot_archive(
+            snapshots_dir.path(),
+            &bank4,
+            full_snapshot_slot,
+            None,
+            snapshot_archives_dir.path(),
+            snapshot_archive_format,
+            None,
+            DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
+        )
+        .unwrap();
+
+        let (deserialized_bank, _) = bank_from_snapshot_archives(
+            &[accounts_dir.path().to_path_buf()],
+            &[],
+            snapshots_dir.path(),
+            &full_snapshot_archive_info,
+            Some(&incremental_snapshot_archive_info),
+            &genesis_config,
+            None,
+            None,
+            AccountSecondaryIndexes::default(),
+            false,
+            None,
+            AccountShrinkThreshold::default(),
+            false,
+            false,
+            false,
+            crate::accounts_index::BINS_FOR_TESTING,
+        )
+        .unwrap();
+        assert_eq!(
+            deserialized_bank, *bank4,
+            "Ensure rebuilding from an incremental snapshot works",
+        );
+        assert!(
+            deserialized_bank
+                .get_account_modified_slot(&key1.pubkey())
+                .is_none(),
+            "Ensure Account1 has not been brought back from the dead"
+        );
+    }
 }

@@ -1387,7 +1387,7 @@ mod tests {
         bank.force_flush_accounts_cache();
         // do clean and assert that it actually did its job
         assert_eq!(3, bank.get_snapshot_storages().len());
-        bank.clean_accounts(false, false);
+        bank.clean_accounts(false, false, None);
         assert_eq!(2, bank.get_snapshot_storages().len());
     });
 }

@@ -59,7 +59,7 @@ fn test_shrink_and_clean() {

     // let's dance.
     for _ in 0..10 {
-        accounts.clean_accounts(None, false);
+        accounts.clean_accounts(None, false, None);
         std::thread::sleep(std::time::Duration::from_millis(100));
     }
