Cache account stores, flush from AccountsBackgroundService (#13140)
parent 4a66e3eddc · commit 6dfad0652f
accounts-bench/src/main.rs

@@ -2,13 +2,11 @@ use clap::{crate_description, crate_name, value_t, App, Arg};
 use rayon::prelude::*;
 use solana_measure::measure::Measure;
 use solana_runtime::{
-    accounts::{create_test_accounts, update_accounts, Accounts},
+    accounts::{create_test_accounts, update_accounts_bench, Accounts},
     accounts_index::Ancestors,
 };
 use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
-use std::env;
-use std::fs;
-use std::path::PathBuf;
+use std::{collections::HashSet, env, fs, path::PathBuf};

 fn main() {
     solana_logger::setup();
@@ -56,7 +54,8 @@ fn main() {
     if fs::remove_dir_all(path.clone()).is_err() {
         println!("Warning: Couldn't remove {:?}", path);
     }
-    let accounts = Accounts::new(vec![path], &ClusterType::Testnet);
+    let accounts =
+        Accounts::new_with_config(vec![path], &ClusterType::Testnet, HashSet::new(), false);
     println!("Creating {} accounts", num_accounts);
     let mut create_time = Measure::start("create accounts");
     let pubkeys: Vec<_> = (0..num_slots)
@@ -92,7 +91,7 @@ fn main() {
             time.stop();
             println!("{}", time);
             for slot in 0..num_slots {
-                update_accounts(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
+                update_accounts_bench(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
                 accounts.add_root((x * num_slots + slot) as u64);
             }
         } else {
core/src/tvu.rs

@@ -78,6 +78,7 @@ pub struct TvuConfig {
     pub trusted_validators: Option<HashSet<Pubkey>>,
     pub repair_validators: Option<HashSet<Pubkey>>,
     pub accounts_hash_fault_injection_slots: u64,
+    pub accounts_db_caching_enabled: bool,
 }

 impl Tvu {
@@ -272,6 +273,7 @@ impl Tvu {
             bank_forks.clone(),
             &exit,
             accounts_background_request_handler,
+            tvu_config.accounts_db_caching_enabled,
         );

         Tvu {
core/src/validator.rs

@@ -119,6 +119,7 @@ pub struct ValidatorConfig {
     pub no_poh_speed_test: bool,
     pub poh_pinned_cpu_core: usize,
     pub account_indexes: HashSet<AccountIndex>,
+    pub accounts_db_caching_enabled: bool,
 }

 impl Default for ValidatorConfig {
@@ -164,6 +165,7 @@ impl Default for ValidatorConfig {
             no_poh_speed_test: true,
             poh_pinned_cpu_core: poh_service::DEFAULT_PINNED_CPU_CORE,
             account_indexes: HashSet::new(),
+            accounts_db_caching_enabled: false,
         }
     }
 }
@@ -629,6 +631,7 @@ impl Validator {
                 trusted_validators: config.trusted_validators.clone(),
                 repair_validators: config.repair_validators.clone(),
                 accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
+                accounts_db_caching_enabled: config.accounts_db_caching_enabled,
             },
         );

@@ -960,6 +963,7 @@ fn new_banks_from_ledger(
         frozen_accounts: config.frozen_accounts.clone(),
         debug_keys: config.debug_keys.clone(),
         account_indexes: config.account_indexes.clone(),
+        accounts_db_caching_enabled: config.accounts_db_caching_enabled,
         ..blockstore_processor::ProcessOptions::default()
     };
core/tests/snapshots.rs

@@ -105,6 +105,7 @@ mod tests {
             None,
             None,
             HashSet::new(),
+            false,
         );
         bank0.freeze();
         let mut bank_forks = BankForks::new(bank0);
@@ -161,6 +162,7 @@ mod tests {
             None,
             None,
             HashSet::new(),
+            false,
         )
         .unwrap();

@@ -216,7 +218,7 @@ mod tests {
             if slot % set_root_interval == 0 || slot == last_slot - 1 {
                 // set_root should send a snapshot request
                 bank_forks.set_root(bank.slot(), &request_sender, None);
-                snapshot_request_handler.handle_snapshot_requests();
+                snapshot_request_handler.handle_snapshot_requests(false);
             }
         }
ledger-tool/src/main.rs

@@ -1926,6 +1926,7 @@ fn main() {
                     );
                     assert!(bank.is_complete());
                     bank.squash();
+                    bank.force_flush_accounts_cache();
                     bank.clean_accounts(true);
                     bank.update_accounts_hash();
                     if rehash {
ledger/src/bank_forks_utils.rs

@@ -69,6 +69,7 @@ pub fn load(
             process_options.debug_keys.clone(),
             Some(&crate::builtins::get(process_options.bpf_jit)),
             process_options.account_indexes.clone(),
+            process_options.accounts_db_caching_enabled,
         )
         .expect("Load from snapshot failed");
         if let Some(shrink_paths) = shrink_paths {
ledger/src/blockstore_processor.rs

@@ -346,6 +346,7 @@ pub struct ProcessOptions {
     pub frozen_accounts: Vec<Pubkey>,
     pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
     pub account_indexes: HashSet<AccountIndex>,
+    pub accounts_db_caching_enabled: bool,
 }

 pub fn process_blockstore(
@@ -371,6 +372,7 @@ pub fn process_blockstore(
         opts.debug_keys.clone(),
         Some(&crate::builtins::get(opts.bpf_jit)),
         opts.account_indexes.clone(),
+        opts.accounts_db_caching_enabled,
     );
     let bank0 = Arc::new(bank0);
     info!("processing ledger for slot 0...");
@@ -929,6 +931,8 @@ fn load_frozen_forks(
                     new_root_bank.squash();

                     if last_free.elapsed() > Duration::from_secs(10) {
+                        // Must be called after `squash()`, so that AccountsDb knows what
+                        // the roots are for the cache flushing in exhaustively_free_unused_resource().
                         // This could take few secs; so update last_free later
                         new_root_bank.exhaustively_free_unused_resource();
                         last_free = Instant::now();
@@ -2901,6 +2905,7 @@ pub mod tests {
             None,
             None,
             HashSet::new(),
+            false,
         );
         *bank.epoch_schedule()
     }
run.sh

@@ -105,6 +105,7 @@ args=(
   --init-complete-file "$dataDir"/init-completed
   --snapshot-compression none
   --require-tower
+  --accounts-db-caching-enabled
 )
 # shellcheck disable=SC2086
 solana-validator "${args[@]}" $SOLANA_RUN_SH_VALIDATOR_ARGS &
runtime/benches/accounts.rs

@@ -50,6 +50,7 @@ fn test_accounts_create(bencher: &mut Bencher) {
         None,
         None,
         HashSet::new(),
+        false,
     );
     bencher.iter(|| {
         let mut pubkeys: Vec<Pubkey> = vec![];
@@ -61,35 +62,39 @@ fn test_accounts_create(bencher: &mut Bencher) {
 fn test_accounts_squash(bencher: &mut Bencher) {
     let (mut genesis_config, _) = create_genesis_config(100_000);
     genesis_config.rent.burn_percent = 100; // Avoid triggering an assert in Bank::distribute_rent_to_validators()
-    let bank1 = Arc::new(Bank::new_with_paths(
+    let mut prev_bank = Arc::new(Bank::new_with_paths(
         &genesis_config,
         vec![PathBuf::from("bench_a1")],
         &[],
         None,
         None,
         HashSet::new(),
+        false,
     ));
     let mut pubkeys: Vec<Pubkey> = vec![];
-    deposit_many(&bank1, &mut pubkeys, 250_000);
-    bank1.freeze();
+    deposit_many(&prev_bank, &mut pubkeys, 250_000);
+    prev_bank.freeze();

     // Measures the performance of the squash operation.
     // This mainly consists of the freeze operation which calculates the
     // merkle hash of the account state and distribution of fees and rent
     let mut slot = 1u64;
     bencher.iter(|| {
-        let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), slot));
-        bank2.deposit(&pubkeys[0], 1);
-        bank2.squash();
+        let next_bank = Arc::new(Bank::new_from_parent(&prev_bank, &Pubkey::default(), slot));
+        next_bank.deposit(&pubkeys[0], 1);
+        next_bank.squash();
         slot += 1;
+        prev_bank = next_bank;
     });
 }

 #[bench]
 fn test_accounts_hash_bank_hash(bencher: &mut Bencher) {
-    let accounts = Accounts::new(
+    let accounts = Accounts::new_with_config(
         vec![PathBuf::from("bench_accounts_hash_internal")],
         &ClusterType::Development,
+        HashSet::new(),
+        false,
     );
     let mut pubkeys: Vec<Pubkey> = vec![];
     let num_accounts = 60_000;
@@ -107,9 +112,11 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) {
 #[bench]
 fn test_update_accounts_hash(bencher: &mut Bencher) {
     solana_logger::setup();
-    let accounts = Accounts::new(
+    let accounts = Accounts::new_with_config(
         vec![PathBuf::from("update_accounts_hash")],
         &ClusterType::Development,
+        HashSet::new(),
+        false,
     );
     let mut pubkeys: Vec<Pubkey> = vec![];
     create_test_accounts(&accounts, &mut pubkeys, 50_000, 0);
@@ -124,9 +131,11 @@ fn test_update_accounts_hash(bencher: &mut Bencher) {
 #[bench]
 fn test_accounts_delta_hash(bencher: &mut Bencher) {
     solana_logger::setup();
-    let accounts = Accounts::new(
+    let accounts = Accounts::new_with_config(
         vec![PathBuf::from("accounts_delta_hash")],
         &ClusterType::Development,
+        HashSet::new(),
+        false,
     );
     let mut pubkeys: Vec<Pubkey> = vec![];
     create_test_accounts(&accounts, &mut pubkeys, 100_000, 0);
@@ -138,17 +147,19 @@ fn test_accounts_delta_hash(bencher: &mut Bencher) {
 #[bench]
 fn bench_delete_dependencies(bencher: &mut Bencher) {
     solana_logger::setup();
-    let accounts = Accounts::new(
+    let accounts = Accounts::new_with_config(
         vec![PathBuf::from("accounts_delete_deps")],
         &ClusterType::Development,
+        HashSet::new(),
+        false,
     );
     let mut old_pubkey = Pubkey::default();
     let zero_account = Account::new(0, 0, &Account::default().owner);
     for i in 0..1000 {
         let pubkey = solana_sdk::pubkey::new_rand();
         let account = Account::new((i + 1) as u64, 0, &Account::default().owner);
-        accounts.store_slow(i, &pubkey, &account);
-        accounts.store_slow(i, &old_pubkey, &zero_account);
+        accounts.store_slow_uncached(i, &pubkey, &account);
+        accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
         old_pubkey = pubkey;
         accounts.add_root(i);
     }
@@ -165,12 +176,14 @@ fn store_accounts_with_possible_contention<F: 'static>(
     F: Fn(&Accounts, &[Pubkey]) + Send + Copy,
 {
     let num_readers = 5;
-    let accounts = Arc::new(Accounts::new(
+    let accounts = Arc::new(Accounts::new_with_config(
         vec![
             PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()))
                 .join(bench_name),
         ],
         &ClusterType::Development,
+        HashSet::new(),
+        false,
     ));
     let num_keys = 1000;
     let slot = 0;
@@ -180,7 +193,7 @@ fn store_accounts_with_possible_contention<F: 'static>(
             .map(|_| {
                 let pubkey = solana_sdk::pubkey::new_rand();
                 let account = Account::new(1, 0, &Account::default().owner);
-                accounts.store_slow(slot, &pubkey, &account);
+                accounts.store_slow_uncached(slot, &pubkey, &account);
                 pubkey
             })
             .collect(),
@@ -206,7 +219,7 @@ fn store_accounts_with_possible_contention<F: 'static>(
                 // Write to a different slot than the one being read from. Because
                 // there's a new account pubkey being written to every time, will
                 // compete for the accounts index lock on every store
-                accounts.store_slow(slot + 1, &solana_sdk::pubkey::new_rand(), &account);
+                accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), &account);
             }
         })
     }
runtime/benches/accounts_index.rs

@@ -47,7 +47,7 @@ fn bench_accounts_index(bencher: &mut Bencher) {
             );
             reclaims.clear();
         }
-        index.add_root(root);
+        index.add_root(root, false);
         root += 1;
        fork += 1;
    });
runtime/benches/append_vec.rs

@@ -48,6 +48,7 @@ fn append_vec_sequential_read(bencher: &mut Bencher) {
     let mut indexes = add_test_accounts(&vec, size);
     bencher.iter(|| {
         let (sample, pos) = indexes.pop().unwrap();
+        println!("reading pos {} {}", sample, pos);
         let (account, _next) = vec.get_account(pos).unwrap();
         let (_meta, test) = create_test_account(sample);
         assert_eq!(account.data, test.data.as_slice());
runtime/src/accounts.rs

@@ -1,7 +1,6 @@
 use crate::{
-    accounts_db::{AccountsDB, AppendVecId, BankHashInfo, ErrorCounters},
+    accounts_db::{AccountsDB, AppendVecId, BankHashInfo, ErrorCounters, LoadedAccount},
     accounts_index::{AccountIndex, Ancestors, IndexKey},
-    append_vec::StoredAccount,
     bank::{
         NonceRollbackFull, NonceRollbackInfo, TransactionCheckResult, TransactionExecutionResult,
     },
@@ -82,19 +81,21 @@ pub enum AccountAddressFilter {

 impl Accounts {
     pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
-        Self::new_with_indexes(paths, cluster_type, HashSet::new())
+        Self::new_with_config(paths, cluster_type, HashSet::new(), false)
     }

-    pub fn new_with_indexes(
+    pub fn new_with_config(
         paths: Vec<PathBuf>,
         cluster_type: &ClusterType,
         account_indexes: HashSet<AccountIndex>,
+        caching_enabled: bool,
     ) -> Self {
         Self {
-            accounts_db: Arc::new(AccountsDB::new_with_indexes(
+            accounts_db: Arc::new(AccountsDB::new_with_config(
                 paths,
                 cluster_type,
                 account_indexes,
+                caching_enabled,
             )),
             account_locks: Mutex::new(HashSet::new()),
             readonly_locks: Arc::new(RwLock::new(Some(HashMap::new()))),
@@ -447,24 +448,20 @@ impl Accounts {
     }

     /// scans underlying accounts_db for this delta (slot) with a map function
-    /// from StoredAccount to B
+    /// from LoadedAccount to B
     /// returns only the latest/current version of B for this slot
-    fn scan_slot<F, B>(&self, slot: Slot, func: F) -> Vec<B>
+    pub fn scan_slot<F, B>(&self, slot: Slot, func: F) -> Vec<B>
     where
-        F: Fn(&StoredAccount) -> Option<B> + Send + Sync,
+        F: Fn(LoadedAccount) -> Option<B> + Send + Sync,
         B: Send + Default,
     {
         let accumulator: Vec<Vec<(Pubkey, u64, B)>> = self.accounts_db.scan_account_storage(
             slot,
-            |stored_account: &StoredAccount,
-             _id: AppendVecId,
-             accum: &mut Vec<(Pubkey, u64, B)>| {
-                if let Some(val) = func(&stored_account) {
-                    accum.push((
-                        stored_account.meta.pubkey,
-                        std::u64::MAX - stored_account.meta.write_version,
-                        val,
-                    ));
+            |loaded_account: LoadedAccount, _id: AppendVecId, accum: &mut Vec<(Pubkey, u64, B)>| {
+                let pubkey = *loaded_account.pubkey();
+                let write_version = loaded_account.write_version();
+                if let Some(val) = func(loaded_account) {
+                    accum.push((pubkey, std::u64::MAX - write_version, val));
                 }
             },
         );
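The inverted sort key is the detail to notice here: pushing `std::u64::MAX - write_version` means the smallest key corresponds to the newest write, so later deduplication can keep the smallest-keyed entry per pubkey. A minimal sketch of that selection, using a plain byte array in place of `Pubkey` (the helper below is illustrative, not part of the commit):

    use std::collections::HashMap;

    // Keep only the newest version per key; entries carry the inverted
    // write version (u64::MAX - write_version), so smaller means newer.
    fn latest_per_key<B>(entries: Vec<([u8; 32], u64, B)>) -> HashMap<[u8; 32], B> {
        let mut best: HashMap<[u8; 32], (u64, B)> = HashMap::new();
        for (key, inverted_version, val) in entries {
            match best.get(&key) {
                // existing entry is already newer (smaller inverted key): keep it
                Some((existing, _)) if *existing <= inverted_version => {}
                _ => {
                    best.insert(key, (inverted_version, val));
                }
            }
        }
        best.into_iter().map(|(k, (_, v))| (k, v)).collect()
    }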
@@ -488,11 +485,11 @@
         self.scan_slot(slot, |stored_account| {
             let hit = match program_id {
                 None => true,
-                Some(program_id) => stored_account.account_meta.owner == *program_id,
+                Some(program_id) => stored_account.owner() == program_id,
             };

             if hit {
-                Some((stored_account.meta.pubkey, stored_account.clone_account()))
+                Some((*stored_account.pubkey(), stored_account.account()))
             } else {
                 None
             }
@@ -675,9 +672,15 @@
         )
     }

-    /// Slow because lock is held for 1 operation instead of many
-    pub fn store_slow(&self, slot: Slot, pubkey: &Pubkey, account: &Account) {
-        self.accounts_db.store(slot, &[(pubkey, account)]);
+    /// Slow because lock is held for 1 operation instead of many.
+    /// WARNING: This noncached version is only to be used for tests/benchmarking
+    /// as bypassing the cache in general is not supported
+    pub fn store_slow_uncached(&self, slot: Slot, pubkey: &Pubkey, account: &Account) {
+        self.accounts_db.store_uncached(slot, &[(pubkey, account)]);
+    }
+
+    pub fn store_slow_cached(&self, slot: Slot, pubkey: &Pubkey, account: &Account) {
+        self.accounts_db.store_cached(slot, &[(pubkey, account)]);
     }

     fn is_locked_readonly(&self, key: &Pubkey) -> bool {
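The old `store_slow` is split in two: an uncached variant that writes straight to storage (tests and benches only, per the warning above) and a cached variant that goes through the new write cache. A hedged usage sketch, assuming an `Accounts` built with `new_with_config(..., caching_enabled = true)`:

    use solana_runtime::accounts::Accounts;
    use solana_sdk::{account::Account, pubkey::Pubkey};

    // Illustrative only: which variant belongs where.
    fn store_examples(accounts: &Accounts, pubkey: &Pubkey, account: &Account) {
        // Runtime path: lands in the accounts cache, flushed to append-vecs
        // later, once the slot is rooted.
        accounts.store_slow_cached(0, pubkey, account);

        // Test/bench path: bypasses the cache entirely.
        accounts.store_slow_uncached(0, pubkey, account);
    }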
@@ -846,7 +849,7 @@
     /// Store the accounts into the DB
     // allow(clippy) needed for various gating flags
     #[allow(clippy::too_many_arguments)]
-    pub fn store_accounts(
+    pub fn store_cached(
         &self,
         slot: Slot,
         txs: &[Transaction],
@@ -868,7 +871,7 @@
             fix_recent_blockhashes_sysvar_delay,
             rent_fix_enabled,
         );
-        self.accounts_db.store(slot, &accounts_to_store);
+        self.accounts_db.store_cached(slot, &accounts_to_store);
     }

     /// Purge a slot if it is not a root
@@ -876,6 +879,7 @@
     pub fn purge_slot(&self, slot: Slot) {
         self.accounts_db.purge_slot(slot);
     }
+
     /// Add a slot to root. Root slots cannot be purged
     pub fn add_root(&self, slot: Slot) {
         self.accounts_db.add_root(slot)
@@ -1024,16 +1028,18 @@ pub fn create_test_accounts(
     for t in 0..num {
         let pubkey = solana_sdk::pubkey::new_rand();
         let account = Account::new((t + 1) as u64, 0, &Account::default().owner);
-        accounts.store_slow(slot, &pubkey, &account);
+        accounts.store_slow_uncached(slot, &pubkey, &account);
         pubkeys.push(pubkey);
     }
 }

-pub fn update_accounts(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) {
+// Only used by bench, not safe to call otherwise accounts can conflict with the
+// accounts cache!
+pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) {
     for pubkey in pubkeys {
         let amount = thread_rng().gen_range(0, 10);
         let account = Account::new(amount, 0, &Account::default().owner);
-        accounts.store_slow(slot, &pubkey, &account);
+        accounts.store_slow_uncached(slot, &pubkey, &account);
     }
 }

@@ -1070,9 +1076,10 @@ mod tests {
     ) -> Vec<TransactionLoadResult> {
         let mut hash_queue = BlockhashQueue::new(100);
         hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator);
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         for ka in ka.iter() {
-            accounts.store_slow(0, &ka.0, &ka.1);
+            accounts.store_slow_uncached(0, &ka.0, &ka.1);
         }

         let ancestors = vec![(0, 0)].into_iter().collect();
@@ -1619,18 +1626,19 @@ mod tests {

     #[test]
     fn test_load_by_program_slot() {
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);

         // Load accounts owned by various programs into AccountsDB
         let pubkey0 = solana_sdk::pubkey::new_rand();
         let account0 = Account::new(1, 0, &Pubkey::new(&[2; 32]));
-        accounts.store_slow(0, &pubkey0, &account0);
+        accounts.store_slow_uncached(0, &pubkey0, &account0);
         let pubkey1 = solana_sdk::pubkey::new_rand();
         let account1 = Account::new(1, 0, &Pubkey::new(&[2; 32]));
-        accounts.store_slow(0, &pubkey1, &account1);
+        accounts.store_slow_uncached(0, &pubkey1, &account1);
         let pubkey2 = solana_sdk::pubkey::new_rand();
         let account2 = Account::new(1, 0, &Pubkey::new(&[3; 32]));
-        accounts.store_slow(0, &pubkey2, &account2);
+        accounts.store_slow_uncached(0, &pubkey2, &account2);

         let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[2; 32])));
         assert_eq!(loaded.len(), 2);
@@ -1642,7 +1650,8 @@ mod tests {

     #[test]
     fn test_accounts_account_not_found() {
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         let mut error_counters = ErrorCounters::default();
         let ancestors = vec![(0, 0)].into_iter().collect();

@@ -1660,7 +1669,8 @@ mod tests {
     #[test]
     #[should_panic]
     fn test_accounts_empty_bank_hash() {
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         accounts.bank_hash_at(1);
     }

@@ -1676,11 +1686,12 @@ mod tests {
         let account2 = Account::new(3, 0, &Pubkey::default());
         let account3 = Account::new(4, 0, &Pubkey::default());

-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
-        accounts.store_slow(0, &keypair0.pubkey(), &account0);
-        accounts.store_slow(0, &keypair1.pubkey(), &account1);
-        accounts.store_slow(0, &keypair2.pubkey(), &account2);
-        accounts.store_slow(0, &keypair3.pubkey(), &account3);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
+        accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
+        accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
+        accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
+        accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3);

         let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
         let message = Message::new_with_compiled_instructions(
@@ -1788,10 +1799,11 @@ mod tests {
         let account1 = Account::new(2, 0, &Pubkey::default());
         let account2 = Account::new(3, 0, &Pubkey::default());

-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
-        accounts.store_slow(0, &keypair0.pubkey(), &account0);
-        accounts.store_slow(0, &keypair1.pubkey(), &account1);
-        accounts.store_slow(0, &keypair2.pubkey(), &account2);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
+        accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
+        accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
+        accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);

         let accounts_arc = Arc::new(accounts);

@@ -1917,7 +1929,8 @@ mod tests {

         let mut loaded = vec![loaded0, loaded1];

-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         {
             let mut readonly_locks = accounts.readonly_locks.write().unwrap();
             let readonly_locks = readonly_locks.as_mut().unwrap();
@@ -1969,15 +1982,16 @@ mod tests {
     #[test]
     fn huge_clean() {
         solana_logger::setup();
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         let mut old_pubkey = Pubkey::default();
         let zero_account = Account::new(0, 0, &Account::default().owner);
         info!("storing..");
         for i in 0..2_000 {
             let pubkey = solana_sdk::pubkey::new_rand();
             let account = Account::new((i + 1) as u64, 0, &Account::default().owner);
-            accounts.store_slow(i, &pubkey, &account);
-            accounts.store_slow(i, &old_pubkey, &zero_account);
+            accounts.store_slow_uncached(i, &pubkey, &account);
+            accounts.store_slow_uncached(i, &old_pubkey, &zero_account);
             old_pubkey = pubkey;
             accounts.add_root(i);
             if i % 1_000 == 0 {
@@ -2011,7 +2025,8 @@ mod tests {
     #[test]
     fn test_instructions() {
         solana_logger::setup();
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);

         let instructions_key = solana_sdk::sysvar::instructions::id();
         let keypair = Keypair::new();
@@ -2291,7 +2306,8 @@ mod tests {
         let mut loaded = vec![loaded];

         let next_blockhash = Hash::new_unique();
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         let collected_accounts = accounts.collect_accounts_to_store(
             &txs,
             None,
@@ -2401,7 +2417,8 @@ mod tests {
         let mut loaded = vec![loaded];

         let next_blockhash = Hash::new_unique();
-        let accounts = Accounts::new(Vec::new(), &ClusterType::Development);
+        let accounts =
+            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
         let collected_accounts = accounts.collect_accounts_to_store(
             &txs,
             None,
runtime/src/accounts_background_service.rs

@@ -77,7 +77,7 @@ pub struct SnapshotRequestHandler {

 impl SnapshotRequestHandler {
     // Returns the latest requested snapshot slot, if one exists
-    pub fn handle_snapshot_requests(&self) -> Option<u64> {
+    pub fn handle_snapshot_requests(&self, accounts_db_caching_enabled: bool) -> Option<u64> {
         self.snapshot_request_receiver
             .try_iter()
             .last()
@@ -92,9 +92,19 @@ impl SnapshotRequestHandler {
                 hash_time.stop();

                 let mut shrink_time = Measure::start("shrink_time");
-                snapshot_root_bank.process_stale_slot_with_budget(0, SHRUNKEN_ACCOUNT_PER_INTERVAL);
+                if !accounts_db_caching_enabled {
+                    snapshot_root_bank
+                        .process_stale_slot_with_budget(0, SHRUNKEN_ACCOUNT_PER_INTERVAL);
+                }
                 shrink_time.stop();

+                let mut flush_accounts_cache_time = Measure::start("flush_accounts_cache_time");
+                if accounts_db_caching_enabled {
+                    // Force flush all the roots from the cache so that the snapshot can be taken.
+                    snapshot_root_bank.force_flush_accounts_cache();
+                }
+                flush_accounts_cache_time.stop();
+
                 let mut clean_time = Measure::start("clean_time");
                 // Don't clean the slot we're snapshotting because it may have zero-lamport
                 // accounts that were included in the bank delta hash when the bank was frozen,
@@ -103,6 +113,12 @@ impl SnapshotRequestHandler {
                 snapshot_root_bank.clean_accounts(true);
                 clean_time.stop();

+                if accounts_db_caching_enabled {
+                    shrink_time = Measure::start("shrink_time");
+                    snapshot_root_bank.shrink_candidate_slots();
+                    shrink_time.stop();
+                }
+
                 // Generate an accounts package
                 let mut snapshot_time = Measure::start("snapshot_time");
                 let r = snapshot_utils::snapshot_bank(
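With caching enabled, the handler's ordering matters: flush the cache so every rooted slot is persisted in append-vecs, then clean, then shrink, and only then take the snapshot. A condensed sketch of that control flow (paraphrasing the hunks above and below, not a verbatim excerpt):

    use solana_runtime::bank::Bank;

    // Condensed from the diff; `bank` is the snapshot root bank.
    fn pre_snapshot_steps(bank: &Bank, accounts_db_caching_enabled: bool) {
        if accounts_db_caching_enabled {
            // 1) Push all rooted slots out of the write cache.
            bank.force_flush_accounts_cache();
        }
        // 2) Clean zero-lamport and dead accounts (sparing the snapshot slot).
        bank.clean_accounts(true);
        if accounts_db_caching_enabled {
            // 3) Shrink only after flushing, so candidates are real append-vecs.
            bank.shrink_candidate_slots();
        }
        // 4) ...then the accounts package / snapshot is generated.
    }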
@@ -130,6 +146,12 @@ impl SnapshotRequestHandler {

                 datapoint_info!(
                     "handle_snapshot_requests-timing",
+                    ("hash_time", hash_time.as_us(), i64),
+                    (
+                        "flush_accounts_cache_time",
+                        flush_accounts_cache_time.as_us(),
+                        i64
+                    ),
                     ("shrink_time", shrink_time.as_us(), i64),
                     ("clean_time", clean_time.as_us(), i64),
                     ("snapshot_time", snapshot_time.as_us(), i64),
@@ -138,7 +160,6 @@ impl SnapshotRequestHandler {
                         purge_old_snapshots_time.as_us(),
                         i64
                     ),
-                    ("hash_time", hash_time.as_us(), i64),
                 );
                 snapshot_root_bank.block_height()
             })
@@ -180,11 +201,11 @@ pub struct ABSRequestHandler {

 impl ABSRequestHandler {
     // Returns the latest requested snapshot block height, if one exists
-    pub fn handle_snapshot_requests(&self) -> Option<u64> {
+    pub fn handle_snapshot_requests(&self, accounts_db_caching_enabled: bool) -> Option<u64> {
         self.snapshot_request_handler
             .as_ref()
             .and_then(|snapshot_request_handler| {
-                snapshot_request_handler.handle_snapshot_requests()
+                snapshot_request_handler.handle_snapshot_requests(accounts_db_caching_enabled)
             })
     }

@@ -208,6 +229,7 @@ impl AccountsBackgroundService {
         bank_forks: Arc<RwLock<BankForks>>,
         exit: &Arc<AtomicBool>,
         request_handler: ABSRequestHandler,
+        accounts_db_caching_enabled: bool,
     ) -> Self {
         info!("AccountsBackgroundService active");
         let exit = exit.clone();
@@ -250,26 +272,36 @@ impl AccountsBackgroundService {
                 // request for `N` to the snapshot request channel before setting a root `R > N`, and
                 // snapshot_request_handler.handle_requests() will always look for the latest
                 // available snapshot in the channel.
-                let snapshot_block_height = request_handler.handle_snapshot_requests();
+                let snapshot_block_height =
+                    request_handler.handle_snapshot_requests(accounts_db_caching_enabled);
+                if accounts_db_caching_enabled {
+                    bank.flush_accounts_cache_if_needed();
+                }

                 if let Some(snapshot_block_height) = snapshot_block_height {
                     // Safe, see proof above
                     assert!(last_cleaned_block_height <= snapshot_block_height);
                     last_cleaned_block_height = snapshot_block_height;
                 } else {
-                    // under sustained writes, shrink can lag behind so cap to
-                    // SHRUNKEN_ACCOUNT_PER_INTERVAL (which is based on INTERVAL_MS,
-                    // which in turn roughly asscociated block time)
-                    consumed_budget = bank
-                        .process_stale_slot_with_budget(
-                            consumed_budget,
-                            SHRUNKEN_ACCOUNT_PER_INTERVAL,
-                        )
-                        .min(SHRUNKEN_ACCOUNT_PER_INTERVAL);
+                    if accounts_db_caching_enabled {
+                        bank.shrink_candidate_slots();
+                    } else {
+                        // under sustained writes, shrink can lag behind so cap to
+                        // SHRUNKEN_ACCOUNT_PER_INTERVAL (which is based on INTERVAL_MS,
+                        // which in turn roughly asscociated block time)
+                        consumed_budget = bank
+                            .process_stale_slot_with_budget(
+                                consumed_budget,
+                                SHRUNKEN_ACCOUNT_PER_INTERVAL,
+                            )
+                            .min(SHRUNKEN_ACCOUNT_PER_INTERVAL);
+                    }
                     if bank.block_height() - last_cleaned_block_height
                         > (CLEAN_INTERVAL_BLOCKS + thread_rng().gen_range(0, 10))
                     {
+                        if accounts_db_caching_enabled {
+                            bank.force_flush_accounts_cache();
+                        }
                         bank.clean_accounts(true);
                         last_cleaned_block_height = bank.block_height();
                     }
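The background loop now branches on the flag: with caching, it opportunistically flushes the cache and shrinks explicit candidate slots; without it, it keeps the old budgeted stale-slot processing. One iteration, condensed (this is a paraphrase of the loop body above, not the actual code):

    // One AccountsBackgroundService iteration, condensed.
    fn abs_iteration(bank: &Bank, caching_enabled: bool, consumed_budget: &mut usize) {
        if caching_enabled {
            // Flush the write cache when it has grown past its limit.
            bank.flush_accounts_cache_if_needed();
            // Shrinking is driven by explicit candidate slots.
            bank.shrink_candidate_slots();
        } else {
            // Legacy path: shrink stale slots under a per-interval budget.
            *consumed_budget = bank
                .process_stale_slot_with_budget(*consumed_budget, SHRUNKEN_ACCOUNT_PER_INTERVAL)
                .min(SHRUNKEN_ACCOUNT_PER_INTERVAL);
        }
    }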
runtime/src/accounts_cache.rs (new file)

@@ -0,0 +1,254 @@
use dashmap::DashMap;
use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey};
use std::{
    collections::HashSet,
    ops::Deref,
    sync::{
        atomic::{AtomicBool, AtomicU64, Ordering},
        Arc, RwLock,
    },
};

pub type SlotCache = Arc<SlotCacheInner>;

#[derive(Default, Debug)]
pub struct SlotCacheInner {
    cache: DashMap<Pubkey, CachedAccount>,
    same_account_writes: AtomicU64,
    same_account_writes_size: AtomicU64,
    unique_account_writes_size: AtomicU64,
    is_frozen: AtomicBool,
}

impl SlotCacheInner {
    pub fn report_slot_store_metrics(&self) {
        datapoint_info!(
            "slot_repeated_writes",
            (
                "same_account_writes",
                self.same_account_writes.load(Ordering::Relaxed),
                i64
            ),
            (
                "same_account_writes_size",
                self.same_account_writes_size.load(Ordering::Relaxed),
                i64
            ),
            (
                "unique_account_writes_size",
                self.unique_account_writes_size.load(Ordering::Relaxed),
                i64
            )
        );
    }

    pub fn insert(&self, pubkey: &Pubkey, account: Account, hash: Hash) {
        if self.cache.contains_key(pubkey) {
            self.same_account_writes.fetch_add(1, Ordering::Relaxed);
            self.same_account_writes_size
                .fetch_add(account.data.len() as u64, Ordering::Relaxed);
        } else {
            self.unique_account_writes_size
                .fetch_add(account.data.len() as u64, Ordering::Relaxed);
        }
        self.cache.insert(*pubkey, CachedAccount { account, hash });
    }

    pub fn get_cloned(&self, pubkey: &Pubkey) -> Option<CachedAccount> {
        self.cache
            .get(pubkey)
            // 1) Maybe can eventually use a Cow to avoid a clone on every read
            // 2) Popping is only safe if its guaranteed only replay/banking threads
            // are reading from the AccountsDb
            .map(|account_ref| account_ref.value().clone())
    }

    pub fn mark_slot_frozen(&self) {
        self.is_frozen.store(true, Ordering::SeqCst);
    }

    pub fn is_frozen(&self) -> bool {
        self.is_frozen.load(Ordering::SeqCst)
    }
}

impl Deref for SlotCacheInner {
    type Target = DashMap<Pubkey, CachedAccount>;
    fn deref(&self) -> &Self::Target {
        &self.cache
    }
}

#[derive(Debug, Clone)]
pub struct CachedAccount {
    pub account: Account,
    pub hash: Hash,
}

#[derive(Debug, Default)]
pub struct AccountsCache {
    cache: DashMap<Slot, SlotCache>,
    // Queue of potentially unflushed roots. Random eviction + cache too large
    // could have triggered a flush of this slot already
    maybe_unflushed_roots: RwLock<HashSet<Slot>>,
    max_flushed_root: AtomicU64,
}

impl AccountsCache {
    pub fn report_size(&self) {
        let total_unique_writes_size: u64 = self
            .cache
            .iter()
            .map(|item| {
                let slot_cache = item.value();
                slot_cache
                    .unique_account_writes_size
                    .load(Ordering::Relaxed)
            })
            .sum();
        datapoint_info!(
            "accounts_cache_size",
            (
                "num_roots",
                self.maybe_unflushed_roots.read().unwrap().len(),
                i64
            ),
            ("num_slots", self.cache.len(), i64),
            ("total_unique_writes_size", total_unique_writes_size, i64),
        );
    }

    pub fn store(&self, slot: Slot, pubkey: &Pubkey, account: Account, hash: Hash) {
        let slot_cache = self.slot_cache(slot).unwrap_or_else(||
            // DashMap entry.or_insert() returns a RefMut, essentially a write lock,
            // which is dropped after this block ends, minimizing time held by the lock.
            // However, we still want to persist the reference to the `SlotStores` behind
            // the lock, hence we clone it out, (`SlotStores` is an Arc so is cheap to clone).
            self
                .cache
                .entry(slot)
                .or_insert(Arc::new(SlotCacheInner::default()))
                .clone());

        slot_cache.insert(pubkey, account, hash);
    }

    pub fn load(&self, slot: Slot, pubkey: &Pubkey) -> Option<CachedAccount> {
        self.slot_cache(slot)
            .and_then(|slot_cache| slot_cache.get_cloned(pubkey))
    }

    pub fn remove_slot(&self, slot: Slot) -> Option<SlotCache> {
        self.cache.remove(&slot).map(|(_, slot_cache)| slot_cache)
    }

    pub fn slot_cache(&self, slot: Slot) -> Option<SlotCache> {
        self.cache.get(&slot).map(|result| result.value().clone())
    }

    pub fn add_root(&self, root: Slot) {
        self.maybe_unflushed_roots.write().unwrap().insert(root);
    }

    pub fn clear_roots(&self) -> HashSet<Slot> {
        std::mem::replace(
            &mut self.maybe_unflushed_roots.write().unwrap(),
            HashSet::new(),
        )
    }

    // Removes slots less than or equal to `max_root`. Only safe to pass in a rooted slot,
    // otherwise the slot removed could still be undergoing replay!
    pub fn remove_slots_le(&self, max_root: Slot) -> Vec<(Slot, SlotCache)> {
        let mut removed_slots = vec![];
        self.cache.retain(|slot, slot_cache| {
            let should_remove = *slot <= max_root;
            if should_remove {
                removed_slots.push((*slot, slot_cache.clone()))
            }
            !should_remove
        });
        removed_slots
    }

    pub fn find_older_frozen_slots(&self, num_to_retain: usize) -> Vec<Slot> {
        if self.cache.len() > num_to_retain {
            let mut slots: Vec<_> = self
                .cache
                .iter()
                .filter_map(|item| {
                    let (slot, slot_cache) = item.pair();
                    if slot_cache.is_frozen() {
                        Some(*slot)
                    } else {
                        None
                    }
                })
                .collect();
            slots.sort_unstable();
            slots.truncate(slots.len().saturating_sub(num_to_retain));
            slots
        } else {
            vec![]
        }
    }

    pub fn num_slots(&self) -> usize {
        self.cache.len()
    }

    pub fn fetch_max_flush_root(&self) -> Slot {
        self.max_flushed_root.load(Ordering::Relaxed)
    }

    pub fn set_max_flush_root(&self, root: Slot) {
        self.max_flushed_root.fetch_max(root, Ordering::Relaxed);
    }
}

#[cfg(test)]
pub mod tests {
    use super::*;

    #[test]
    fn test_remove_slots_le() {
        let cache = AccountsCache::default();
        // Cache is empty, should return nothing
        assert!(cache.remove_slots_le(1).is_empty());
        let inserted_slot = 0;
        cache.store(
            inserted_slot,
            &Pubkey::new_unique(),
            Account::new(1, 0, &Pubkey::default()),
            Hash::default(),
        );
        // If the cache is told the size limit is 0, it should return the one slot
        let removed = cache.remove_slots_le(0);
        assert_eq!(removed.len(), 1);
        assert_eq!(removed[0].0, inserted_slot);
    }

    #[test]
    fn test_find_older_frozen_slots() {
        let cache = AccountsCache::default();
        // Cache is empty, should return nothing
        assert!(cache.find_older_frozen_slots(0).is_empty());
        let inserted_slot = 0;
        cache.store(
            inserted_slot,
            &Pubkey::new_unique(),
            Account::new(1, 0, &Pubkey::default()),
            Hash::default(),
        );

        // If the cache is told the size limit is 0, it should return nothing because there's only
        // one cached slot
        assert!(cache.find_older_frozen_slots(1).is_empty());
        // If the cache is told the size limit is 0, it should return nothing, because there's no
        // frozen slots
        assert!(cache.find_older_frozen_slots(0).is_empty());
        cache.slot_cache(inserted_slot).unwrap().mark_slot_frozen();
        // If the cache is told the size limit is 0, it should return the one frozen slot
        assert_eq!(cache.find_older_frozen_slots(0), vec![inserted_slot]);
    }
}
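Since the new module is self-contained, its intended flow follows directly from the API above: store per slot, read back, freeze when the bank freezes, root, and finally evict flushed slots. A sketch in the style of the module's own tests:

    #[test]
    fn accounts_cache_round_trip() {
        let cache = AccountsCache::default();
        let (slot, key) = (0, Pubkey::new_unique());
        cache.store(slot, &key, Account::new(1, 0, &Pubkey::default()), Hash::default());
        assert!(cache.load(slot, &key).is_some());

        // Bank::freeze() marks the slot cache frozen; rooting registers it
        // as a candidate for flushing.
        cache.slot_cache(slot).unwrap().mark_slot_frozen();
        cache.add_root(slot);

        // After flushing, rooted slots can be evicted wholesale.
        assert_eq!(cache.remove_slots_le(slot).len(), 1);
    }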
(File diff suppressed because it is too large.)
runtime/src/accounts_index.rs

@@ -34,6 +34,22 @@ pub type AccountMap<K, V> = BTreeMap<K, V>;

 type AccountMapEntry<T> = Arc<AccountMapEntryInner<T>>;

+pub trait IsCached {
+    fn is_cached(&self) -> bool;
+}
+
+impl IsCached for bool {
+    fn is_cached(&self) -> bool {
+        false
+    }
+}
+
+impl IsCached for u64 {
+    fn is_cached(&self) -> bool {
+        false
+    }
+}
+
 enum ScanTypes<R: RangeBounds<Pubkey>> {
     Unindexed(Option<R>),
     Indexed(IndexKey),
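`IsCached` is how the generic index learns whether a slot-list entry still lives only in the write cache: the `bool`/`u64` impls return false so the existing index types keep their old purge behavior, while a cache-aware entry type (defined in the suppressed accounts_db diff) can return true and be skipped by root purging. A hypothetical cache-aware value might look like this (illustrative, not from this commit):

    // Hypothetical index value distinguishing cached from persisted entries.
    #[derive(Clone)]
    enum StoreRef {
        Cached,        // still only in the accounts write cache
        Stored(u64),   // persisted in an append-vec (storage id)
    }

    impl IsCached for StoreRef {
        fn is_cached(&self) -> bool {
            matches!(self, StoreRef::Cached)
        }
    }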
@@ -220,7 +236,7 @@ pub struct AccountsIndex<T> {
     ongoing_scan_roots: RwLock<BTreeMap<Slot, u64>>,
 }

-impl<T: 'static + Clone> AccountsIndex<T> {
+impl<T: 'static + Clone + IsCached> AccountsIndex<T> {
     fn iter<R>(&self, range: Option<R>) -> AccountsIndexIterator<T>
     where
         R: RangeBounds<Pubkey>,
@@ -592,22 +608,24 @@ impl<T: 'static + Clone> AccountsIndex<T> {
     pub fn purge_exact(
         &self,
         pubkey: &Pubkey,
-        slots: HashSet<Slot>,
+        slots_to_purge: &HashSet<Slot>,
+        reclaims: &mut SlotList<T>,
         account_indexes: &HashSet<AccountIndex>,
-    ) -> (SlotList<T>, bool) {
+    ) -> bool {
         let res = {
             let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap();
             write_account_map_entry.slot_list_mut(|slot_list| {
-                let reclaims = slot_list
-                    .iter()
-                    .filter(|(slot, _)| slots.contains(&slot))
-                    .cloned()
-                    .collect();
-                slot_list.retain(|(slot, _)| !slots.contains(slot));
-                (reclaims, slot_list.is_empty())
+                slot_list.retain(|(slot, item)| {
+                    let should_purge = slots_to_purge.contains(&slot);
+                    if should_purge {
+                        reclaims.push((*slot, item.clone()));
+                    }
+                    !should_purge
+                });
+                slot_list.is_empty()
             })
         };
-        self.purge_secondary_indexes_by_inner_key(pubkey, Some(&slots), account_indexes);
+        self.purge_secondary_indexes_by_inner_key(pubkey, Some(&slots_to_purge), account_indexes);
         res
     }

@@ -820,7 +838,7 @@ impl<T: 'static + Clone> AccountsIndex<T> {

         let mut purged_slots: HashSet<Slot> = HashSet::new();
         list.retain(|(slot, value)| {
-            let should_purge = Self::can_purge(max_root, *slot);
+            let should_purge = Self::can_purge(max_root, *slot) && !value.is_cached();
             if should_purge {
                 reclaims.push((*slot, value.clone()));
                 purged_slots.insert(*slot);
@@ -831,6 +849,7 @@ impl<T: 'static + Clone> AccountsIndex<T> {
         self.purge_secondary_indexes_by_inner_key(pubkey, Some(&purged_slots), account_indexes);
     }

+    // `is_cached` closure is needed to work around the generic (`T`) indexed type.
     pub fn clean_rooted_entries(
         &self,
         pubkey: &Pubkey,
@@ -851,28 +870,6 @@ impl<T: 'static + Clone> AccountsIndex<T> {
         }
     }

-    pub fn clean_unrooted_entries_by_slot(
-        &self,
-        purge_slot: Slot,
-        pubkey: &Pubkey,
-        reclaims: &mut SlotList<T>,
-        account_indexes: &HashSet<AccountIndex>,
-    ) {
-        if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) {
-            locked_entry.slot_list_mut(|slot_list| {
-                slot_list.retain(|(slot, entry)| {
-                    if *slot == purge_slot {
-                        reclaims.push((*slot, entry.clone()));
-                    }
-                    *slot != purge_slot
-                });
-            });
-        }
-
-        let purge_slot: HashSet<Slot> = vec![purge_slot].into_iter().collect();
-        self.purge_secondary_indexes_by_inner_key(pubkey, Some(&purge_slot), account_indexes);
-    }
-
     pub fn can_purge(max_root: Slot, slot: Slot) -> bool {
         slot < max_root
     }
@@ -881,11 +878,24 @@ impl<T: 'static + Clone> AccountsIndex<T> {
         self.roots_tracker.read().unwrap().roots.contains(&slot)
     }

-    pub fn add_root(&self, slot: Slot) {
+    pub fn add_root(&self, slot: Slot, caching_enabled: bool) {
         let mut w_roots_tracker = self.roots_tracker.write().unwrap();
         w_roots_tracker.roots.insert(slot);
-        w_roots_tracker.uncleaned_roots.insert(slot);
-        w_roots_tracker.max_root = std::cmp::max(slot, w_roots_tracker.max_root);
+        // we delay cleaning until flushing!
+        if !caching_enabled {
+            w_roots_tracker.uncleaned_roots.insert(slot);
+        }
+        // `AccountsDb::flush_accounts_cache()` relies on roots being added in order
+        assert!(slot >= w_roots_tracker.max_root);
+        w_roots_tracker.max_root = slot;
     }

+    pub fn add_uncleaned_roots<I>(&self, roots: I)
+    where
+        I: IntoIterator<Item = Slot>,
+    {
+        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
+        w_roots_tracker.uncleaned_roots.extend(roots);
+    }
+
     fn max_root(&self) -> Slot {
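`add_root` now asserts that roots arrive in increasing order, because the cache flush logic trusts `max_root` to be the newest root rather than recomputing a maximum. In test form, following the test conventions below:

    #[test]
    #[should_panic]
    fn add_root_out_of_order_panics() {
        let index = AccountsIndex::<bool>::default();
        index.add_root(7, false);
        index.add_root(6, false); // trips `assert!(slot >= max_root)`
    }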
@@ -895,10 +905,29 @@
     /// Remove the slot when the storage for the slot is freed
     /// Accounts no longer reference this slot.
     pub fn clean_dead_slot(&self, slot: Slot) {
-        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
-        w_roots_tracker.roots.remove(&slot);
-        w_roots_tracker.uncleaned_roots.remove(&slot);
-        w_roots_tracker.previous_uncleaned_roots.remove(&slot);
+        if self.is_root(slot) {
+            let (roots_len, uncleaned_roots_len, previous_uncleaned_roots_len) = {
+                let mut w_roots_tracker = self.roots_tracker.write().unwrap();
+                w_roots_tracker.roots.remove(&slot);
+                w_roots_tracker.uncleaned_roots.remove(&slot);
+                w_roots_tracker.previous_uncleaned_roots.remove(&slot);
+                (
+                    w_roots_tracker.roots.len(),
+                    w_roots_tracker.uncleaned_roots.len(),
+                    w_roots_tracker.previous_uncleaned_roots.len(),
+                )
+            };
+            datapoint_info!(
+                "accounts_index_roots_len",
+                ("roots_len", roots_len as i64, i64),
+                ("uncleaned_roots_len", uncleaned_roots_len as i64, i64),
+                (
+                    "previous_uncleaned_roots_len",
+                    previous_uncleaned_roots_len as i64,
+                    i64
+                ),
+            );
+        }
     }

     pub fn reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> {
@@ -1136,7 +1165,7 @@ pub mod tests {
         );
     }

-    index.add_root(root_slot);
+    index.add_root(root_slot, false);

     (index, pubkeys)
 }
@@ -1272,7 +1301,7 @@
     fn test_is_root() {
         let index = AccountsIndex::<bool>::default();
         assert!(!index.is_root(0));
-        index.add_root(0);
+        index.add_root(0, false);
         assert!(index.is_root(0));
     }

@@ -1292,7 +1321,7 @@
         );
         assert!(gc.is_empty());

-        index.add_root(0);
+        index.add_root(0, false);
         let (list, idx) = index.get(&key.pubkey(), None, None).unwrap();
         assert_eq!(list.slot_list()[idx], (0, true));
     }
@@ -1300,8 +1329,8 @@
     #[test]
     fn test_clean_first() {
         let index = AccountsIndex::<bool>::default();
-        index.add_root(0);
-        index.add_root(1);
+        index.add_root(0, false);
+        index.add_root(1, false);
         index.clean_dead_slot(0);
         assert!(index.is_root(1));
         assert!(!index.is_root(0));
@@ -1311,8 +1340,8 @@
     fn test_clean_last() {
         //this behavior might be undefined, clean up should only occur on older slots
         let index = AccountsIndex::<bool>::default();
-        index.add_root(0);
-        index.add_root(1);
+        index.add_root(0, false);
+        index.add_root(1, false);
         index.clean_dead_slot(1);
         assert!(!index.is_root(1));
         assert!(index.is_root(0));
@@ -1322,8 +1351,8 @@
     fn test_clean_and_unclean_slot() {
         let index = AccountsIndex::<bool>::default();
         assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-        index.add_root(0);
-        index.add_root(1);
+        index.add_root(0, false);
+        index.add_root(1, false);
         assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());

         assert_eq!(
@@ -1348,8 +1377,8 @@
             .len()
         );

-        index.add_root(2);
-        index.add_root(3);
+        index.add_root(2, false);
+        index.add_root(3, false);
         assert_eq!(4, index.roots_tracker.read().unwrap().roots.len());
         assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
         assert_eq!(
@@ -1500,9 +1529,9 @@
             true,
             &mut gc,
         );
-        index.add_root(0);
-        index.add_root(1);
-        index.add_root(3);
+        index.add_root(0, false);
+        index.add_root(1, false);
+        index.add_root(3, false);
         index.upsert(
             4,
             &key.pubkey(),
@@ -1559,7 +1588,7 @@

         let purges = index.purge_roots(&key.pubkey());
         assert_eq!(purges, (vec![], false));
-        index.add_root(1);
+        index.add_root(1, false);

         let purges = index.purge_roots(&key.pubkey());
         assert_eq!(purges, (vec![(1, 10)], true));
@@ -1584,7 +1613,7 @@
         assert!(index.latest_slot(None, &slot_slice, None).is_none());

         // Given a root, should return the root
-        index.add_root(5);
+        index.add_root(5, false);
         assert_eq!(index.latest_slot(None, &slot_slice, None).unwrap(), 1);

         // Given a max_root == root, should still return the root
@@ -1666,7 +1695,12 @@
             slots.len()
         );

-        index.purge_exact(&account_key, slots.into_iter().collect(), account_index);
+        index.purge_exact(
+            &account_key,
+            &slots.into_iter().collect(),
+            &mut vec![],
+            account_index,
+        );

         assert!(secondary_index.index.is_empty());
         assert!(secondary_index.reverse_index.is_empty());
@@ -1716,9 +1750,9 @@

         // Add a later root, earlier slots should be reclaimed
         slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
-        index.add_root(1);
+        index.add_root(1, false);
         // Note 2 is not a root
-        index.add_root(5);
+        index.add_root(5, false);
         reclaims = vec![];
         index.purge_older_root_entries(
             &Pubkey::default(),
@@ -1732,7 +1766,7 @@

         // Add a later root that is not in the list, should not affect the outcome
         slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
-        index.add_root(6);
+        index.add_root(6, false);
         reclaims = vec![];
         index.purge_older_root_entries(
             &Pubkey::default(),
@@ -1977,7 +2011,7 @@
         assert!(secondary_index.get(&secondary_key1).is_empty());
         assert_eq!(secondary_index.get(&secondary_key2), vec![account_key]);

-        // If another fork reintroduces secondary_key1, then it should be readded to the
+        // If another fork reintroduces secondary_key1, then it should be re-added to the
         // index
         let fork = slot + 1;
         index.upsert(
@@ -1993,7 +2027,7 @@

         // If we set a root at fork, and clean, then the secondary_key1 should no longer
         // be findable
-        index.add_root(fork);
+        index.add_root(fork, false);
         index
             .get_account_write_entry(&account_key)
             .unwrap()
runtime/src/append_vec.rs

@@ -54,19 +54,31 @@ pub struct AccountMeta {
     pub rent_epoch: Epoch,
 }

+impl<'a> From<&'a Account> for AccountMeta {
+    fn from(account: &'a Account) -> Self {
+        Self {
+            lamports: account.lamports,
+            owner: account.owner,
+            executable: account.executable,
+            rent_epoch: account.rent_epoch,
+        }
+    }
+}
+
 /// References to Memory Mapped memory
 /// The Account is stored separately from its data, so getting the actual account requires a clone
 #[derive(PartialEq, Debug)]
-pub struct StoredAccount<'a> {
+pub struct StoredAccountMeta<'a> {
     pub meta: &'a StoredMeta,
     /// account data
     pub account_meta: &'a AccountMeta,
     pub data: &'a [u8],
     pub offset: usize,
+    pub stored_size: usize,
     pub hash: &'a Hash,
 }

-impl<'a> StoredAccount<'a> {
+impl<'a> StoredAccountMeta<'a> {
     pub fn clone_account(&self) -> Account {
         Account {
             lamports: self.account_meta.lamports,
@@ -366,17 +378,19 @@ impl AppendVec {
         Some((unsafe { &*ptr }, next))
     }

-    pub fn get_account<'a>(&'a self, offset: usize) -> Option<(StoredAccount<'a>, usize)> {
+    pub fn get_account<'a>(&'a self, offset: usize) -> Option<(StoredAccountMeta<'a>, usize)> {
         let (meta, next): (&'a StoredMeta, _) = self.get_type(offset)?;
         let (account_meta, next): (&'a AccountMeta, _) = self.get_type(next)?;
         let (hash, next): (&'a Hash, _) = self.get_type(next)?;
         let (data, next) = self.get_slice(next, meta.data_len as usize)?;
+        let stored_size = next - offset;
         Some((
-            StoredAccount {
+            StoredAccountMeta {
                 meta,
                 account_meta,
                 data,
                 offset,
+                stored_size,
                 hash,
             },
             next,
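`get_account` now also reports `stored_size`, the bytes an entry occupies in the append-vec: the fixed headers (`StoredMeta`, `AccountMeta`, `Hash`) plus the aligned data slice, computed as `next - offset`. A back-of-the-envelope version of the same arithmetic (illustrative only; the real value comes from the actual parse offsets, which include alignment padding):

    // Approximate an entry's stored size; the real code uses `next - offset`.
    fn approx_stored_size(data_len: usize) -> usize {
        let headers = std::mem::size_of::<StoredMeta>()
            + std::mem::size_of::<AccountMeta>()
            + std::mem::size_of::<Hash>();
        // data is padded so the following entry starts u64-aligned
        let aligned_data_len = (data_len + 7) & !7;
        headers + aligned_data_len
    }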
@@ -392,7 +406,7 @@ impl AppendVec {
         self.path.clone()
     }

-    pub fn accounts(&self, mut start: usize) -> Vec<StoredAccount> {
+    pub fn accounts(&self, mut start: usize) -> Vec<StoredAccountMeta> {
         let mut accounts = vec![];
         while let Some((account, next)) = self.get_account(start) {
             accounts.push(account);
@@ -411,12 +425,7 @@ impl AppendVec {
         let mut rv = Vec::with_capacity(accounts.len());
         for ((stored_meta, account), hash) in accounts.iter().zip(hashes) {
             let meta_ptr = stored_meta as *const StoredMeta;
-            let account_meta = AccountMeta {
-                lamports: account.lamports,
-                owner: account.owner,
-                executable: account.executable,
-                rent_epoch: account.rent_epoch,
-            };
+            let account_meta = AccountMeta::from(*account);
             let account_meta_ptr = &account_meta as *const AccountMeta;
             let data_len = stored_meta.data_len as usize;
             let data_ptr = account.data.as_ptr();
@@ -433,6 +442,11 @@ impl AppendVec {
                 break;
             }
         }
+
+        // The last entry in this offset needs to be the u64 aligned offset, because that's
+        // where the *next* entry will begin to be stored.
+        rv.push(u64_align!(*offset));
+
         rv
     }

@@ -442,9 +456,12 @@ impl AppendVec {
         account: &Account,
         hash: Hash,
     ) -> Option<usize> {
-        self.append_accounts(&[(storage_meta, account)], &[hash])
-            .first()
-            .cloned()
+        let res = self.append_accounts(&[(storage_meta, account)], &[hash]);
+        if res.len() == 1 {
+            None
+        } else {
+            res.first().cloned()
+        }
     }
 }
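The reworked `append_account` leans on the convention established just above: `append_accounts` always pushes a trailing u64-aligned offset, so a successful single append returns two offsets (the entry's own plus the next write position), while a failed append returns only the trailing one. Hence `res.len() == 1` means nothing was written; the same decision in isolation:

    // Mirror of the return convention above (illustrative helper):
    // [entry_offset, next_aligned_offset] on success,
    // [next_aligned_offset] when the account didn't fit.
    fn appended_offset(offsets: Vec<usize>) -> Option<usize> {
        if offsets.len() == 1 {
            None
        } else {
            offsets.first().cloned()
        }
    }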
@@ -511,7 +528,7 @@ pub mod tests {
         }
     }

-    impl<'a> StoredAccount<'a> {
+    impl<'a> StoredAccountMeta<'a> {
         #[allow(clippy::cast_ref_to_mut)]
         fn set_data_len_unsafe(&self, new_data_len: u64) {
             // UNSAFE: cast away & (= const ref) to &mut to force to mutate append-only (=read-only) AppendVec
@ -861,13 +861,22 @@ impl Default for BlockhashQueue {
|
|||
|
||||
impl Bank {
|
||||
pub fn new(genesis_config: &GenesisConfig) -> Self {
|
||||
Self::new_with_paths(&genesis_config, Vec::new(), &[], None, None, HashSet::new())
|
||||
Self::new_with_paths(
|
||||
&genesis_config,
|
||||
Vec::new(),
|
||||
&[],
|
||||
None,
|
||||
None,
|
||||
HashSet::new(),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn new_with_indexes(
|
||||
pub(crate) fn new_with_config(
|
||||
genesis_config: &GenesisConfig,
|
||||
account_indexes: HashSet<AccountIndex>,
|
||||
accounts_db_caching_enabled: bool,
|
||||
) -> Self {
|
||||
Self::new_with_paths(
|
||||
&genesis_config,
|
||||
|
@ -876,6 +885,7 @@ impl Bank {
|
|||
None,
|
||||
None,
|
||||
account_indexes,
|
||||
accounts_db_caching_enabled,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -886,16 +896,18 @@ impl Bank {
|
|||
debug_keys: Option<Arc<HashSet<Pubkey>>>,
|
||||
additional_builtins: Option<&Builtins>,
|
||||
account_indexes: HashSet<AccountIndex>,
|
||||
accounts_db_caching_enabled: bool,
|
||||
) -> Self {
|
||||
let mut bank = Self::default();
|
||||
bank.ancestors.insert(bank.slot(), 0);
|
||||
bank.transaction_debug_keys = debug_keys;
|
||||
bank.cluster_type = Some(genesis_config.cluster_type);
|
||||
|
||||
bank.rc.accounts = Arc::new(Accounts::new_with_indexes(
|
||||
bank.rc.accounts = Arc::new(Accounts::new_with_config(
|
||||
paths,
|
||||
&genesis_config.cluster_type,
|
||||
account_indexes,
|
||||
accounts_db_caching_enabled,
|
||||
));
|
||||
bank.process_genesis_config(genesis_config);
|
||||
bank.finish_init(genesis_config, additional_builtins);
|
||||
|
@ -1998,7 +2010,6 @@ impl Bank {
// record and commit are finished, those transactions will be
// committed before this write lock can be obtained here.
let mut hash = self.hash.write().unwrap();

if *hash == Hash::default() {
// finish up any deferred changes to account state
self.collect_rent_eagerly();

@ -2010,12 +2021,19 @@ impl Bank {
// freeze is a one-way trip, idempotent
self.freeze_started.store(true, Relaxed);
*hash = self.hash_internal_state();
self.rc.accounts.accounts_db.mark_slot_frozen(self.slot());
}
}

// Should not be called outside of startup, will race with
// concurrent cleaning logic in AccountsBackgroundService
pub fn exhaustively_free_unused_resource(&self) {
let mut flush = Measure::start("flush");
// Flush all the rooted accounts. Must be called after `squash()`,
// so that AccountsDb knows what the roots are.
self.force_flush_accounts_cache();
flush.stop();

let mut clean = Measure::start("clean");
// Don't clean the slot we're snapshotting because it may have zero-lamport
// accounts that were included in the bank delta hash when the bank was frozen,

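The ordering constraints spelled out in these comments (roots first, then flush, then clean) can be read as a small pipeline. A condensed sketch under the assumption that the method names in this diff are the whole story; the real function carries more guards:

use log::info;
use solana_measure::measure::Measure;

fn free_unused_resources(bank: &Bank) {
    bank.squash(); // roots must exist before the flush

    let mut flush = Measure::start("flush");
    bank.force_flush_accounts_cache();
    flush.stop();

    let mut clean = Measure::start("clean");
    bank.clean_accounts(true); // skip the latest slot; see the zero-lamport caveat above
    clean.stop();

    let mut shrink = Measure::start("shrink");
    bank.shrink_candidate_slots();
    shrink.stop();

    info!("flush: {}, clean: {}, shrink: {}", flush, clean, shrink);
}
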
@ -2030,9 +2048,10 @@ impl Bank {

info!(
"exhaustively_free_unused_resource()
flush: {},
clean: {},
shrink: {}",
clean, shrink,
flush, clean, shrink,
);
}

@ -3144,7 +3163,7 @@ impl Bank {
}

let mut write_time = Measure::start("write_time");
self.rc.accounts.store_accounts(
self.rc.accounts.store_cached(
self.slot(),
txs,
iteration_order,

@ -3841,7 +3860,9 @@ impl Bank {

pub fn store_account(&self, pubkey: &Pubkey, account: &Account) {
assert!(!self.freeze_started());
self.rc.accounts.store_slow(self.slot(), pubkey, account);
self.rc
.accounts
.store_slow_cached(self.slot(), pubkey, account);

if Stakes::is_stake(account) {
self.stakes

@ -3851,6 +3872,17 @@ impl Bank {
}
}

pub fn force_flush_accounts_cache(&self) {
self.rc.accounts.accounts_db.force_flush_accounts_cache()
}

pub fn flush_accounts_cache_if_needed(&self) {
self.rc
.accounts
.accounts_db
.flush_accounts_cache_if_needed()
}

fn store_account_and_update_capitalization(&self, pubkey: &Pubkey, new_account: &Account) {
if let Some(old_account) = self.get_account(&pubkey) {
match new_account.lamports.cmp(&old_account.lamports) {

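These two wrappers expose the cache's two flush policies: unconditional (before hashing or snapshotting, everything rooted must be on disk) and size-triggered (the steady-state path). A sketch of how a caller might pick between them; the control flow here is an illustration, with AccountsBackgroundService driving the lazy path in the real code:

fn on_new_root(bank: &Bank, from_background_service: bool) {
    bank.squash();
    if from_background_service {
        // Steady state: only flush once the cache has grown past its limit.
        bank.flush_accounts_cache_if_needed();
    } else {
        // Snapshot/startup path: every rooted store must reach the append vecs.
        bank.force_flush_accounts_cache();
    }
}
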
@ -4538,7 +4570,7 @@ impl Bank {
budget_recovery_delta: usize,
) -> usize {
if consumed_budget == 0 {
let shrunken_account_count = self.rc.accounts.accounts_db.process_stale_slot();
let shrunken_account_count = self.rc.accounts.accounts_db.process_stale_slot_v1();
if shrunken_account_count > 0 {
datapoint_info!(
"stale_slot_shrink",

@ -4550,6 +4582,10 @@ impl Bank {
consumed_budget.saturating_sub(budget_recovery_delta)
}

pub fn shrink_candidate_slots(&self) -> usize {
self.rc.accounts.accounts_db.shrink_candidate_slots()
}

pub fn secp256k1_program_enabled(&self) -> bool {
self.feature_set
.is_active(&feature_set::secp256k1_program_enabled::id())

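`shrink_candidate_slots` drains a set of storages that clean has flagged as mostly dead. A hedged sketch of the candidacy test implied by `SHRINK_RATIO` and the assertions in the tests further down; the constant below is illustrative, the real one lives in `accounts_db`:

const SHRINK_RATIO: f64 = 0.8; // illustrative value, not the crate's constant

fn is_shrink_candidate(alive_bytes: usize, total_bytes: usize) -> bool {
    total_bytes > 0 && (alive_bytes as f64) / (total_bytes as f64) < SHRINK_RATIO
}
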
@ -4874,6 +4910,7 @@ fn is_simple_vote_transaction(transaction: &Transaction) -> bool {
pub(crate) mod tests {
use super::*;
use crate::{
accounts_db::SHRINK_RATIO,
accounts_index::{AccountMap, Ancestors, ITER_BATCH_SIZE},
genesis_utils::{
activate_all_features, bootstrap_validator_stake_lamports,

@ -6726,7 +6763,7 @@ pub(crate) mod tests {
.accounts
.accounts_db
.accounts_index
.add_root(genesis_bank1.slot() + 1);
.add_root(genesis_bank1.slot() + 1, false);
bank1_without_zero
.rc
.accounts

@ -6997,10 +7034,14 @@ pub(crate) mod tests {
let pubkey = solana_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &pubkey, 0, blockhash);
bank.process_transaction(&tx).unwrap();
bank.freeze();
bank.squash();
bank = Arc::new(new_from_parent(&bank));
}

bank.freeze();
bank.squash();
bank.force_flush_accounts_cache();
let hash = bank.update_accounts_hash();
bank.clean_accounts(false);
assert_eq!(bank.update_accounts_hash(), hash);

@ -7037,9 +7078,11 @@ pub(crate) mod tests {
assert!(bank0.verify_bank_hash());

// Squash and then verify hash_internal value
bank0.freeze();
bank0.squash();
assert!(bank0.verify_bank_hash());

bank1.freeze();
bank1.squash();
bank1.update_accounts_hash();
assert!(bank1.verify_bank_hash());

@ -7047,6 +7090,7 @@ pub(crate) mod tests {
// keypair should have 0 tokens on both forks
assert_eq!(bank0.get_account(&keypair.pubkey()), None);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
bank1.force_flush_accounts_cache();
bank1.clean_accounts(false);

assert!(bank1.verify_bank_hash());

@ -8714,7 +8758,11 @@ pub(crate) mod tests {
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let mut account_indexes = HashSet::new();
account_indexes.insert(AccountIndex::ProgramId);
let bank = Arc::new(Bank::new_with_indexes(&genesis_config, account_indexes));
let bank = Arc::new(Bank::new_with_config(
&genesis_config,
account_indexes,
false,
));

let address = Pubkey::new_unique();
let program_id = Pubkey::new_unique();

@ -10151,6 +10199,94 @@ pub(crate) mod tests {
assert_eq!(42, bank.get_balance(&program2_pubkey));
}

fn get_shrink_account_size() -> usize {
let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);

// Set root for bank 0, with caching disabled
let mut bank0 = Arc::new(Bank::new_with_config(
&genesis_config,
HashSet::new(),
false,
));
bank0.restore_old_behavior_for_fragile_tests();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank0).unwrap());
bank0.freeze();
bank0.squash();

let sizes = bank0
.rc
.accounts
.scan_slot(0, |stored_account| Some(stored_account.stored_size()));

// Create an account such that it takes SHRINK_RATIO of the total account space for
// the slot, so when it gets pruned, the storage entry will become a shrink candidate.
let bank0_total_size: usize = sizes.into_iter().sum();
let pubkey0_size = (bank0_total_size as f64 / (1.0 - SHRINK_RATIO)).ceil();
assert!(pubkey0_size / (pubkey0_size + bank0_total_size as f64) > SHRINK_RATIO);
pubkey0_size as usize
}

#[test]
fn test_shrink_candidate_slots_cached() {
solana_logger::setup();

let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();

// Set root for bank 0, with caching enabled
let mut bank0 = Arc::new(Bank::new_with_config(&genesis_config, HashSet::new(), true));
bank0.restore_old_behavior_for_fragile_tests();

let pubkey0_size = get_shrink_account_size();
let account0 = Account::new(1000, pubkey0_size as usize, &Pubkey::new_unique());
bank0.store_account(&pubkey0, &account0);

goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank0).unwrap());
bank0.freeze();
bank0.squash();

// Store some lamports in bank 1
let some_lamports = 123;
let mut bank1 = Arc::new(new_from_parent(&bank0));
bank1.deposit(&pubkey1, some_lamports);
bank1.deposit(&pubkey2, some_lamports);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank1).unwrap());

// Store some lamports for pubkey1 in bank 2, root bank 2
let mut bank2 = Arc::new(new_from_parent(&bank1));
bank2.deposit(&pubkey1, some_lamports);
bank2.store_account(&pubkey0, &account0);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank2).unwrap());
bank2.freeze();
bank2.squash();
bank2.force_flush_accounts_cache();

// Clean accounts, which should add earlier slots to the shrink
// candidate set
bank2.clean_accounts(false);

// Slots 0 and 1 should be candidates for shrinking, but slot 2
// shouldn't because none of its accounts are outdated by a later
// root
assert_eq!(bank2.shrink_candidate_slots(), 2);
let alive_counts: Vec<usize> = (0..3)
.map(|slot| {
bank2
.rc
.accounts
.accounts_db
.alive_account_count_in_slot(slot)
})
.collect();

// No more slots should be shrunk
assert_eq!(bank2.shrink_candidate_slots(), 0);
// alive_counts represents the count of alive accounts in the three slots 0,1,2
assert_eq!(alive_counts, vec![9, 1, 7]);
}

#[test]
fn test_process_stale_slot_with_budget() {
solana_logger::setup();

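The sizing arithmetic in `get_shrink_account_size` is worth a worked example: a filler account of size total / (1 - ratio) occupies more than `SHRINK_RATIO` of its slot, so once a later root outdates it, the slot's alive fraction collapses below the ratio and the storage becomes a shrink candidate. With an illustrative ratio of 0.8 and 1000 bytes of baseline accounts:

fn main() {
    let shrink_ratio = 0.8_f64; // illustrative; the real constant is accounts_db::SHRINK_RATIO
    let bank0_total_size = 1000.0_f64;

    // 1000 / (1 - 0.8) = 5000 bytes for the filler account
    let pubkey0_size = (bank0_total_size / (1.0 - shrink_ratio)).ceil();
    assert_eq!(pubkey0_size, 5000.0);

    // The filler takes 5000/6000 ≈ 0.83 of the slot, above the ratio, exactly
    // as the assert in get_shrink_account_size demands.
    assert!(pubkey0_size / (pubkey0_size + bank0_total_size) > shrink_ratio);
}
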
@ -11120,6 +11256,7 @@ pub(crate) mod tests {
None,
Some(&builtins),
HashSet::new(),
false,
));
// move to next epoch to create now deprecated rewards sysvar intentionally
let bank1 = Arc::new(Bank::new_from_parent(

@ -11347,100 +11484,6 @@ pub(crate) mod tests {
);
}

fn setup_bank_with_removable_zero_lamport_account() -> Arc<Bank> {
let (genesis_config, _mint_keypair) = create_genesis_config(2000);
let bank0 = Bank::new(&genesis_config);
bank0.freeze();

let bank1 = Arc::new(Bank::new_from_parent(
&Arc::new(bank0),
&Pubkey::default(),
1,
));

let zero_lamport_pubkey = solana_sdk::pubkey::new_rand();

bank1.store_account_and_update_capitalization(
&zero_lamport_pubkey,
&Account::new(0, 0, &Pubkey::default()),
);
// Store another account in a separate AppendVec from `zero_lamport_pubkey`
// (guaranteed because of large file size). We need this to ensure slot is
// not cleaned up after clean is called, so that the bank hash still exists
// when we call rehash() later in this test.
let large_account_pubkey = solana_sdk::pubkey::new_rand();
bank1.store_account_and_update_capitalization(
&large_account_pubkey,
&Account::new(
1000,
bank1.rc.accounts.accounts_db.file_size() as usize,
&Pubkey::default(),
),
);
assert_ne!(
bank1
.rc
.accounts
.accounts_db
.get_append_vec_id(&large_account_pubkey, 1)
.unwrap(),
bank1
.rc
.accounts
.accounts_db
.get_append_vec_id(&zero_lamport_pubkey, 1)
.unwrap()
);

// Make sure rent collection doesn't overwrite `large_account_pubkey`, which
// keeps slot 1 alive in the accounts database. Otherwise, slot 1 and its bank
// hash would be removed from accounts, preventing `rehash()` from succeeding
bank1.restore_old_behavior_for_fragile_tests();
bank1.freeze();
let bank1_hash = bank1.hash();

let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank2.freeze();

// Set a root so clean will happen on this slot
bank1.squash();

// All accounts other than `zero_lamport_pubkey` should be updated, which
// means clean should be able to delete the `zero_lamport_pubkey`
bank2.squash();

// Bank 1 hash should not change
bank1.rehash();
let new_bank1_hash = bank1.hash();
assert_eq!(bank1_hash, new_bank1_hash);

bank1
}

#[test]
fn test_clean_zero_lamport_account_different_hash() {
let bank1 = setup_bank_with_removable_zero_lamport_account();
let old_hash = bank1.hash();

// `zero_lamport_pubkey` should have been deleted, hashes will not match
bank1.clean_accounts(false);
bank1.rehash();
let new_bank1_hash = bank1.hash();
assert_ne!(old_hash, new_bank1_hash);
}

#[test]
fn test_clean_zero_lamport_account_same_hash() {
let bank1 = setup_bank_with_removable_zero_lamport_account();
let old_hash = bank1.hash();

// `zero_lamport_pubkey` will not be deleted, hashes will match
bank1.clean_accounts(true);
bank1.rehash();
let new_bank1_hash = bank1.hash();
assert_eq!(old_hash, new_bank1_hash);
}

#[test]
fn test_program_is_native_loader() {
let (genesis_config, mint_keypair) = create_genesis_config(50000);

@ -11531,7 +11574,7 @@ pub(crate) mod tests {
assert!(!debug.is_empty());
}

fn test_store_scan_consistency<F: 'static>(update_f: F)
fn test_store_scan_consistency<F: 'static>(accounts_db_caching_enabled: bool, update_f: F)
where
F: Fn(Arc<Bank>, crossbeam_channel::Sender<Arc<Bank>>, Arc<HashSet<Pubkey>>, Pubkey, u64)
+ std::marker::Send,

@ -11544,7 +11587,11 @@ pub(crate) mod tests {
)
.genesis_config;
genesis_config.rent = Rent::free();
let bank0 = Arc::new(Bank::new(&genesis_config));
let bank0 = Arc::new(Bank::new_with_config(
&genesis_config,
HashSet::new(),
accounts_db_caching_enabled,
));

// Set up pubkeys to write to
let total_pubkeys = ITER_BATCH_SIZE * 10;

@ -11590,7 +11637,7 @@ pub(crate) mod tests {
bank_to_scan_receiver.recv_timeout(Duration::from_millis(10))
{
let accounts = bank_to_scan.get_program_accounts(&program_id);
// Should never seen empty accounts because no slot ever deleted
// Should never see empty accounts because no slot ever deleted
// any of the original accounts, and the scan should reflect the
// account state at some frozen slot `X` (no partial updates).
assert!(!accounts.is_empty());

@ -11640,87 +11687,91 @@ pub(crate) mod tests {

#[test]
fn test_store_scan_consistency_unrooted() {
test_store_scan_consistency(
|bank0, bank_to_scan_sender, pubkeys_to_modify, program_id, starting_lamports| {
let mut current_major_fork_bank = bank0;
loop {
let mut current_minor_fork_bank = current_major_fork_bank.clone();
let num_new_banks = 2;
let lamports = current_minor_fork_bank.slot() + starting_lamports + 1;
// Modify banks on the two banks on the minor fork
for pubkeys_to_modify in &pubkeys_to_modify
.iter()
.chunks(pubkeys_to_modify.len() / num_new_banks)
{
current_minor_fork_bank = Arc::new(Bank::new_from_parent(
&current_minor_fork_bank,
&solana_sdk::pubkey::new_rand(),
current_minor_fork_bank.slot() + 2,
));
let account = Account::new(lamports, 0, &program_id);
// Write partial updates to each of the banks in the minor fork so if any of them
// get cleaned up, there will be keys with the wrong account value/missing.
for key in pubkeys_to_modify {
current_minor_fork_bank.store_account(key, &account);
for accounts_db_caching_enabled in &[false, true] {
test_store_scan_consistency(
*accounts_db_caching_enabled,
|bank0, bank_to_scan_sender, pubkeys_to_modify, program_id, starting_lamports| {
let mut current_major_fork_bank = bank0;
loop {
let mut current_minor_fork_bank = current_major_fork_bank.clone();
let num_new_banks = 2;
let lamports = current_minor_fork_bank.slot() + starting_lamports + 1;
// Modify banks on the two banks on the minor fork
for pubkeys_to_modify in &pubkeys_to_modify
.iter()
.chunks(pubkeys_to_modify.len() / num_new_banks)
{
current_minor_fork_bank = Arc::new(Bank::new_from_parent(
&current_minor_fork_bank,
&solana_sdk::pubkey::new_rand(),
current_minor_fork_bank.slot() + 2,
));
let account = Account::new(lamports, 0, &program_id);
// Write partial updates to each of the banks in the minor fork so if any of them
// get cleaned up, there will be keys with the wrong account value/missing.
for key in pubkeys_to_modify {
current_minor_fork_bank.store_account(key, &account);
}
current_minor_fork_bank.freeze();
}
current_minor_fork_bank.freeze();

// All the parent banks made in this iteration of the loop
// are currently discoverable, previous parents should have
// been squashed
assert_eq!(
current_minor_fork_bank.clone().parents_inclusive().len(),
num_new_banks + 1,
);

// `next_major_bank` needs to be sandwiched between the minor fork banks
// That way, after the squash(), the minor fork has the potential to see a
// *partial* clean of the banks < `next_major_bank`.
current_major_fork_bank = Arc::new(Bank::new_from_parent(
&current_major_fork_bank,
&solana_sdk::pubkey::new_rand(),
current_minor_fork_bank.slot() - 1,
));
let lamports = current_major_fork_bank.slot() + starting_lamports + 1;
let account = Account::new(lamports, 0, &program_id);
for key in pubkeys_to_modify.iter() {
// Store rooted updates to these pubkeys such that the minor
// fork updates to the same keys will be deleted by clean
current_major_fork_bank.store_account(key, &account);
}

// Send the last new bank to the scan thread to perform the scan.
// Meanwhile this thread will continually set roots on a separate fork
// and squash.
/*
            bank 0
          /        \
   minor bank 1     \
       /             current_major_fork_bank
 minor bank 2

*/
// The capacity of the channel is 1 so that this thread will wait for the scan to finish before starting
// the next iteration, allowing the scan to stay in sync with these updates
// such that every scan will see this interruption.
if bank_to_scan_sender.send(current_minor_fork_bank).is_err() {
// Channel was disconnected, exit
return;
}
current_major_fork_bank.freeze();
current_major_fork_bank.squash();
// Try to get cache flush/clean to overlap with the scan
current_major_fork_bank.force_flush_accounts_cache();
current_major_fork_bank.clean_accounts(false);
}

// All the parent banks made in this iteration of the loop
// are currently discoverable, previous parents should have
// been squashed
assert_eq!(
current_minor_fork_bank.clone().parents_inclusive().len(),
num_new_banks + 1,
);

// `next_major_bank` needs to be sandwiched between the minor fork banks
// That way, after the squash(), the minor fork has the potential to see a
// *partial* clean of the banks < `next_major_bank`.
current_major_fork_bank = Arc::new(Bank::new_from_parent(
&current_major_fork_bank,
&solana_sdk::pubkey::new_rand(),
current_minor_fork_bank.slot() - 1,
));
let lamports = current_major_fork_bank.slot() + starting_lamports + 1;
let account = Account::new(lamports, 0, &program_id);
for key in pubkeys_to_modify.iter() {
// Store rooted updates to these pubkeys such that the minor
// fork updates to the same keys will be deleted by clean
current_major_fork_bank.store_account(key, &account);
}

// Send the last new bank to the scan thread to perform the scan.
// Meanwhile this thread will continually set roots on a separate fork
// and squash.
/*
            bank 0
          /        \
   minor bank 1     \
       /             current_major_fork_bank
 minor bank 2

*/
// The capacity of the channel is 1 so that this thread will wait for the scan to finish before starting
// the next iteration, allowing the scan to stay in sync with these updates
// such that every scan will see this interruption.
current_major_fork_bank.freeze();
current_major_fork_bank.squash();
if bank_to_scan_sender.send(current_minor_fork_bank).is_err() {
// Channel was disconnected, exit
return;
}

// Try to get clean to overlap with the scan
current_major_fork_bank.clean_accounts(false);
}
},
)
},
)
}
}

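The capacity-1 channel trick described in those comments is worth isolating: a bounded crossbeam channel of size one means the producing thread blocks on `send` until the scanning thread has consumed the previous bank, so the scan stays in lockstep with the updates. A standalone illustration:

use crossbeam_channel::bounded;
use std::thread;

fn main() {
    let (sender, receiver) = bounded::<u64>(1);
    let scanner = thread::spawn(move || {
        while let Ok(slot) = receiver.recv() {
            // Scan the bank for `slot` here; until this recv(), the producer
            // could not have queued a second bank.
            println!("scanned slot {}", slot);
        }
    });
    for slot in 0..3 {
        sender.send(slot).unwrap(); // blocks while the previous bank is unconsumed
    }
    drop(sender); // disconnect so recv() errors and the scanner exits
    scanner.join().unwrap();
}
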
#[test]
fn test_store_scan_consistency_root() {
test_store_scan_consistency(
false,
|bank0, bank_to_scan_sender, pubkeys_to_modify, program_id, starting_lamports| {
let mut current_bank = bank0.clone();
let mut prev_bank = bank0;

@ -11742,7 +11793,10 @@ pub(crate) mod tests {
// Channel was disconnected, exit
return;
}

current_bank.freeze();
current_bank.squash();
current_bank.force_flush_accounts_cache();
current_bank.clean_accounts(true);
prev_bank = current_bank.clone();
current_bank = Arc::new(Bank::new_from_parent(

@ -1,6 +1,7 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))]
pub mod accounts;
pub mod accounts_background_service;
pub mod accounts_cache;
pub mod accounts_db;
pub mod accounts_index;
pub mod append_vec;

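The new `accounts_cache` module is what backs `store_cached`. As a loud assumption, this sketch shows only the general shape of a slot-keyed write-back cache, not the module's actual contents:

use std::collections::HashMap;
use std::sync::RwLock;

type Slot = u64;
type Pubkey = [u8; 32]; // stand-in for solana_sdk::pubkey::Pubkey
type Account = Vec<u8>; // stand-in for the real account type

#[derive(Default)]
struct AccountsCache {
    cache: RwLock<HashMap<Slot, HashMap<Pubkey, Account>>>,
}

impl AccountsCache {
    fn store(&self, slot: Slot, pubkey: Pubkey, account: Account) {
        self.cache
            .write()
            .unwrap()
            .entry(slot)
            .or_default()
            .insert(pubkey, account);
    }

    // Remove and hand back a slot's accounts so a flush can persist them
    // to the append vecs.
    fn take_slot(&self, slot: Slot) -> Option<HashMap<Pubkey, Account>> {
        self.cache.write().unwrap().remove(&slot)
    }
}
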
@ -117,6 +117,7 @@ where
.deserialize_from::<R, T>(reader)
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn bank_from_stream<R, P>(
serde_style: SerdeStyle,
stream: &mut BufReader<R>,

@ -127,6 +128,7 @@ pub(crate) fn bank_from_stream<R, P>(
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: HashSet<AccountIndex>,
caching_enabled: bool,
) -> std::result::Result<Bank, Error>
where
R: Read,

@ -146,6 +148,7 @@ where
debug_keys,
additional_builtins,
account_indexes,
caching_enabled,
)?;
Ok(bank)
}};

@ -223,6 +226,7 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}

#[allow(clippy::too_many_arguments)]
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>,

@ -233,6 +237,7 @@ fn reconstruct_bank_from_fields<E, P>(
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: HashSet<AccountIndex>,
caching_enabled: bool,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,

@ -244,6 +249,7 @@ where
append_vecs_path,
&genesis_config.cluster_type,
account_indexes,
caching_enabled,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);

@ -265,13 +271,18 @@ fn reconstruct_accountsdb_from_fields<E, P>(
stream_append_vecs_path: P,
cluster_type: &ClusterType,
account_indexes: HashSet<AccountIndex>,
caching_enabled: bool,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db =
AccountsDB::new_with_indexes(account_paths.to_vec(), cluster_type, account_indexes);
let mut accounts_db = AccountsDB::new_with_config(
account_paths.to_vec(),
cluster_type,
account_indexes,
caching_enabled,
);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;

// convert to two level map of slot -> id -> account storage entry

@ -365,6 +376,10 @@ where
);
}

if max_id > AppendVecId::MAX / 2 {
panic!("Storage id {} larger than allowed max", max_id);
}

accounts_db.next_id.store(max_id + 1, Ordering::Relaxed);
accounts_db
.write_version

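The new `max_id` guard makes sense once you see how ids are handed out: a plain atomic counter seeded from the largest id found in the snapshot. A sketch of that reasoning (the alias and the panic message come from the hunk above; the rest is reconstruction):

use std::sync::atomic::{AtomicUsize, Ordering};

type AppendVecId = usize; // assumed alias, matching the usage above

fn seed_next_id(next_id: &AtomicUsize, max_id: AppendVecId) {
    // A corrupt or hostile snapshot carrying a huge id would leave almost no
    // headroom before the counter overflows, so refuse it outright.
    if max_id > AppendVecId::MAX / 2 {
        panic!("Storage id {} larger than allowed max", max_id);
    }
    next_id.store(max_id + 1, Ordering::Relaxed);
}
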
@ -70,6 +70,7 @@ where
stream_append_vecs_path,
&ClusterType::Development,
HashSet::new(),
false,
)
}

@ -121,7 +122,8 @@ where
fn test_accounts_serialize_style(serde_style: SerdeStyle) {
solana_logger::setup();
let (_accounts_dir, paths) = get_temp_accounts_paths(4).unwrap();
let accounts = Accounts::new(paths, &ClusterType::Development);
let accounts =
Accounts::new_with_config(paths, &ClusterType::Development, HashSet::new(), false);

let mut pubkeys: Vec<Pubkey> = vec![];
create_test_accounts(&accounts, &mut pubkeys, 100, 0);

@ -181,7 +183,9 @@ fn test_bank_serialize_style(serde_style: SerdeStyle) {
let key3 = Keypair::new();
bank2.deposit(&key3.pubkey(), 0);

bank2.freeze();
bank2.squash();
bank2.force_flush_accounts_cache();

let snapshot_storages = bank2.get_snapshot_storages();
let mut buf = vec![];

@ -214,6 +218,7 @@ fn test_bank_serialize_style(serde_style: SerdeStyle) {
None,
None,
HashSet::new(),
false,
)
.unwrap();
dbank.src = ref_sc;

@ -581,6 +581,7 @@ pub fn remove_snapshot<P: AsRef<Path>>(slot: Slot, snapshot_path: P) -> Result<(
Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn bank_from_archive<P: AsRef<Path>>(
account_paths: &[PathBuf],
frozen_account_pubkeys: &[Pubkey],

@ -591,6 +592,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: HashSet<AccountIndex>,
accounts_db_caching_enabled: bool,
) -> Result<Bank> {
// Untar the snapshot into a temporary directory
let unpack_dir = tempfile::Builder::new()

@ -616,6 +618,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
debug_keys,
additional_builtins,
account_indexes,
accounts_db_caching_enabled,
)?;

if !bank.verify_snapshot_bank() {

@ -752,6 +755,7 @@ pub fn untar_snapshot_in<P: AsRef<Path>, Q: AsRef<Path>>(
Ok(())
}

#[allow(clippy::too_many_arguments)]
fn rebuild_bank_from_snapshots<P>(
snapshot_version: &str,
account_paths: &[PathBuf],

@ -762,6 +766,7 @@ fn rebuild_bank_from_snapshots<P>(
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: HashSet<AccountIndex>,
accounts_db_caching_enabled: bool,
) -> Result<Bank>
where
P: AsRef<Path>,

@ -799,6 +804,7 @@ where
debug_keys,
additional_builtins,
account_indexes,
accounts_db_caching_enabled,
),
}?)
})?;

@ -1144,7 +1144,9 @@ mod tests {
#[test]
fn test_create_zero_lamport_with_clean() {
with_create_zero_lamport(|bank| {
bank.freeze();
bank.squash();
bank.force_flush_accounts_cache();
// do clean and assert that it actually did its job
assert_eq!(3, bank.get_snapshot_storages().len());
bank.clean_accounts(false);

@ -25,7 +25,7 @@ fn test_shrink_and_clean() {
if exit_for_shrink.load(Ordering::Relaxed) {
break;
}
accounts_for_shrink.process_stale_slot();
accounts_for_shrink.process_stale_slot_v1();
});

let mut alive_accounts = vec![];

@ -45,7 +45,7 @@ fn test_shrink_and_clean() {

for (pubkey, account) in alive_accounts.iter_mut() {
account.lamports -= 1;
accounts.store(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
accounts.add_root(current_slot);
}

@ -108,7 +108,7 @@ fn test_bad_bank_hash() {
.iter()
.map(|idx| (&accounts_keys[*idx].0, &accounts_keys[*idx].1))
.collect();
db.store(some_slot, &account_refs);
db.store_uncached(some_slot, &account_refs);

for (key, account) in &account_refs {
assert_eq!(

@ -1423,6 +1423,11 @@ pub fn main() {
.value_name("INDEX")
.help("Enable an accounts index, indexed by the selected account field"),
)
.arg(
Arg::with_name("accounts_db_caching_enabled")
.long("accounts-db-caching-enabled")
.help("Enable accounts caching"),
)
.get_matches();

let identity_keypair = Arc::new(keypair_of(&matches, "identity").unwrap_or_else(Keypair::new));

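The switch is opt-in: `matches.is_present` yields false when the flag is absent (see the config hunk below), so existing deployments keep the uncached path. A hypothetical abbreviated invocation enabling it, with the identity path as a placeholder and the flag name taken from the hunk above: `solana-validator --identity validator-keypair.json --accounts-db-caching-enabled`.
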
@ -1593,6 +1598,7 @@ pub fn main() {
poh_pinned_cpu_core: value_of(&matches, "poh_pinned_cpu_core")
.unwrap_or(poh_service::DEFAULT_PINNED_CPU_CORE),
account_indexes,
accounts_db_caching_enabled: matches.is_present("accounts_db_caching_enabled"),
..ValidatorConfig::default()
};