2020-12-31 18:06:03 -08:00
|
|
|
use crate::{
|
2021-09-12 15:14:59 -07:00
|
|
|
accounts_index_storage::AccountsIndexStorage,
|
2021-05-19 09:50:34 -07:00
|
|
|
ancestors::Ancestors,
|
2021-09-19 18:22:09 -07:00
|
|
|
bucket_map_holder::{Age, BucketMapHolder},
|
2021-01-17 20:31:03 -08:00
|
|
|
contains::Contains,
|
2021-09-10 15:52:25 -07:00
|
|
|
in_mem_accounts_index::InMemAccountsIndex,
|
2020-12-31 18:06:03 -08:00
|
|
|
inline_spl_token_v2_0::{self, SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET},
|
2021-08-04 07:18:05 -07:00
|
|
|
pubkey_bins::PubkeyBinCalculator16,
|
2020-12-31 18:06:03 -08:00
|
|
|
secondary_index::*,
|
|
|
|
};
|
2021-04-12 10:11:33 -07:00
|
|
|
use bv::BitVec;
|
2021-05-06 13:04:13 -07:00
|
|
|
use log::*;
|
2020-10-21 17:05:27 -07:00
|
|
|
use ouroboros::self_referencing;
|
2021-02-03 15:00:42 -08:00
|
|
|
use solana_measure::measure::Measure;
|
2020-12-31 18:06:03 -08:00
|
|
|
use solana_sdk::{
|
2021-06-14 21:04:01 -07:00
|
|
|
clock::{BankId, Slot},
|
2020-12-31 18:06:03 -08:00
|
|
|
pubkey::{Pubkey, PUBKEY_BYTES},
|
2020-10-21 17:05:27 -07:00
|
|
|
};
|
2020-01-28 17:03:20 -08:00
|
|
|
use std::{
|
2021-09-12 19:54:09 -07:00
|
|
|
collections::{btree_map::BTreeMap, HashSet},
|
2021-07-16 15:05:23 -07:00
|
|
|
fmt::Debug,
|
2020-12-31 18:06:03 -08:00
|
|
|
ops::{
|
|
|
|
Bound,
|
|
|
|
Bound::{Excluded, Included, Unbounded},
|
|
|
|
Range, RangeBounds,
|
|
|
|
},
|
2021-09-18 07:54:57 -07:00
|
|
|
path::PathBuf,
|
2020-12-31 18:06:03 -08:00
|
|
|
sync::{
|
2021-09-18 07:56:26 -07:00
|
|
|
atomic::{AtomicBool, AtomicU64, AtomicU8, Ordering},
|
2021-06-14 21:04:01 -07:00
|
|
|
Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard,
|
2020-12-31 18:06:03 -08:00
|
|
|
},
|
2020-01-28 17:03:20 -08:00
|
|
|
};
|
2021-06-14 21:04:01 -07:00
|
|
|
use thiserror::Error;
|
2020-12-31 18:06:03 -08:00
|
|
|
|
2020-11-16 17:23:11 -08:00
|
|
|
pub const ITER_BATCH_SIZE: usize = 1000;
|
2021-08-26 16:12:43 -07:00
|
|
|
pub const BINS_DEFAULT: usize = 8192;
|
2021-09-18 07:53:44 -07:00
|
|
|
pub const BINS_FOR_TESTING: usize = 2; // we want > 1, but each bin is a few disk files with a disk based index, so fewer is better
|
|
|
|
pub const BINS_FOR_BENCHMARKS: usize = 2;
|
2021-09-17 15:02:43 -07:00
|
|
|
pub const FLUSH_THREADS_TESTING: usize = 1;
|
2021-08-17 12:50:01 -07:00
|
|
|
pub const ACCOUNTS_INDEX_CONFIG_FOR_TESTING: AccountsIndexConfig = AccountsIndexConfig {
|
2021-08-30 16:40:10 -07:00
|
|
|
bins: Some(BINS_FOR_TESTING),
|
2021-09-17 15:02:43 -07:00
|
|
|
flush_threads: Some(FLUSH_THREADS_TESTING),
|
2021-09-18 07:54:57 -07:00
|
|
|
drives: None,
|
2021-09-19 16:00:15 -07:00
|
|
|
index_limit_mb: None,
|
2021-09-19 18:22:09 -07:00
|
|
|
ages_to_stay_in_cache: None,
|
2021-08-17 12:50:01 -07:00
|
|
|
};
|
|
|
|
pub const ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS: AccountsIndexConfig = AccountsIndexConfig {
|
2021-08-30 16:40:10 -07:00
|
|
|
bins: Some(BINS_FOR_BENCHMARKS),
|
2021-09-17 15:02:43 -07:00
|
|
|
flush_threads: Some(FLUSH_THREADS_TESTING),
|
2021-09-18 07:54:57 -07:00
|
|
|
drives: None,
|
2021-09-19 16:00:15 -07:00
|
|
|
index_limit_mb: None,
|
2021-09-19 18:22:09 -07:00
|
|
|
ages_to_stay_in_cache: None,
|
2021-08-17 12:50:01 -07:00
|
|
|
};
|
2021-06-14 21:04:01 -07:00
|
|
|
pub type ScanResult<T> = Result<T, ScanError>;
|
2020-03-25 21:08:56 -07:00
|
|
|
pub type SlotList<T> = Vec<(Slot, T)>;
|
|
|
|
pub type SlotSlice<'s, T> = &'s [(Slot, T)];
|
2020-03-12 22:14:37 -07:00
|
|
|
pub type RefCount = u64;
|
2021-09-15 10:36:08 -07:00
|
|
|
pub type AccountMap<V> = Arc<InMemAccountsIndex<V>>;
|
2019-04-15 17:15:50 -07:00
|
|
|
|
2021-09-10 19:09:51 -07:00
|
|
|
pub(crate) type AccountMapEntry<T> = Arc<AccountMapEntryInner<T>>;
|
2020-10-21 17:05:27 -07:00
|
|
|
|
2021-09-13 20:59:03 -07:00
|
|
|
pub trait IsCached:
|
|
|
|
'static + Clone + Debug + PartialEq + ZeroLamport + Copy + Default + Sync + Send
|
|
|
|
{
|
2021-01-11 17:00:23 -08:00
|
|
|
fn is_cached(&self) -> bool;
|
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
pub trait IndexValue:
|
|
|
|
'static + IsCached + Clone + Debug + PartialEq + ZeroLamport + Copy + Default + Sync + Send
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2021-06-14 21:04:01 -07:00
|
|
|
#[derive(Error, Debug, PartialEq)]
|
|
|
|
pub enum ScanError {
|
|
|
|
#[error("Node detected it replayed bad version of slot {slot:?} with id {bank_id:?}, thus the scan on said slot was aborted")]
|
|
|
|
SlotRemoved { slot: Slot, bank_id: BankId },
|
|
|
|
}
|
|
|
|
|
2020-12-31 18:06:03 -08:00
|
|
|
enum ScanTypes<R: RangeBounds<Pubkey>> {
|
|
|
|
Unindexed(Option<R>),
|
|
|
|
Indexed(IndexKey),
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, Copy)]
|
|
|
|
pub enum IndexKey {
|
|
|
|
ProgramId(Pubkey),
|
|
|
|
SplTokenMint(Pubkey),
|
|
|
|
SplTokenOwner(Pubkey),
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
|
|
|
pub enum AccountIndex {
|
|
|
|
ProgramId,
|
|
|
|
SplTokenMint,
|
|
|
|
SplTokenOwner,
|
|
|
|
}
|
|
|
|
|
2021-05-11 15:06:22 -07:00
|
|
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
|
|
|
pub struct AccountSecondaryIndexesIncludeExclude {
|
|
|
|
pub exclude: bool,
|
|
|
|
pub keys: HashSet<Pubkey>,
|
|
|
|
}
|
|
|
|
|
2021-09-07 18:09:40 -07:00
|
|
|
#[derive(Debug, Default, Clone)]
|
2021-08-17 12:50:01 -07:00
|
|
|
pub struct AccountsIndexConfig {
|
|
|
|
pub bins: Option<usize>,
|
2021-09-17 15:02:43 -07:00
|
|
|
pub flush_threads: Option<usize>,
|
2021-09-18 07:54:57 -07:00
|
|
|
pub drives: Option<Vec<PathBuf>>,
|
2021-09-19 16:00:15 -07:00
|
|
|
pub index_limit_mb: Option<usize>,
|
2021-09-19 18:22:09 -07:00
|
|
|
pub ages_to_stay_in_cache: Option<Age>,
|
2021-08-17 12:50:01 -07:00
|
|
|
}
|
|
|
|
|
2021-05-11 15:06:22 -07:00
|
|
|
#[derive(Debug, Default, Clone)]
|
|
|
|
pub struct AccountSecondaryIndexes {
|
|
|
|
pub keys: Option<AccountSecondaryIndexesIncludeExclude>,
|
|
|
|
pub indexes: HashSet<AccountIndex>,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl AccountSecondaryIndexes {
|
|
|
|
pub fn is_empty(&self) -> bool {
|
|
|
|
self.indexes.is_empty()
|
|
|
|
}
|
|
|
|
pub fn contains(&self, index: &AccountIndex) -> bool {
|
|
|
|
self.indexes.contains(index)
|
|
|
|
}
|
|
|
|
pub fn include_key(&self, key: &Pubkey) -> bool {
|
|
|
|
match &self.keys {
|
|
|
|
Some(options) => options.exclude ^ options.keys.contains(key),
|
|
|
|
None => true, // include all keys
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-05-10 07:22:48 -07:00
|
|
|
|
2021-09-16 19:36:23 -07:00
|
|
|
#[derive(Debug, Default)]
|
|
|
|
pub struct AccountMapEntryMeta {
|
|
|
|
pub dirty: AtomicBool,
|
2021-09-18 07:56:26 -07:00
|
|
|
pub age: AtomicU8,
|
2021-09-16 19:36:23 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
impl AccountMapEntryMeta {
|
2021-09-19 18:22:09 -07:00
|
|
|
pub fn new_dirty<T: IndexValue>(storage: &Arc<BucketMapHolder<T>>) -> Self {
|
2021-09-16 19:36:23 -07:00
|
|
|
AccountMapEntryMeta {
|
|
|
|
dirty: AtomicBool::new(true),
|
2021-09-19 18:22:09 -07:00
|
|
|
age: AtomicU8::new(storage.future_age_to_flush()),
|
2021-09-16 19:36:23 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-10 16:32:10 -07:00
|
|
|
#[derive(Debug, Default)]
|
2020-10-21 17:05:27 -07:00
|
|
|
pub struct AccountMapEntryInner<T> {
|
|
|
|
ref_count: AtomicU64,
|
|
|
|
pub slot_list: RwLock<SlotList<T>>,
|
2021-09-16 19:36:23 -07:00
|
|
|
pub meta: AccountMapEntryMeta,
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<T: IndexValue> AccountMapEntryInner<T> {
|
2021-09-21 10:40:07 -07:00
|
|
|
pub fn new(slot_list: SlotList<T>, ref_count: RefCount, meta: AccountMapEntryMeta) -> Self {
|
|
|
|
Self {
|
|
|
|
slot_list: RwLock::new(slot_list),
|
|
|
|
ref_count: AtomicU64::new(ref_count),
|
|
|
|
meta,
|
|
|
|
}
|
|
|
|
}
|
2021-09-02 16:25:27 -07:00
|
|
|
pub fn ref_count(&self) -> RefCount {
|
2021-02-04 09:11:05 -08:00
|
|
|
self.ref_count.load(Ordering::Relaxed)
|
|
|
|
}
|
2021-09-02 16:25:27 -07:00
|
|
|
|
|
|
|
pub fn add_un_ref(&self, add: bool) {
|
|
|
|
if add {
|
|
|
|
self.ref_count.fetch_add(1, Ordering::Relaxed);
|
|
|
|
} else {
|
|
|
|
self.ref_count.fetch_sub(1, Ordering::Relaxed);
|
|
|
|
}
|
2021-09-16 19:36:23 -07:00
|
|
|
self.set_dirty(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn dirty(&self) -> bool {
|
|
|
|
self.meta.dirty.load(Ordering::Relaxed)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn set_dirty(&self, value: bool) -> bool {
|
|
|
|
self.meta.dirty.swap(value, Ordering::Relaxed)
|
2021-09-02 16:25:27 -07:00
|
|
|
}
|
2021-09-21 06:41:17 -07:00
|
|
|
|
|
|
|
pub fn age(&self) -> Age {
|
|
|
|
self.meta.age.load(Ordering::Relaxed)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn set_age(&self, value: Age) {
|
|
|
|
self.meta.age.store(value, Ordering::Relaxed)
|
|
|
|
}
|
2021-02-04 09:11:05 -08:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
pub enum AccountIndexGetResult<'a, T: IndexValue> {
|
2021-04-23 07:33:14 -07:00
|
|
|
Found(ReadAccountMapEntry<T>, usize),
|
|
|
|
NotFoundOnFork,
|
2021-05-21 08:02:14 -07:00
|
|
|
Missing(AccountMapsReadLock<'a, T>),
|
2021-04-23 07:33:14 -07:00
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
#[self_referencing]
|
2021-09-14 15:51:07 -07:00
|
|
|
pub struct ReadAccountMapEntry<T: IndexValue> {
|
2020-11-13 01:12:41 -08:00
|
|
|
owned_entry: AccountMapEntry<T>,
|
2020-10-21 17:05:27 -07:00
|
|
|
#[borrows(owned_entry)]
|
2021-06-24 23:53:54 -07:00
|
|
|
#[covariant]
|
2020-11-13 01:12:41 -08:00
|
|
|
slot_list_guard: RwLockReadGuard<'this, SlotList<T>>,
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<T: IndexValue> Debug for ReadAccountMapEntry<T> {
|
2021-09-01 19:52:16 -07:00
|
|
|
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
|
|
|
write!(f, "{:?}", self.borrow_owned_entry())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<T: IndexValue> ReadAccountMapEntry<T> {
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self {
|
|
|
|
ReadAccountMapEntryBuilder {
|
|
|
|
owned_entry: account_map_entry,
|
|
|
|
slot_list_guard_builder: |lock| lock.slot_list.read().unwrap(),
|
|
|
|
}
|
|
|
|
.build()
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn slot_list(&self) -> &SlotList<T> {
|
2020-11-13 01:12:41 -08:00
|
|
|
&*self.borrow_slot_list_guard()
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-08-09 06:59:56 -07:00
|
|
|
pub fn ref_count(&self) -> RefCount {
|
2021-09-02 16:25:27 -07:00
|
|
|
self.borrow_owned_entry().ref_count()
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
2021-02-04 12:44:19 -08:00
|
|
|
|
|
|
|
pub fn unref(&self) {
|
2021-09-02 16:25:27 -07:00
|
|
|
self.borrow_owned_entry().add_un_ref(false);
|
2021-02-04 12:44:19 -08:00
|
|
|
}
|
2021-06-02 00:51:46 -07:00
|
|
|
|
|
|
|
pub fn addref(&self) {
|
2021-09-02 16:25:27 -07:00
|
|
|
self.borrow_owned_entry().add_un_ref(true);
|
2021-06-02 00:51:46 -07:00
|
|
|
}
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[self_referencing]
|
2021-09-14 15:51:07 -07:00
|
|
|
pub struct WriteAccountMapEntry<T: IndexValue> {
|
2020-11-13 01:12:41 -08:00
|
|
|
owned_entry: AccountMapEntry<T>,
|
2020-10-21 17:05:27 -07:00
|
|
|
#[borrows(owned_entry)]
|
2021-06-24 23:53:54 -07:00
|
|
|
#[covariant]
|
2020-11-13 01:12:41 -08:00
|
|
|
slot_list_guard: RwLockWriteGuard<'this, SlotList<T>>,
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<T: IndexValue> WriteAccountMapEntry<T> {
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self {
|
|
|
|
WriteAccountMapEntryBuilder {
|
|
|
|
owned_entry: account_map_entry,
|
|
|
|
slot_list_guard_builder: |lock| lock.slot_list.write().unwrap(),
|
|
|
|
}
|
|
|
|
.build()
|
|
|
|
}
|
|
|
|
|
2021-09-03 08:45:37 -07:00
|
|
|
pub fn slot_list(&self) -> &SlotList<T> {
|
2020-11-13 01:12:41 -08:00
|
|
|
&*self.borrow_slot_list_guard()
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2020-11-13 01:12:41 -08:00
|
|
|
pub fn slot_list_mut<RT>(
|
|
|
|
&mut self,
|
|
|
|
user: impl for<'this> FnOnce(&mut RwLockWriteGuard<'this, SlotList<T>>) -> RT,
|
|
|
|
) -> RT {
|
2021-09-16 19:36:23 -07:00
|
|
|
let result = self.with_slot_list_guard_mut(user);
|
|
|
|
self.borrow_owned_entry().set_dirty(true);
|
|
|
|
result
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-05-19 14:21:24 -07:00
|
|
|
// create an entry that is equivalent to this process:
|
|
|
|
// 1. new empty (refcount=0, slot_list={})
|
|
|
|
// 2. update(slot, account_info)
|
|
|
|
// This code is called when the first entry [ie. (slot,account_info)] for a pubkey is inserted into the index.
|
2021-09-19 18:22:09 -07:00
|
|
|
pub fn new_entry_after_update(
|
|
|
|
slot: Slot,
|
|
|
|
account_info: T,
|
|
|
|
storage: &Arc<BucketMapHolder<T>>,
|
|
|
|
) -> AccountMapEntry<T> {
|
2021-05-19 14:21:24 -07:00
|
|
|
let ref_count = if account_info.is_cached() { 0 } else { 1 };
|
2021-09-21 10:40:07 -07:00
|
|
|
Arc::new(AccountMapEntryInner::new(
|
|
|
|
vec![(slot, account_info)],
|
|
|
|
ref_count,
|
|
|
|
AccountMapEntryMeta::new_dirty(storage),
|
|
|
|
))
|
2021-05-19 14:21:24 -07:00
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
// Try to update an item in the slot list the given `slot` If an item for the slot
|
|
|
|
// already exists in the list, remove the older item, add it to `reclaims`, and insert
|
|
|
|
// the new item.
|
|
|
|
pub fn update(&mut self, slot: Slot, account_info: T, reclaims: &mut SlotList<T>) {
|
2021-05-19 06:48:46 -07:00
|
|
|
let mut addref = !account_info.is_cached();
|
|
|
|
self.slot_list_mut(|list| {
|
2021-09-12 19:54:09 -07:00
|
|
|
addref =
|
|
|
|
InMemAccountsIndex::update_slot_list(list, slot, account_info, reclaims, false);
|
2021-05-19 06:48:46 -07:00
|
|
|
});
|
|
|
|
if addref {
|
2021-02-04 12:44:19 -08:00
|
|
|
// If it's the first non-cache insert, also bump the stored ref count
|
2021-09-03 08:45:37 -07:00
|
|
|
self.borrow_owned_entry().add_un_ref(true);
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-28 09:33:07 -07:00
|
|
|
#[derive(Debug, Default, AbiExample, Clone)]
|
2021-04-12 10:11:33 -07:00
|
|
|
pub struct RollingBitField {
|
|
|
|
max_width: u64,
|
|
|
|
min: u64,
|
|
|
|
max: u64, // exclusive
|
|
|
|
bits: BitVec,
|
|
|
|
count: usize,
|
2021-04-29 07:11:28 -07:00
|
|
|
// These are items that are true and lower than min.
|
|
|
|
// They would cause us to exceed max_width if we stored them in our bit field.
|
2021-05-28 09:33:07 -07:00
|
|
|
// We only expect these items in conditions where there is some other bug in the system
|
|
|
|
// or in testing when large ranges are created.
|
2021-04-29 07:11:28 -07:00
|
|
|
excess: HashSet<u64>,
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
2021-05-28 09:33:07 -07:00
|
|
|
|
|
|
|
impl PartialEq<RollingBitField> for RollingBitField {
    /// Value equality: two fields are equal when they contain the same keys,
    /// regardless of how those keys are laid out internally (bits vs excess).
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len() && self.get_all().iter().all(|item| other.contains(item))
    }
}
|
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
// functionally similar to a hashset
|
|
|
|
// Relies on there being a sliding window of key values. The key values continue to increase.
|
|
|
|
// Old key values are removed from the lesser values and do not accumulate.
|
|
|
|
impl RollingBitField {
|
|
|
|
pub fn new(max_width: u64) -> Self {
|
|
|
|
assert!(max_width > 0);
|
|
|
|
assert!(max_width.is_power_of_two()); // power of 2 to make dividing a shift
|
|
|
|
let bits = BitVec::new_fill(false, max_width);
|
|
|
|
Self {
|
|
|
|
max_width,
|
|
|
|
bits,
|
|
|
|
count: 0,
|
|
|
|
min: 0,
|
|
|
|
max: 0,
|
2021-04-29 07:11:28 -07:00
|
|
|
excess: HashSet::new(),
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// find the array index
|
|
|
|
fn get_address(&self, key: &u64) -> u64 {
|
|
|
|
key % self.max_width
|
|
|
|
}
|
|
|
|
|
2021-04-23 09:09:39 -07:00
|
|
|
pub fn range_width(&self) -> u64 {
|
|
|
|
// note that max isn't updated on remove, so it can be above the current max
|
|
|
|
self.max - self.min
|
|
|
|
}
|
|
|
|
|
2021-06-08 07:32:16 -07:00
|
|
|
pub fn min(&self) -> Option<u64> {
|
|
|
|
if self.is_empty() {
|
|
|
|
None
|
|
|
|
} else if self.excess.is_empty() {
|
|
|
|
Some(self.min)
|
|
|
|
} else {
|
|
|
|
let mut min = if self.all_items_in_excess() {
|
|
|
|
u64::MAX
|
|
|
|
} else {
|
|
|
|
self.min
|
|
|
|
};
|
|
|
|
for item in &self.excess {
|
|
|
|
min = std::cmp::min(min, *item);
|
|
|
|
}
|
|
|
|
Some(min)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
pub fn insert(&mut self, key: u64) {
|
2021-05-28 09:33:07 -07:00
|
|
|
let mut bits_empty = self.count == 0 || self.all_items_in_excess();
|
2021-04-29 07:11:28 -07:00
|
|
|
let update_bits = if bits_empty {
|
|
|
|
true // nothing in bits, so in range
|
|
|
|
} else if key < self.min {
|
|
|
|
// bits not empty and this insert is before min, so add to excess
|
|
|
|
if self.excess.insert(key) {
|
|
|
|
self.count += 1;
|
|
|
|
}
|
|
|
|
false
|
|
|
|
} else if key < self.max {
|
|
|
|
true // fits current bit field range
|
|
|
|
} else {
|
|
|
|
// key is >= max
|
|
|
|
let new_max = key + 1;
|
|
|
|
loop {
|
|
|
|
let new_width = new_max.saturating_sub(self.min);
|
|
|
|
if new_width <= self.max_width {
|
|
|
|
// this key will fit the max range
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// move the min item from bits to excess and then purge from min to make room for this new max
|
|
|
|
let inserted = self.excess.insert(self.min);
|
|
|
|
assert!(inserted);
|
|
|
|
|
|
|
|
let key = self.min;
|
|
|
|
let address = self.get_address(&key);
|
|
|
|
self.bits.set(address, false);
|
|
|
|
self.purge(&key);
|
2021-05-28 09:33:07 -07:00
|
|
|
|
|
|
|
if self.all_items_in_excess() {
|
|
|
|
// if we moved the last existing item to excess, then we are ready to insert the new item in the bits
|
|
|
|
bits_empty = true;
|
|
|
|
break;
|
|
|
|
}
|
2021-04-29 07:11:28 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
true // moved things to excess if necessary, so update bits with the new entry
|
|
|
|
};
|
|
|
|
|
|
|
|
if update_bits {
|
|
|
|
let address = self.get_address(&key);
|
|
|
|
let value = self.bits.get(address);
|
|
|
|
if !value {
|
|
|
|
self.bits.set(address, true);
|
|
|
|
if bits_empty {
|
|
|
|
self.min = key;
|
|
|
|
self.max = key + 1;
|
|
|
|
} else {
|
|
|
|
self.min = std::cmp::min(self.min, key);
|
|
|
|
self.max = std::cmp::max(self.max, key + 1);
|
2021-05-28 09:33:07 -07:00
|
|
|
assert!(
|
|
|
|
self.min + self.max_width >= self.max,
|
|
|
|
"min: {}, max: {}, max_width: {}",
|
|
|
|
self.min,
|
|
|
|
self.max,
|
|
|
|
self.max_width
|
|
|
|
);
|
2021-04-29 07:11:28 -07:00
|
|
|
}
|
|
|
|
self.count += 1;
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-27 07:15:44 -07:00
|
|
|
pub fn remove(&mut self, key: &u64) -> bool {
|
2021-04-29 07:11:28 -07:00
|
|
|
if key >= &self.min {
|
|
|
|
// if asked to remove something bigger than max, then no-op
|
|
|
|
if key < &self.max {
|
|
|
|
let address = self.get_address(key);
|
|
|
|
let get = self.bits.get(address);
|
|
|
|
if get {
|
|
|
|
self.count -= 1;
|
|
|
|
self.bits.set(address, false);
|
|
|
|
self.purge(key);
|
|
|
|
}
|
|
|
|
get
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// asked to remove something < min. would be in excess if it exists
|
|
|
|
let remove = self.excess.remove(key);
|
|
|
|
if remove {
|
|
|
|
self.count -= 1;
|
|
|
|
}
|
|
|
|
remove
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-28 09:33:07 -07:00
|
|
|
fn all_items_in_excess(&self) -> bool {
|
|
|
|
self.excess.len() == self.count
|
|
|
|
}
|
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
// after removing 'key' where 'key' = min, make min the correct new min value
|
|
|
|
fn purge(&mut self, key: &u64) {
|
2021-05-28 09:33:07 -07:00
|
|
|
if self.count > 0 && !self.all_items_in_excess() {
|
2021-04-23 09:09:39 -07:00
|
|
|
if key == &self.min {
|
|
|
|
let start = self.min + 1; // min just got removed
|
|
|
|
for key in start..self.max {
|
|
|
|
if self.contains_assume_in_range(&key) {
|
|
|
|
self.min = key;
|
|
|
|
break;
|
|
|
|
}
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
}
|
2021-04-23 09:09:39 -07:00
|
|
|
} else {
|
2021-05-28 09:33:07 -07:00
|
|
|
// The idea is that there are no items in the bitfield anymore.
|
|
|
|
// But, there MAY be items in excess. The model works such that items < min go into excess.
|
|
|
|
// So, after purging all items from bitfield, we hold max to be what it previously was, but set min to max.
|
|
|
|
// Thus, if we lookup >= max, answer is always false without having to look in excess.
|
|
|
|
// If we changed max here to 0, we would lose the ability to know the range of items in excess (if any).
|
|
|
|
// So, now, with min updated = max:
|
|
|
|
// If we lookup < max, then we first check min.
|
|
|
|
// If >= min, then we look in bitfield.
|
|
|
|
// Otherwise, we look in excess since the request is < min.
|
|
|
|
// So, resetting min like this after a remove results in the correct behavior for the model.
|
|
|
|
// Later, if we insert and there are 0 items total (excess + bitfield), then we reset min/max to reflect the new item only.
|
|
|
|
self.min = self.max;
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn contains_assume_in_range(&self, key: &u64) -> bool {
|
|
|
|
// the result may be aliased. Caller is responsible for determining key is in range.
|
|
|
|
let address = self.get_address(key);
|
|
|
|
self.bits.get(address)
|
|
|
|
}
|
|
|
|
|
2021-05-28 09:33:07 -07:00
|
|
|
// This is the 99% use case.
|
|
|
|
// This needs be fast for the most common case of asking for key >= min.
|
2021-04-12 10:11:33 -07:00
|
|
|
pub fn contains(&self, key: &u64) -> bool {
|
2021-04-29 07:11:28 -07:00
|
|
|
if key < &self.max {
|
|
|
|
if key >= &self.min {
|
2021-05-28 09:33:07 -07:00
|
|
|
// in the bitfield range
|
2021-04-29 07:11:28 -07:00
|
|
|
self.contains_assume_in_range(key)
|
|
|
|
} else {
|
|
|
|
self.excess.contains(key)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn len(&self) -> usize {
|
|
|
|
self.count
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_empty(&self) -> bool {
|
|
|
|
self.len() == 0
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn clear(&mut self) {
|
|
|
|
let mut n = Self::new(self.max_width);
|
|
|
|
std::mem::swap(&mut n, self);
|
|
|
|
}
|
|
|
|
|
2021-06-14 21:04:01 -07:00
|
|
|
pub fn max(&self) -> u64 {
|
|
|
|
self.max
|
|
|
|
}
|
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
pub fn get_all(&self) -> Vec<u64> {
|
|
|
|
let mut all = Vec::with_capacity(self.count);
|
2021-04-29 07:11:28 -07:00
|
|
|
self.excess.iter().for_each(|slot| all.push(*slot));
|
2021-04-12 10:11:33 -07:00
|
|
|
for key in self.min..self.max {
|
|
|
|
if self.contains_assume_in_range(&key) {
|
|
|
|
all.push(key);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
all
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug)]
|
2020-10-21 17:05:27 -07:00
|
|
|
pub struct RootsTracker {
|
2021-04-12 10:11:33 -07:00
|
|
|
roots: RollingBitField,
|
2020-11-16 17:23:11 -08:00
|
|
|
max_root: Slot,
|
2020-10-21 17:05:27 -07:00
|
|
|
uncleaned_roots: HashSet<Slot>,
|
|
|
|
previous_uncleaned_roots: HashSet<Slot>,
|
|
|
|
}
|
2019-05-30 21:31:35 -07:00
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
impl Default for RootsTracker {
|
|
|
|
fn default() -> Self {
|
|
|
|
// we expect to keep a rolling set of 400k slots around at a time
|
2021-04-20 19:18:15 -07:00
|
|
|
// 4M gives us plenty of extra(?!) room to handle a width 10x what we should need.
|
|
|
|
// cost is 4M bits of memory, which is .5MB
|
|
|
|
RootsTracker::new(4194304)
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl RootsTracker {
|
|
|
|
pub fn new(max_width: u64) -> Self {
|
|
|
|
Self {
|
|
|
|
roots: RollingBitField::new(max_width),
|
|
|
|
max_root: 0,
|
|
|
|
uncleaned_roots: HashSet::new(),
|
|
|
|
previous_uncleaned_roots: HashSet::new(),
|
|
|
|
}
|
|
|
|
}
|
2021-06-08 07:32:16 -07:00
|
|
|
|
|
|
|
pub fn min_root(&self) -> Option<Slot> {
|
|
|
|
self.roots.min()
|
|
|
|
}
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
|
2021-01-27 01:39:47 -08:00
|
|
|
#[derive(Debug, Default)]
|
|
|
|
pub struct AccountsIndexRootsStats {
|
|
|
|
pub roots_len: usize,
|
|
|
|
pub uncleaned_roots_len: usize,
|
|
|
|
pub previous_uncleaned_roots_len: usize,
|
2021-04-23 09:09:39 -07:00
|
|
|
pub roots_range: u64,
|
2021-04-28 11:24:01 -07:00
|
|
|
pub rooted_cleaned_count: usize,
|
|
|
|
pub unrooted_cleaned_count: usize,
|
2021-01-27 01:39:47 -08:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
pub struct AccountsIndexIterator<'a, T: IndexValue> {
|
2021-06-28 13:03:57 -07:00
|
|
|
account_maps: &'a LockMapTypeSlice<T>,
|
2021-08-04 07:18:05 -07:00
|
|
|
bin_calculator: &'a PubkeyBinCalculator16,
|
2020-10-21 17:05:27 -07:00
|
|
|
start_bound: Bound<Pubkey>,
|
|
|
|
end_bound: Bound<Pubkey>,
|
|
|
|
is_finished: bool,
|
2021-08-26 16:12:43 -07:00
|
|
|
collect_all_unsorted: bool,
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<'a, T: IndexValue> AccountsIndexIterator<'a, T> {
|
2021-09-10 15:52:49 -07:00
|
|
|
fn range<R>(
|
|
|
|
map: &AccountMapsReadLock<T>,
|
2021-08-26 16:12:43 -07:00
|
|
|
range: R,
|
|
|
|
collect_all_unsorted: bool,
|
2021-09-10 15:52:49 -07:00
|
|
|
) -> Vec<(Pubkey, AccountMapEntry<T>)>
|
2021-08-26 16:12:43 -07:00
|
|
|
where
|
2021-09-12 19:57:15 -07:00
|
|
|
R: RangeBounds<Pubkey> + std::fmt::Debug,
|
2021-08-26 16:12:43 -07:00
|
|
|
{
|
2021-09-12 19:57:15 -07:00
|
|
|
let mut result = map.items(&Some(&range));
|
2021-08-26 16:12:43 -07:00
|
|
|
if !collect_all_unsorted {
|
2021-09-10 15:52:49 -07:00
|
|
|
result.sort_unstable_by(|a, b| a.0.cmp(&b.0));
|
2021-08-26 16:12:43 -07:00
|
|
|
}
|
|
|
|
result
|
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
fn clone_bound(bound: Bound<&Pubkey>) -> Bound<Pubkey> {
|
|
|
|
match bound {
|
|
|
|
Unbounded => Unbounded,
|
|
|
|
Included(k) => Included(*k),
|
|
|
|
Excluded(k) => Excluded(*k),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-04 07:18:05 -07:00
|
|
|
fn bin_from_bound(&self, bound: &Bound<Pubkey>, unbounded_bin: usize) -> usize {
|
2021-07-27 11:40:45 -07:00
|
|
|
match bound {
|
2021-08-04 07:18:05 -07:00
|
|
|
Bound::Included(bound) | Bound::Excluded(bound) => {
|
|
|
|
self.bin_calculator.bin_from_pubkey(bound)
|
|
|
|
}
|
2021-07-27 11:40:45 -07:00
|
|
|
Bound::Unbounded => unbounded_bin,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-15 08:26:50 -07:00
|
|
|
fn start_bin(&self) -> usize {
|
|
|
|
// start in bin where 'start_bound' would exist
|
2021-08-04 07:18:05 -07:00
|
|
|
self.bin_from_bound(&self.start_bound, 0)
|
2021-07-27 11:40:45 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn end_bin_inclusive(&self) -> usize {
|
|
|
|
// end in bin where 'end_bound' would exist
|
2021-08-04 07:18:05 -07:00
|
|
|
self.bin_from_bound(&self.end_bound, usize::MAX)
|
2021-07-27 11:40:45 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn bin_start_and_range(&self) -> (usize, usize) {
|
|
|
|
let start_bin = self.start_bin();
|
|
|
|
// calculate the max range of bins to look in
|
|
|
|
let end_bin_inclusive = self.end_bin_inclusive();
|
|
|
|
let bin_range = if start_bin > end_bin_inclusive {
|
|
|
|
0 // empty range
|
|
|
|
} else if end_bin_inclusive == usize::MAX {
|
|
|
|
usize::MAX
|
|
|
|
} else {
|
|
|
|
// the range is end_inclusive + 1 - start
|
|
|
|
// end_inclusive could be usize::MAX already if no bound was specified
|
|
|
|
end_bin_inclusive.saturating_add(1) - start_bin
|
|
|
|
};
|
|
|
|
(start_bin, bin_range)
|
2021-07-15 08:26:50 -07:00
|
|
|
}
|
|
|
|
|
2021-09-03 16:00:49 -07:00
|
|
|
pub fn new<R>(
|
|
|
|
index: &'a AccountsIndex<T>,
|
|
|
|
range: Option<&R>,
|
|
|
|
collect_all_unsorted: bool,
|
|
|
|
) -> Self
|
2020-10-21 17:05:27 -07:00
|
|
|
where
|
|
|
|
R: RangeBounds<Pubkey>,
|
|
|
|
{
|
|
|
|
Self {
|
|
|
|
start_bound: range
|
|
|
|
.as_ref()
|
|
|
|
.map(|r| Self::clone_bound(r.start_bound()))
|
|
|
|
.unwrap_or(Unbounded),
|
|
|
|
end_bound: range
|
|
|
|
.as_ref()
|
|
|
|
.map(|r| Self::clone_bound(r.end_bound()))
|
|
|
|
.unwrap_or(Unbounded),
|
2021-08-04 07:18:05 -07:00
|
|
|
account_maps: &index.account_maps,
|
2020-10-21 17:05:27 -07:00
|
|
|
is_finished: false,
|
2021-08-04 07:18:05 -07:00
|
|
|
bin_calculator: &index.bin_calculator,
|
2021-08-26 16:12:43 -07:00
|
|
|
collect_all_unsorted,
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
}
|
2021-09-17 15:19:29 -07:00
|
|
|
|
|
|
|
pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool)
|
|
|
|
where
|
|
|
|
R: RangeBounds<Pubkey> + Debug,
|
|
|
|
{
|
|
|
|
// forward this hold request ONLY to the bins which contain keys in the specified range
|
|
|
|
let (start_bin, bin_range) = self.bin_start_and_range();
|
|
|
|
self.account_maps
|
|
|
|
.iter()
|
|
|
|
.skip(start_bin)
|
|
|
|
.take(bin_range)
|
|
|
|
.for_each(|map| {
|
|
|
|
map.read()
|
|
|
|
.unwrap()
|
|
|
|
.hold_range_in_memory(range, start_holding);
|
|
|
|
});
|
|
|
|
}
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<'a, T: IndexValue> Iterator for AccountsIndexIterator<'a, T> {
|
2020-10-21 17:05:27 -07:00
|
|
|
type Item = Vec<(Pubkey, AccountMapEntry<T>)>;
|
|
|
|
fn next(&mut self) -> Option<Self::Item> {
|
|
|
|
if self.is_finished {
|
|
|
|
return None;
|
|
|
|
}
|
2021-07-27 11:40:45 -07:00
|
|
|
let (start_bin, bin_range) = self.bin_start_and_range();
|
2021-08-31 18:03:42 -07:00
|
|
|
let mut chunk = Vec::with_capacity(ITER_BATCH_SIZE);
|
2021-07-27 11:40:45 -07:00
|
|
|
'outer: for i in self.account_maps.iter().skip(start_bin).take(bin_range) {
|
2021-08-26 16:12:43 -07:00
|
|
|
for (pubkey, account_map_entry) in Self::range(
|
|
|
|
&i.read().unwrap(),
|
|
|
|
(self.start_bound, self.end_bound),
|
|
|
|
self.collect_all_unsorted,
|
|
|
|
) {
|
|
|
|
if chunk.len() >= ITER_BATCH_SIZE && !self.collect_all_unsorted {
|
2021-07-13 09:11:17 -07:00
|
|
|
break 'outer;
|
|
|
|
}
|
2021-09-10 15:52:49 -07:00
|
|
|
let item = (pubkey, account_map_entry);
|
2021-07-13 09:11:17 -07:00
|
|
|
chunk.push(item);
|
|
|
|
}
|
|
|
|
}
|
2020-10-21 17:05:27 -07:00
|
|
|
|
|
|
|
if chunk.is_empty() {
|
|
|
|
self.is_finished = true;
|
|
|
|
return None;
|
2021-08-26 16:12:43 -07:00
|
|
|
} else if self.collect_all_unsorted {
|
|
|
|
self.is_finished = true;
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
self.start_bound = Excluded(chunk.last().unwrap().0);
|
|
|
|
Some(chunk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-24 09:50:19 -08:00
|
|
|
pub trait ZeroLamport {
|
|
|
|
fn is_zero_lamport(&self) -> bool;
|
|
|
|
}
|
|
|
|
|
2021-09-10 19:09:51 -07:00
|
|
|
type MapType<T> = AccountMap<T>;
|
2021-06-28 13:03:57 -07:00
|
|
|
type LockMapType<T> = Vec<RwLock<MapType<T>>>;
|
|
|
|
type LockMapTypeSlice<T> = [RwLock<MapType<T>>];
|
2021-06-17 12:41:52 -07:00
|
|
|
type AccountMapsWriteLock<'a, T> = RwLockWriteGuard<'a, MapType<T>>;
|
|
|
|
type AccountMapsReadLock<'a, T> = RwLockReadGuard<'a, MapType<T>>;
|
2021-05-14 13:27:10 -07:00
|
|
|
|
2021-06-14 21:04:01 -07:00
|
|
|
/// Per-scan record of whether the slot under scan was removed, plus a ref count.
#[derive(Debug, Default)]
pub struct ScanSlotTracker {
    is_removed: bool,
    ref_count: u64,
}

impl ScanSlotTracker {
    /// Whether the tracked slot has been marked removed.
    pub fn is_removed(&self) -> bool {
        self.is_removed
    }

    /// Record that the tracked slot was removed while the scan was in flight.
    pub fn mark_removed(&mut self) {
        self.is_removed = true;
    }
}
|
|
|
|
|
2021-05-12 15:29:30 -07:00
|
|
|
/// The in-memory index mapping pubkeys to account locations, with optional
/// secondary indexes and root/scan tracking.
#[derive(Debug)]
pub struct AccountsIndex<T: IndexValue> {
    /// Primary index, partitioned into bins; a pubkey's bin is chosen by `bin_calculator`.
    pub account_maps: LockMapType<T>,
    /// Maps a pubkey to its bin number in `account_maps`.
    pub bin_calculator: PubkeyBinCalculator16,
    /// Secondary index: owner program id -> account pubkeys.
    program_id_index: SecondaryIndex<DashMapSecondaryIndexEntry>,
    /// Secondary index: SPL token mint -> token account pubkeys.
    spl_token_mint_index: SecondaryIndex<DashMapSecondaryIndexEntry>,
    /// Secondary index: SPL token owner -> token account pubkeys.
    spl_token_owner_index: SecondaryIndex<RwLockSecondaryIndexEntry>,
    /// Tracks which slots are roots (see `latest_slot` / `get_rooted_entries`).
    roots_tracker: RwLock<RootsTracker>,
    /// max_root -> count of in-flight scans pinned at that root; also taken by
    /// clean_accounts() so cleanup cannot pass a root a scan depends on
    /// (see `do_checked_scan_accounts`).
    ongoing_scan_roots: RwLock<BTreeMap<Slot, u64>>,
    // Each scan has some latest slot `S` that is the tip of the fork the scan
    // is iterating over. The unique id of that slot `S` is recorded here (note we don't use
    // `S` as the id because there can be more than one version of a slot `S`). If a fork
    // is abandoned, all of the slots on that fork up to `S` will be removed via
    // `AccountsDb::remove_unrooted_slots()`. When the scan finishes, it'll realize that the
    // results of the scan may have been corrupted by `remove_unrooted_slots` and abort its results.
    //
    // `removed_bank_ids` tracks all the slot ids that were removed via `remove_unrooted_slots()` so any attempted scans
    // on any of these slots fails. This is safe to purge once the associated Bank is dropped and
    // scanning the fork with that Bank at the tip is no longer possible.
    pub removed_bank_ids: Mutex<HashSet<BankId>>,
    /// Backing storage for index entries (see `AccountsIndexStorage`).
    storage: AccountsIndexStorage<T>,
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<T: IndexValue> AccountsIndex<T> {
|
2021-08-04 19:58:53 -07:00
|
|
|
    /// Construct an index using the test configuration constant.
    pub fn default_for_tests() -> Self {
        Self::new(Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING))
    }
|
|
|
|
|
2021-08-17 12:50:01 -07:00
|
|
|
pub fn new(config: Option<AccountsIndexConfig>) -> Self {
|
2021-09-12 15:14:59 -07:00
|
|
|
let (account_maps, bin_calculator, storage) = Self::allocate_accounts_index(config);
|
2021-05-12 15:29:30 -07:00
|
|
|
Self {
|
2021-08-04 13:28:35 -07:00
|
|
|
account_maps,
|
|
|
|
bin_calculator,
|
2021-05-12 15:29:30 -07:00
|
|
|
program_id_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new(
|
|
|
|
"program_id_index_stats",
|
|
|
|
),
|
|
|
|
spl_token_mint_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new(
|
|
|
|
"spl_token_mint_index_stats",
|
|
|
|
),
|
|
|
|
spl_token_owner_index: SecondaryIndex::<RwLockSecondaryIndexEntry>::new(
|
|
|
|
"spl_token_owner_index_stats",
|
|
|
|
),
|
|
|
|
roots_tracker: RwLock::<RootsTracker>::default(),
|
|
|
|
ongoing_scan_roots: RwLock::<BTreeMap<Slot, u64>>::default(),
|
2021-06-14 21:04:01 -07:00
|
|
|
removed_bank_ids: Mutex::<HashSet<BankId>>::default(),
|
2021-09-12 15:14:59 -07:00
|
|
|
storage,
|
2021-05-12 15:29:30 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-17 12:50:01 -07:00
|
|
|
fn allocate_accounts_index(
|
|
|
|
config: Option<AccountsIndexConfig>,
|
2021-09-13 20:59:03 -07:00
|
|
|
) -> (
|
|
|
|
LockMapType<T>,
|
|
|
|
PubkeyBinCalculator16,
|
|
|
|
AccountsIndexStorage<T>,
|
|
|
|
) {
|
2021-08-17 12:50:01 -07:00
|
|
|
let bins = config
|
2021-09-17 09:39:41 -07:00
|
|
|
.as_ref()
|
2021-08-17 12:50:01 -07:00
|
|
|
.and_then(|config| config.bins)
|
|
|
|
.unwrap_or(BINS_DEFAULT);
|
2021-09-02 15:58:07 -07:00
|
|
|
// create bin_calculator early to verify # bins is reasonable
|
|
|
|
let bin_calculator = PubkeyBinCalculator16::new(bins);
|
2021-09-17 09:39:41 -07:00
|
|
|
let storage = AccountsIndexStorage::new(bins, &config);
|
2021-08-04 13:28:35 -07:00
|
|
|
let account_maps = (0..bins)
|
|
|
|
.into_iter()
|
2021-09-16 14:12:22 -07:00
|
|
|
.map(|bin| RwLock::new(Arc::clone(&storage.in_mem[bin])))
|
2021-08-04 13:28:35 -07:00
|
|
|
.collect::<Vec<_>>();
|
2021-09-12 15:14:59 -07:00
|
|
|
(account_maps, bin_calculator, storage)
|
2021-08-04 13:28:35 -07:00
|
|
|
}
|
2021-08-04 19:58:53 -07:00
|
|
|
|
2021-09-03 16:00:49 -07:00
|
|
|
    /// Build an iterator over the index, optionally restricted to `range`.
    /// When `collect_all_unsorted` is set, results are yielded without sorting
    /// (see `AccountsIndexIterator`).
    fn iter<R>(&self, range: Option<&R>, collect_all_unsorted: bool) -> AccountsIndexIterator<T>
    where
        R: RangeBounds<Pubkey>,
    {
        AccountsIndexIterator::new(self, range, collect_all_unsorted)
    }
|
|
|
|
|
2020-12-31 18:06:03 -08:00
|
|
|
    /// Run a scan with fork-consistency protection: the scan's `max_root` is
    /// pinned in `ongoing_scan_roots` (blocking cleanup past it), and the scan
    /// aborts if the fork tip `scan_bank_id` is removed before or during the scan.
    fn do_checked_scan_accounts<F, R>(
        &self,
        metric_name: &'static str,
        ancestors: &Ancestors,
        scan_bank_id: BankId,
        func: F,
        scan_type: ScanTypes<R>,
        collect_all_unsorted: bool,
    ) -> Result<(), ScanError>
    where
        F: FnMut(&Pubkey, (&T, Slot)),
        R: RangeBounds<Pubkey> + std::fmt::Debug,
    {
        // Refuse to start if the fork tip was already removed.
        {
            let locked_removed_bank_ids = self.removed_bank_ids.lock().unwrap();
            if locked_removed_bank_ids.contains(&scan_bank_id) {
                return Err(ScanError::SlotRemoved {
                    slot: ancestors.max_slot(),
                    bank_id: scan_bank_id,
                });
            }
        }

        let max_root = {
            let mut w_ongoing_scan_roots = self
                // This lock is also grabbed by clean_accounts(), so clean
                // has at most cleaned up to the current `max_root` (since
                // clean only happens *after* BankForks::set_root() which sets
                // the `max_root`)
                .ongoing_scan_roots
                .write()
                .unwrap();
            // `max_root()` grabs a lock while
            // the `ongoing_scan_roots` lock is held,
            // make sure inverse doesn't happen to avoid
            // deadlock
            let max_root = self.max_root();
            *w_ongoing_scan_roots.entry(max_root).or_default() += 1;

            max_root
        };

        // Invariant (provable by induction over BankForks::set_root()): any bank `B`
        // in BankForks that is a descendant of the current `max_root` satisfies
        // `B.ancestors.contains(max_root)`. At startup, generate_index() adds all
        // snapshot banks via add_root() and there are no descendants yet; after each
        // set_root(R_new), any live descendant `B` of R_new was a descendant of the
        // previous root R, so R_new (an ancestor of B with R < R_new < B) is in
        // B.ancestors, and banks created afterwards inherit it via new_from_parent.
        //
        // Therefore, if `max_root` is NOT in `ancestors`, the scanned bank is either
        // on a different fork or is an ancestor of `max_root`. In either case the
        // given ancestors may reference state already purged by clean (e.g. a pruned
        // sibling fork cleaned against a newer root), so we ignore them and scan
        // only the roots, which are more up to date.
        let empty = Ancestors::default();
        let ancestors = if ancestors.contains_key(&max_root) {
            ancestors
        } else {
            &empty
        };

        // Why the two remaining cases are safe:
        // 1) empty ancestors: identical to scanning a rooted bank; `ongoing_scan_roots`
        //    prevents cleanup of roots the scan needs, and passing `Some(max_root)`
        //    below keeps newer roots out of the results.
        // 2) non-empty ancestors: ancestors <= max_root are rooted and covered by 1);
        //    ancestors > max_root are kept alive through the `Bank::BankRc::parent`
        //    chain from the scanned bank, and no clean can pass the pinned max_root,
        //    so none of them can be cleaned mid-scan. (Similar reasoning backs the
        //    assert!() in AccountsDb::retry_to_get_account_accessor.)
        match scan_type {
            ScanTypes::Unindexed(range) => {
                // Pass "" not to log metrics, so RPC doesn't get spammy
                self.do_scan_accounts(
                    metric_name,
                    ancestors,
                    func,
                    range,
                    Some(max_root),
                    collect_all_unsorted,
                );
            }
            ScanTypes::Indexed(IndexKey::ProgramId(program_id)) => {
                self.do_scan_secondary_index(
                    ancestors,
                    func,
                    &self.program_id_index,
                    &program_id,
                    Some(max_root),
                );
            }
            ScanTypes::Indexed(IndexKey::SplTokenMint(mint_key)) => {
                self.do_scan_secondary_index(
                    ancestors,
                    func,
                    &self.spl_token_mint_index,
                    &mint_key,
                    Some(max_root),
                );
            }
            ScanTypes::Indexed(IndexKey::SplTokenOwner(owner_key)) => {
                self.do_scan_secondary_index(
                    ancestors,
                    func,
                    &self.spl_token_owner_index,
                    &owner_key,
                    Some(max_root),
                );
            }
        }

        // Unpin our max_root so clean may advance past it again.
        {
            let mut ongoing_scan_roots = self.ongoing_scan_roots.write().unwrap();
            let count = ongoing_scan_roots.get_mut(&max_root).unwrap();
            *count -= 1;
            if *count == 0 {
                ongoing_scan_roots.remove(&max_root);
            }
        }

        // If the fork with tip at bank `scan_bank_id` was removed during our scan, then the scan
        // may have been corrupted, so abort the results.
        let was_scan_corrupted = self
            .removed_bank_ids
            .lock()
            .unwrap()
            .contains(&scan_bank_id);

        if was_scan_corrupted {
            Err(ScanError::SlotRemoved {
                slot: ancestors.max_slot(),
                bank_id: scan_bank_id,
            })
        } else {
            Ok(())
        }
    }
|
|
|
|
|
2021-02-03 15:00:42 -08:00
|
|
|
    /// Scan without registering in `ongoing_scan_roots` or checking
    /// `removed_bank_ids`; forwards to `do_scan_accounts` with `max_root = None`.
    fn do_unchecked_scan_accounts<F, R>(
        &self,
        metric_name: &'static str,
        ancestors: &Ancestors,
        func: F,
        range: Option<R>,
        collect_all_unsorted: bool,
    ) where
        F: FnMut(&Pubkey, (&T, Slot)),
        R: RangeBounds<Pubkey> + std::fmt::Debug,
    {
        self.do_scan_accounts(
            metric_name,
            ancestors,
            func,
            range,
            None,
            collect_all_unsorted,
        );
    }
|
|
|
|
|
2020-11-20 13:01:04 -08:00
|
|
|
    // Scan accounts and return latest version of each account that is either:
    // 1) rooted or
    // 2) present in ancestors
    fn do_scan_accounts<F, R>(
        &self,
        metric_name: &'static str,
        ancestors: &Ancestors,
        mut func: F,
        range: Option<R>,
        max_root: Option<Slot>,
        collect_all_unsorted: bool,
    ) where
        F: FnMut(&Pubkey, (&T, Slot)),
        R: RangeBounds<Pubkey> + std::fmt::Debug,
    {
        // TODO: expand to use mint index to find the `pubkey_list` below more efficiently
        // instead of scanning the entire range
        let mut total_elapsed_timer = Measure::start("total");
        let mut num_keys_iterated = 0;
        let mut latest_slot_elapsed = 0;
        let mut load_account_elapsed = 0;
        let mut read_lock_elapsed = 0;
        let mut iterator_elapsed = 0;
        // The iterator timer only measures time spent inside `next()`; it is
        // restarted at the bottom of the loop, before the next batch is fetched.
        let mut iterator_timer = Measure::start("iterator_elapsed");
        for pubkey_list in self.iter(range.as_ref(), collect_all_unsorted) {
            iterator_timer.stop();
            iterator_elapsed += iterator_timer.as_us();
            for (pubkey, list) in pubkey_list {
                num_keys_iterated += 1;
                let mut read_lock_timer = Measure::start("read_lock");
                let list_r = &list.slot_list.read().unwrap();
                read_lock_timer.stop();
                read_lock_elapsed += read_lock_timer.as_us();
                let mut latest_slot_timer = Measure::start("latest_slot");
                // Only entries visible from `ancestors`/roots (capped at max_root) are reported.
                if let Some(index) = self.latest_slot(Some(ancestors), list_r, max_root) {
                    latest_slot_timer.stop();
                    latest_slot_elapsed += latest_slot_timer.as_us();
                    let mut load_account_timer = Measure::start("load_account");
                    func(&pubkey, (&list_r[index].1, list_r[index].0));
                    load_account_timer.stop();
                    load_account_elapsed += load_account_timer.as_us();
                }
            }
            iterator_timer = Measure::start("iterator_elapsed");
        }
        total_elapsed_timer.stop();
        // Callers pass "" as metric_name to suppress datapoint logging (e.g. RPC).
        if !metric_name.is_empty() {
            datapoint_info!(
                metric_name,
                ("total_elapsed", total_elapsed_timer.as_us(), i64),
                ("latest_slot_elapsed", latest_slot_elapsed, i64),
                ("read_lock_elapsed", read_lock_elapsed, i64),
                ("load_account_elapsed", load_account_elapsed, i64),
                ("iterator_elapsed", iterator_elapsed, i64),
                ("num_keys_iterated", num_keys_iterated, i64),
            )
        }
    }
|
|
|
|
|
2020-12-31 18:06:03 -08:00
|
|
|
fn do_scan_secondary_index<
|
|
|
|
F,
|
|
|
|
SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send,
|
|
|
|
>(
|
|
|
|
&self,
|
|
|
|
ancestors: &Ancestors,
|
|
|
|
mut func: F,
|
|
|
|
index: &SecondaryIndex<SecondaryIndexEntryType>,
|
|
|
|
index_key: &Pubkey,
|
|
|
|
max_root: Option<Slot>,
|
|
|
|
) where
|
|
|
|
F: FnMut(&Pubkey, (&T, Slot)),
|
|
|
|
{
|
|
|
|
for pubkey in index.get(index_key) {
|
2021-03-04 23:16:53 -08:00
|
|
|
// Maybe these reads from the AccountsIndex can be batched every time it
|
2020-12-31 18:06:03 -08:00
|
|
|
// grabs the read lock as well...
|
2021-04-23 07:33:14 -07:00
|
|
|
if let AccountIndexGetResult::Found(list_r, index) =
|
|
|
|
self.get(&pubkey, Some(ancestors), max_root)
|
|
|
|
{
|
2020-12-31 18:06:03 -08:00
|
|
|
func(
|
|
|
|
&pubkey,
|
|
|
|
(&list_r.slot_list()[index].1, list_r.slot_list()[index].0),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn get_account_read_entry(&self, pubkey: &Pubkey) -> Option<ReadAccountMapEntry<T>> {
|
2021-06-28 13:03:57 -07:00
|
|
|
let lock = self.get_account_maps_read_lock(pubkey);
|
2021-05-21 11:59:32 -07:00
|
|
|
self.get_account_read_entry_with_lock(pubkey, &lock)
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Look up `pubkey` through an already-held bin read lock.
    /// The caller must pass the lock of the bin that owns `pubkey`.
    pub fn get_account_read_entry_with_lock(
        &self,
        pubkey: &Pubkey,
        lock: &AccountMapsReadLock<'_, T>,
    ) -> Option<ReadAccountMapEntry<T>> {
        lock.get(pubkey)
            .map(ReadAccountMapEntry::from_account_map_entry)
    }
|
|
|
|
|
|
|
|
fn get_account_write_entry(&self, pubkey: &Pubkey) -> Option<WriteAccountMapEntry<T>> {
|
2021-08-04 07:18:05 -07:00
|
|
|
self.account_maps[self.bin_calculator.bin_from_pubkey(pubkey)]
|
2020-10-21 17:05:27 -07:00
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.get(pubkey)
|
|
|
|
.map(WriteAccountMapEntry::from_account_map_entry)
|
|
|
|
}
|
|
|
|
|
2021-05-10 07:22:48 -07:00
|
|
|
pub fn handle_dead_keys(
|
|
|
|
&self,
|
|
|
|
dead_keys: &[&Pubkey],
|
|
|
|
account_indexes: &AccountSecondaryIndexes,
|
|
|
|
) {
|
2020-10-21 17:05:27 -07:00
|
|
|
if !dead_keys.is_empty() {
|
|
|
|
for key in dead_keys.iter() {
|
2021-09-15 10:36:08 -07:00
|
|
|
let w_index = self.get_account_maps_write_lock(key);
|
2021-09-12 08:26:04 -07:00
|
|
|
if w_index.remove_if_slot_list_empty(**key) {
|
2021-09-02 14:54:37 -07:00
|
|
|
// Note it's only safe to remove all the entries for this key
|
|
|
|
// because we have the lock for this key's entry in the AccountsIndex,
|
|
|
|
// so no other thread is also updating the index
|
|
|
|
self.purge_secondary_indexes_by_inner_key(key, account_indexes);
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
2019-07-10 22:06:32 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-13 00:22:14 -07:00
|
|
|
    /// call func with every pubkey and index visible from a given set of ancestors
    pub(crate) fn scan_accounts<F>(
        &self,
        ancestors: &Ancestors,
        scan_bank_id: BankId,
        func: F,
    ) -> Result<(), ScanError>
    where
        F: FnMut(&Pubkey, (&T, Slot)),
    {
        // Full checked scan: results must be sorted.
        let collect_all_unsorted = false;
        // Pass "" not to log metrics, so RPC doesn't get spammy
        self.do_checked_scan_accounts(
            "",
            ancestors,
            scan_bank_id,
            func,
            ScanTypes::Unindexed(None::<Range<Pubkey>>),
            collect_all_unsorted,
        )
    }
|
|
|
|
|
2021-02-03 15:00:42 -08:00
|
|
|
    /// Scan every visible account WITHOUT the fork-consistency checks that
    /// `scan_accounts` performs (no scan-root pinning, no removed-bank abort).
    pub(crate) fn unchecked_scan_accounts<F>(
        &self,
        metric_name: &'static str,
        ancestors: &Ancestors,
        func: F,
        collect_all_unsorted: bool,
    ) where
        F: FnMut(&Pubkey, (&T, Slot)),
    {
        self.do_unchecked_scan_accounts(
            metric_name,
            ancestors,
            func,
            None::<Range<Pubkey>>,
            collect_all_unsorted,
        );
    }
|
|
|
|
|
|
|
|
    /// call func with every pubkey and index visible from a given set of ancestors with range
    pub(crate) fn range_scan_accounts<F, R>(
        &self,
        metric_name: &'static str,
        ancestors: &Ancestors,
        range: R,
        collect_all_unsorted: bool,
        func: F,
    ) where
        F: FnMut(&Pubkey, (&T, Slot)),
        R: RangeBounds<Pubkey> + std::fmt::Debug,
    {
        // Only the rent logic should be calling this, which doesn't need the safety checks
        self.do_unchecked_scan_accounts(
            metric_name,
            ancestors,
            func,
            Some(range),
            collect_all_unsorted,
        );
    }
|
|
|
|
|
2020-12-31 18:06:03 -08:00
|
|
|
    /// call func with every pubkey and index visible from a given set of ancestors,
    /// restricted to pubkeys registered under `index_key` in a secondary index
    pub(crate) fn index_scan_accounts<F>(
        &self,
        ancestors: &Ancestors,
        scan_bank_id: BankId,
        index_key: IndexKey,
        func: F,
    ) -> Result<(), ScanError>
    where
        F: FnMut(&Pubkey, (&T, Slot)),
    {
        // Secondary-index scans return results in index order; unsorted mode is unused.
        let collect_all_unsorted = false;

        // Pass "" not to log metrics, so RPC doesn't get spammy
        self.do_checked_scan_accounts(
            "",
            ancestors,
            scan_bank_id,
            func,
            ScanTypes::<Range<Pubkey>>::Indexed(index_key),
            collect_all_unsorted,
        )
    }
|
|
|
|
|
2020-11-26 13:09:20 -08:00
|
|
|
pub fn get_rooted_entries(&self, slice: SlotSlice<T>, max: Option<Slot>) -> SlotList<T> {
|
2021-04-05 09:26:53 -07:00
|
|
|
let max = max.unwrap_or(Slot::MAX);
|
|
|
|
let lock = &self.roots_tracker.read().unwrap().roots;
|
2020-03-25 21:08:56 -07:00
|
|
|
slice
|
|
|
|
.iter()
|
2021-04-05 09:26:53 -07:00
|
|
|
.filter(|(slot, _)| *slot <= max && lock.contains(slot))
|
2019-10-23 12:46:48 -07:00
|
|
|
.cloned()
|
2019-12-02 09:51:05 -08:00
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2020-07-02 22:25:17 -07:00
|
|
|
// returns the rooted entries and the storage ref count
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn roots_and_ref_count(
|
|
|
|
&self,
|
|
|
|
locked_account_entry: &ReadAccountMapEntry<T>,
|
2020-11-26 13:09:20 -08:00
|
|
|
max: Option<Slot>,
|
2020-10-21 17:05:27 -07:00
|
|
|
) -> (SlotList<T>, RefCount) {
|
2020-07-02 22:25:17 -07:00
|
|
|
(
|
2021-06-18 06:34:46 -07:00
|
|
|
self.get_rooted_entries(locked_account_entry.slot_list(), max),
|
2021-08-09 06:59:56 -07:00
|
|
|
locked_account_entry.ref_count(),
|
2020-07-02 22:25:17 -07:00
|
|
|
)
|
2019-12-02 09:51:05 -08:00
|
|
|
}
|
|
|
|
|
2021-01-17 20:31:03 -08:00
|
|
|
pub fn purge_exact<'a, C>(
|
|
|
|
&'a self,
|
2020-12-31 18:06:03 -08:00
|
|
|
pubkey: &Pubkey,
|
2021-01-17 20:31:03 -08:00
|
|
|
slots_to_purge: &'a C,
|
2021-01-11 17:00:23 -08:00
|
|
|
reclaims: &mut SlotList<T>,
|
2021-01-17 20:31:03 -08:00
|
|
|
) -> bool
|
|
|
|
where
|
|
|
|
C: Contains<'a, Slot>,
|
|
|
|
{
|
2021-06-14 07:10:26 -07:00
|
|
|
if let Some(mut write_account_map_entry) = self.get_account_write_entry(pubkey) {
|
|
|
|
write_account_map_entry.slot_list_mut(|slot_list| {
|
|
|
|
slot_list.retain(|(slot, item)| {
|
2021-06-18 06:34:46 -07:00
|
|
|
let should_purge = slots_to_purge.contains(slot);
|
2021-06-14 07:10:26 -07:00
|
|
|
if should_purge {
|
2021-09-01 06:13:08 -07:00
|
|
|
reclaims.push((*slot, *item));
|
2021-06-14 07:10:26 -07:00
|
|
|
false
|
|
|
|
} else {
|
|
|
|
true
|
|
|
|
}
|
|
|
|
});
|
|
|
|
slot_list.is_empty()
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
true
|
|
|
|
}
|
2020-07-02 22:25:17 -07:00
|
|
|
}
|
|
|
|
|
2020-11-16 17:23:11 -08:00
|
|
|
pub fn min_ongoing_scan_root(&self) -> Option<Slot> {
|
|
|
|
self.ongoing_scan_roots
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.keys()
|
|
|
|
.next()
|
|
|
|
.cloned()
|
|
|
|
}
|
|
|
|
|
2020-09-28 16:04:46 -07:00
|
|
|
    // Given a SlotSlice `L`, a list of ancestors and a maximum slot, find the latest element
    // in `L`, where the slot `S` is an ancestor or root, and if `S` is a root, then `S <= max_root`
    fn latest_slot(
        &self,
        ancestors: Option<&Ancestors>,
        slice: SlotSlice<T>,
        max_root: Option<Slot>,
    ) -> Option<usize> {
        let mut current_max = 0;
        let mut rv = None;
        if let Some(ancestors) = ancestors {
            if !ancestors.is_empty() {
                // Pass 1: newest entry whose slot is one of the ancestors.
                for (i, (slot, _t)) in slice.iter().rev().enumerate() {
                    if (rv.is_none() || *slot > current_max) && ancestors.contains_key(slot) {
                        rv = Some(i);
                        current_max = *slot;
                    }
                }
            }
        }

        let max_root = max_root.unwrap_or(Slot::MAX);
        let mut tracker = None;

        // Pass 2: a rooted slot <= max_root can still beat the ancestor hit
        // if it is newer.
        for (i, (slot, _t)) in slice.iter().rev().enumerate() {
            if (rv.is_none() || *slot > current_max) && *slot <= max_root {
                // Acquire the roots read lock lazily and cache it in `tracker`,
                // so it is taken at most once and only if some slot actually
                // needs a root check.
                let lock = match tracker {
                    Some(inner) => inner,
                    None => self.roots_tracker.read().unwrap(),
                };
                if lock.roots.contains(slot) {
                    rv = Some(i);
                    current_max = *slot;
                }
                tracker = Some(lock);
            }
        }

        // `rv` indexes the reversed iteration; convert back to a forward index.
        rv.map(|index| slice.len() - 1 - index)
    }
|
|
|
|
|
2021-09-17 15:19:29 -07:00
|
|
|
    /// Pin (`start_holding == true`) or release (`false`) the index entries
    /// covering `range` in memory, delegating to the iterator implementation.
    pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool)
    where
        R: RangeBounds<Pubkey> + Debug,
    {
        // collect_all_unsorted = true: ordering is irrelevant here, only the
        // range coverage matters.
        let iter = self.iter(Some(range), true);
        iter.hold_range_in_memory(range, start_holding);
    }
|
2021-09-18 20:08:58 -07:00
|
|
|
|
|
|
|
    /// Toggle startup mode on the underlying index storage (forwarded verbatim;
    /// see `BucketMapHolder::set_startup` for the actual effect).
    pub fn set_startup(&self, value: bool) {
        self.storage.storage.set_startup(value);
    }
|
|
|
|
|
2019-07-10 22:06:32 -07:00
|
|
|
    /// Get an account
    /// The latest account that appears in `ancestors` or `roots` is returned.
    pub(crate) fn get(
        &self,
        pubkey: &Pubkey,
        ancestors: Option<&Ancestors>,
        max_root: Option<Slot>,
    ) -> AccountIndexGetResult<'_, T> {
        let read_lock = self.account_maps[self.bin_calculator.bin_from_pubkey(pubkey)]
            .read()
            .unwrap();
        let account = read_lock
            .get(pubkey)
            .map(ReadAccountMapEntry::from_account_map_entry);

        match account {
            Some(locked_entry) => {
                // The entry-level lock is now held via `locked_entry`, so the
                // bin-level read lock can be released early.
                drop(read_lock);
                let slot_list = locked_entry.slot_list();
                let found_index = self.latest_slot(ancestors, slot_list, max_root);
                match found_index {
                    Some(found_index) => AccountIndexGetResult::Found(locked_entry, found_index),
                    None => AccountIndexGetResult::NotFoundOnFork,
                }
            }
            // `Missing` carries the still-held bin read lock so the caller can
            // act before the bin can change underneath it.
            None => AccountIndexGetResult::Missing(read_lock),
        }
    }
|
|
|
|
|
2020-10-03 15:18:58 -07:00
|
|
|
// Get the maximum root <= `max_allowed_root` from the given `slice`
|
2021-04-30 13:34:38 -07:00
|
|
|
fn get_newest_root_in_slot_list(
|
2021-04-12 10:11:33 -07:00
|
|
|
roots: &RollingBitField,
|
2020-10-03 15:18:58 -07:00
|
|
|
slice: SlotSlice<T>,
|
|
|
|
max_allowed_root: Option<Slot>,
|
|
|
|
) -> Slot {
|
2019-06-13 17:35:16 -07:00
|
|
|
let mut max_root = 0;
|
2020-03-25 21:08:56 -07:00
|
|
|
for (f, _) in slice.iter() {
|
2020-10-03 15:18:58 -07:00
|
|
|
if let Some(max_allowed_root) = max_allowed_root {
|
|
|
|
if *f > max_allowed_root {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2019-06-13 17:35:16 -07:00
|
|
|
if *f > max_root && roots.contains(f) {
|
|
|
|
max_root = *f;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
max_root
|
|
|
|
}
|
|
|
|
|
2021-05-17 09:58:33 -07:00
|
|
|
    /// Insert `pubkey` into whichever secondary indexes are enabled and whose
    /// key filters accept it (program-id always; SPL-token mint/owner only for
    /// correctly-sized SPL token accounts).
    pub(crate) fn update_secondary_indexes(
        &self,
        pubkey: &Pubkey,
        account_owner: &Pubkey,
        account_data: &[u8],
        account_indexes: &AccountSecondaryIndexes,
    ) {
        if account_indexes.is_empty() {
            return;
        }

        if account_indexes.contains(&AccountIndex::ProgramId)
            && account_indexes.include_key(account_owner)
        {
            self.program_id_index.insert(account_owner, pubkey);
        }
        // Note because of the below check below on the account data length, when an
        // account hits zero lamports and is reset to AccountSharedData::Default, then we skip
        // the below updates to the secondary indexes.
        //
        // Skipping means not updating secondary index to mark the account as missing.
        // This doesn't introduce false positives during a scan because the caller to scan
        // provides the ancestors to check. So even if a zero-lamport account is not yet
        // removed from the secondary index, the scan function will:
        // 1) consult the primary index via `get(&pubkey, Some(ancestors), max_root)`
        // and find the zero-lamport version
        // 2) When the fetch from storage occurs, it will return AccountSharedData::Default
        // (as persisted tombstone for snapshots). This will then ultimately be
        // filtered out by post-scan filters, like in `get_filtered_spl_token_accounts_by_owner()`.
        if *account_owner == inline_spl_token_v2_0::id()
            && account_data.len() == inline_spl_token_v2_0::state::Account::get_packed_len()
        {
            if account_indexes.contains(&AccountIndex::SplTokenOwner) {
                // Owner pubkey lives at a fixed offset in the token account layout.
                let owner_key = Pubkey::new(
                    &account_data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET
                        ..SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES],
                );
                if account_indexes.include_key(&owner_key) {
                    self.spl_token_owner_index.insert(&owner_key, pubkey);
                }
            }

            if account_indexes.contains(&AccountIndex::SplTokenMint) {
                // Mint pubkey lives at a fixed offset in the token account layout.
                let mint_key = Pubkey::new(
                    &account_data[SPL_TOKEN_ACCOUNT_MINT_OFFSET
                        ..SPL_TOKEN_ACCOUNT_MINT_OFFSET + PUBKEY_BYTES],
                );
                if account_indexes.include_key(&mint_key) {
                    self.spl_token_mint_index.insert(&mint_key, pubkey);
                }
            }
        }
    }
|
|
|
|
|
2021-06-28 13:03:57 -07:00
|
|
|
fn get_account_maps_write_lock(&self, pubkey: &Pubkey) -> AccountMapsWriteLock<T> {
|
2021-08-04 07:18:05 -07:00
|
|
|
self.account_maps[self.bin_calculator.bin_from_pubkey(pubkey)]
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
2021-05-14 13:27:10 -07:00
|
|
|
}
|
|
|
|
|
2021-06-28 13:03:57 -07:00
|
|
|
pub(crate) fn get_account_maps_read_lock(&self, pubkey: &Pubkey) -> AccountMapsReadLock<T> {
|
2021-08-04 07:18:05 -07:00
|
|
|
self.account_maps[self.bin_calculator.bin_from_pubkey(pubkey)]
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Number of bins the pubkey space is sharded into (one lock per bin).
    pub fn bins(&self) -> usize {
        self.account_maps.len()
    }
|
|
|
|
|
2021-05-20 08:29:13 -07:00
|
|
|
    // Same functionally to upsert, but:
    // 1. operates on a batch of items
    // 2. holds the write lock for the duration of adding the items
    // Can save time when inserting lots of new keys.
    // But, does NOT update secondary index
    // This is designed to be called at startup time.
    //
    // Returns the pubkeys that need further attention (zero-lamport entries and
    // duplicates of already-present non-zero-lamport entries) plus the total
    // time spent inserting, in microseconds.
    #[allow(clippy::needless_collect)]
    pub(crate) fn insert_new_if_missing_into_primary_index(
        &self,
        slot: Slot,
        item_len: usize,
        items: impl Iterator<Item = (Pubkey, T)>,
    ) -> (Vec<Pubkey>, u64) {
        // big enough so not likely to re-allocate, small enough to not over-allocate by too much
        // this assumes the largest bin contains twice the expected amount of the average size per bin
        let bins = self.bins();
        let expected_items_per_bin = item_len * 2 / bins;
        // one (bin_index, entries) bucket per bin; empty buckets are dropped below
        let mut binned = (0..bins)
            .into_iter()
            .map(|pubkey_bin| (pubkey_bin, Vec::with_capacity(expected_items_per_bin)))
            .collect::<Vec<_>>();
        // First pass: bucket every item by bin and remember zero-lamport pubkeys.
        let mut dirty_pubkeys = items
            .filter_map(|(pubkey, account_info)| {
                let bin = self.bin_calculator.bin_from_pubkey(&pubkey);
                // this value is equivalent to what update() below would have created if we inserted a new item
                let is_zero_lamport = account_info.is_zero_lamport();
                let result = if is_zero_lamport { Some(pubkey) } else { None };

                let info = WriteAccountMapEntry::new_entry_after_update(
                    slot,
                    account_info,
                    &self.storage.storage,
                );
                binned[bin].1.push((pubkey, info));
                result
            })
            .collect::<Vec<_>>();
        binned.retain(|x| !x.1.is_empty());

        let insertion_time = AtomicU64::new(0);

        // Second pass: per bin, take the write lock once and insert the batch.
        binned.into_iter().for_each(|(pubkey_bin, items)| {
            let mut _reclaims = SlotList::new();

            // big enough so not likely to re-allocate, small enough to not over-allocate by too much
            // this assumes 10% of keys are duplicates. This vector will be flattened below.
            let w_account_maps = self.account_maps[pubkey_bin].write().unwrap();
            let mut insert_time = Measure::start("insert_into_primary_index");
            items.into_iter().for_each(|(pubkey, new_item)| {
                let already_exists =
                    w_account_maps.insert_new_entry_if_missing_with_lock(pubkey, new_item);
                // Duplicate key: fall back to an in-place update of the existing entry.
                if let Some((mut w_account_entry, account_info, pubkey)) = already_exists {
                    let is_zero_lamport = account_info.is_zero_lamport();
                    w_account_entry.update(slot, account_info, &mut _reclaims);
                    if !is_zero_lamport {
                        // zero lamports were already added to dirty_pubkeys above
                        dirty_pubkeys.push(pubkey);
                    }
                }
            });
            insert_time.stop();
            insertion_time.fetch_add(insert_time.as_us(), Ordering::Relaxed);
        });

        (dirty_pubkeys, insertion_time.load(Ordering::Relaxed))
    }
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
    // Updates the given pubkey at the given slot with the new account information,
    // inserting the pubkey into the primary index if it is not already present,
    // and updating any enabled secondary indexes.
    pub fn upsert(
        &self,
        slot: Slot,
        pubkey: &Pubkey,
        account_owner: &Pubkey,
        account_data: &[u8],
        account_indexes: &AccountSecondaryIndexes,
        account_info: T,
        reclaims: &mut SlotList<T>,
        previous_slot_entry_was_cached: bool,
    ) {
        // We don't atomically update both primary index and secondary index together.
        // This certainly creates a small time window with inconsistent state across the two indexes.
        // However, this is acceptable because:
        //
        //  - A strict consistent view at any given moment of time is not necessary, because the only
        //  use case for the secondary index is `scan`, and `scans` are only supported/require consistency
        //  on frozen banks, and this inconsistency is only possible on working banks.
        //
        //  - The secondary index is never consulted as primary source of truth for gets/stores.
        //  So, what the accounts_index sees alone is sufficient as a source of truth for other non-scan
        //  account operations.
        let new_item =
            WriteAccountMapEntry::new_entry_after_update(slot, account_info, &self.storage.storage);
        let map = &self.account_maps[self.bin_calculator.bin_from_pubkey(pubkey)];

        // Fast path: try updating an existing entry under the read lock first;
        // only escalate to the write lock when the key must be inserted.
        let r_account_maps = map.read().unwrap();
        if !r_account_maps.update_key_if_exists(
            pubkey,
            &new_item,
            reclaims,
            previous_slot_entry_was_cached,
        ) {
            // Read lock must be released before taking the write lock.
            drop(r_account_maps);
            let w_account_maps = map.write().unwrap();
            w_account_maps.upsert(pubkey, new_item, reclaims, previous_slot_entry_was_cached);
        }
        self.update_secondary_indexes(pubkey, account_owner, account_data, account_indexes);
    }
|
2019-05-30 21:31:35 -07:00
|
|
|
|
2020-03-12 22:14:37 -07:00
|
|
|
pub fn unref_from_storage(&self, pubkey: &Pubkey) {
|
2020-10-21 17:05:27 -07:00
|
|
|
if let Some(locked_entry) = self.get_account_read_entry(pubkey) {
|
2021-02-04 12:44:19 -08:00
|
|
|
locked_entry.unref();
|
2020-03-12 22:14:37 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn ref_count_from_storage(&self, pubkey: &Pubkey) -> RefCount {
|
2020-10-21 17:05:27 -07:00
|
|
|
if let Some(locked_entry) = self.get_account_read_entry(pubkey) {
|
2021-08-09 06:59:56 -07:00
|
|
|
locked_entry.ref_count()
|
2020-03-12 22:14:37 -07:00
|
|
|
} else {
|
|
|
|
0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-12 15:29:30 -07:00
|
|
|
fn purge_secondary_indexes_by_inner_key<'a>(
|
2021-01-17 20:31:03 -08:00
|
|
|
&'a self,
|
2020-12-31 18:06:03 -08:00
|
|
|
inner_key: &Pubkey,
|
2021-05-10 07:22:48 -07:00
|
|
|
account_indexes: &AccountSecondaryIndexes,
|
2021-05-12 15:29:30 -07:00
|
|
|
) {
|
2020-12-31 18:06:03 -08:00
|
|
|
if account_indexes.contains(&AccountIndex::ProgramId) {
|
2021-05-12 15:29:30 -07:00
|
|
|
self.program_id_index.remove_by_inner_key(inner_key);
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if account_indexes.contains(&AccountIndex::SplTokenOwner) {
|
2021-05-12 15:29:30 -07:00
|
|
|
self.spl_token_owner_index.remove_by_inner_key(inner_key);
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if account_indexes.contains(&AccountIndex::SplTokenMint) {
|
2021-05-12 15:29:30 -07:00
|
|
|
self.spl_token_mint_index.remove_by_inner_key(inner_key);
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-28 16:04:46 -07:00
|
|
|
fn purge_older_root_entries(
|
|
|
|
&self,
|
2021-04-30 13:34:38 -07:00
|
|
|
slot_list: &mut SlotList<T>,
|
2020-09-28 16:04:46 -07:00
|
|
|
reclaims: &mut SlotList<T>,
|
|
|
|
max_clean_root: Option<Slot>,
|
|
|
|
) {
|
2021-03-04 23:16:53 -08:00
|
|
|
let roots_tracker = &self.roots_tracker.read().unwrap();
|
2021-04-30 13:34:38 -07:00
|
|
|
let newest_root_in_slot_list =
|
2021-06-18 06:34:46 -07:00
|
|
|
Self::get_newest_root_in_slot_list(&roots_tracker.roots, slot_list, max_clean_root);
|
2021-04-30 13:34:38 -07:00
|
|
|
let max_clean_root = max_clean_root.unwrap_or(roots_tracker.max_root);
|
2020-03-02 21:57:25 -08:00
|
|
|
|
2021-04-30 13:34:38 -07:00
|
|
|
slot_list.retain(|(slot, value)| {
|
|
|
|
let should_purge =
|
|
|
|
Self::can_purge_older_entries(max_clean_root, newest_root_in_slot_list, *slot)
|
|
|
|
&& !value.is_cached();
|
2020-12-31 18:06:03 -08:00
|
|
|
if should_purge {
|
2021-09-01 06:13:08 -07:00
|
|
|
reclaims.push((*slot, *value));
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
2021-04-30 13:34:38 -07:00
|
|
|
!should_purge
|
2020-12-31 18:06:03 -08:00
|
|
|
});
|
2020-03-02 21:57:25 -08:00
|
|
|
}
|
|
|
|
|
2020-09-28 16:04:46 -07:00
|
|
|
    /// Purge rooted, outdated entries for `pubkey` into `reclaims`, removing the
    /// pubkey from the index entirely when its slot list becomes empty.
    pub fn clean_rooted_entries(
        &self,
        pubkey: &Pubkey,
        reclaims: &mut SlotList<T>,
        max_clean_root: Option<Slot>,
    ) {
        let mut is_slot_list_empty = false;
        if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) {
            locked_entry.slot_list_mut(|slot_list| {
                self.purge_older_root_entries(slot_list, reclaims, max_clean_root);
                is_slot_list_empty = slot_list.is_empty();
            });
        }

        // If the slot list is empty, remove the pubkey from `account_maps`. Make sure to grab the
        // lock and double check the slot list is still empty, because another writer could have
        // locked and inserted the pubkey inbetween when `is_slot_list_empty=true` and the call to
        // remove() below.
        if is_slot_list_empty {
            let w_maps = self.get_account_maps_write_lock(pubkey);
            w_maps.remove_if_slot_list_empty(*pubkey);
        }
    }
|
|
|
|
|
2021-04-30 13:34:38 -07:00
|
|
|
/// When can an entry be purged?
|
|
|
|
///
|
|
|
|
/// If we get a slot update where slot != newest_root_in_slot_list for an account where slot <
|
|
|
|
/// max_clean_root, then we know it's safe to delete because:
|
|
|
|
///
|
|
|
|
/// a) If slot < newest_root_in_slot_list, then we know the update is outdated by a later rooted
|
|
|
|
/// update, namely the one in newest_root_in_slot_list
|
|
|
|
///
|
|
|
|
/// b) If slot > newest_root_in_slot_list, then because slot < max_clean_root and we know there are
|
|
|
|
/// no roots in the slot list between newest_root_in_slot_list and max_clean_root, (otherwise there
|
|
|
|
/// would be a bigger newest_root_in_slot_list, which is a contradiction), then we know slot must be
|
|
|
|
/// an unrooted slot less than max_clean_root and thus safe to clean as well.
|
|
|
|
fn can_purge_older_entries(
|
|
|
|
max_clean_root: Slot,
|
|
|
|
newest_root_in_slot_list: Slot,
|
|
|
|
slot: Slot,
|
|
|
|
) -> bool {
|
|
|
|
slot < max_clean_root && slot != newest_root_in_slot_list
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|
2019-06-10 18:15:39 -07:00
|
|
|
|
2021-05-24 13:51:17 -07:00
|
|
|
/// Given a list of slots, return a new list of only the slots that are rooted
|
|
|
|
pub fn get_rooted_from_list<'a>(&self, slots: impl Iterator<Item = &'a Slot>) -> Vec<Slot> {
|
|
|
|
let roots_tracker = self.roots_tracker.read().unwrap();
|
|
|
|
slots
|
|
|
|
.filter_map(|s| {
|
|
|
|
if roots_tracker.roots.contains(s) {
|
|
|
|
Some(*s)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2019-10-23 22:01:22 -07:00
|
|
|
pub fn is_root(&self, slot: Slot) -> bool {
|
2020-10-21 17:05:27 -07:00
|
|
|
self.roots_tracker.read().unwrap().roots.contains(&slot)
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|
2019-06-10 18:15:39 -07:00
|
|
|
|
2021-01-11 17:00:23 -08:00
|
|
|
pub fn add_root(&self, slot: Slot, caching_enabled: bool) {
|
2020-10-21 17:05:27 -07:00
|
|
|
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
|
|
|
|
w_roots_tracker.roots.insert(slot);
|
2021-01-11 17:00:23 -08:00
|
|
|
// we delay cleaning until flushing!
|
|
|
|
if !caching_enabled {
|
|
|
|
w_roots_tracker.uncleaned_roots.insert(slot);
|
|
|
|
}
|
|
|
|
// `AccountsDb::flush_accounts_cache()` relies on roots being added in order
|
|
|
|
assert!(slot >= w_roots_tracker.max_root);
|
|
|
|
w_roots_tracker.max_root = slot;
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn add_uncleaned_roots<I>(&self, roots: I)
|
|
|
|
where
|
|
|
|
I: IntoIterator<Item = Slot>,
|
|
|
|
{
|
|
|
|
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
|
|
|
|
w_roots_tracker.uncleaned_roots.extend(roots);
|
2020-11-16 17:23:11 -08:00
|
|
|
}
|
|
|
|
|
2021-01-24 09:50:19 -08:00
|
|
|
pub fn max_root(&self) -> Slot {
|
2020-11-16 17:23:11 -08:00
|
|
|
self.roots_tracker.read().unwrap().max_root
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|
2020-11-16 17:23:11 -08:00
|
|
|
|
2019-10-23 22:01:22 -07:00
|
|
|
    /// Remove the slot when the storage for the slot is freed
    /// Accounts no longer reference this slot.
    ///
    /// Returns updated root statistics, or `None` when `slot` was not actually
    /// a root (in which case any stale uncleaned-root tracking is reported).
    pub fn clean_dead_slot(&self, slot: Slot) -> Option<AccountsIndexRootsStats> {
        let (roots_len, uncleaned_roots_len, previous_uncleaned_roots_len, roots_range) = {
            let mut w_roots_tracker = self.roots_tracker.write().unwrap();
            let removed_from_unclean_roots = w_roots_tracker.uncleaned_roots.remove(&slot);
            let removed_from_previous_uncleaned_roots =
                w_roots_tracker.previous_uncleaned_roots.remove(&slot);
            if !w_roots_tracker.roots.remove(&slot) {
                // Slot was tracked as uncleaned but was never a root: report the
                // inconsistency via counters instead of panicking.
                if removed_from_unclean_roots {
                    error!("clean_dead_slot-removed_from_unclean_roots: {}", slot);
                    inc_new_counter_error!("clean_dead_slot-removed_from_unclean_roots", 1, 1);
                }
                if removed_from_previous_uncleaned_roots {
                    error!(
                        "clean_dead_slot-removed_from_previous_uncleaned_roots: {}",
                        slot
                    );
                    inc_new_counter_error!(
                        "clean_dead_slot-removed_from_previous_uncleaned_roots",
                        1,
                        1
                    );
                }
                return None;
            }
            // Snapshot the sizes while still holding the write lock.
            (
                w_roots_tracker.roots.len(),
                w_roots_tracker.uncleaned_roots.len(),
                w_roots_tracker.previous_uncleaned_roots.len(),
                w_roots_tracker.roots.range_width(),
            )
        };
        Some(AccountsIndexRootsStats {
            roots_len,
            uncleaned_roots_len,
            previous_uncleaned_roots_len,
            roots_range,
            rooted_cleaned_count: 0,
            unrooted_cleaned_count: 0,
        })
    }
|
|
|
|
|
2021-06-14 06:53:07 -07:00
|
|
|
pub fn min_root(&self) -> Option<Slot> {
|
|
|
|
self.roots_tracker.read().unwrap().min_root()
|
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> {
|
2020-09-28 16:04:46 -07:00
|
|
|
let mut cleaned_roots = HashSet::new();
|
2020-10-21 17:05:27 -07:00
|
|
|
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
|
|
|
|
w_roots_tracker.uncleaned_roots.retain(|root| {
|
2020-09-28 16:04:46 -07:00
|
|
|
let is_cleaned = max_clean_root
|
|
|
|
.map(|max_clean_root| *root <= max_clean_root)
|
|
|
|
.unwrap_or(true);
|
|
|
|
if is_cleaned {
|
|
|
|
cleaned_roots.insert(*root);
|
|
|
|
}
|
|
|
|
// Only keep the slots that have yet to be cleaned
|
|
|
|
!is_cleaned
|
|
|
|
});
|
2020-10-21 17:05:27 -07:00
|
|
|
std::mem::replace(&mut w_roots_tracker.previous_uncleaned_roots, cleaned_roots)
|
|
|
|
}
|
|
|
|
|
2021-01-23 04:02:44 -08:00
|
|
|
#[cfg(test)]
|
|
|
|
pub fn clear_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> {
|
|
|
|
let mut cleaned_roots = HashSet::new();
|
|
|
|
let mut w_roots_tracker = self.roots_tracker.write().unwrap();
|
|
|
|
w_roots_tracker.uncleaned_roots.retain(|root| {
|
|
|
|
let is_cleaned = max_clean_root
|
|
|
|
.map(|max_clean_root| *root <= max_clean_root)
|
|
|
|
.unwrap_or(true);
|
|
|
|
if is_cleaned {
|
|
|
|
cleaned_roots.insert(*root);
|
|
|
|
}
|
|
|
|
// Only keep the slots that have yet to be cleaned
|
|
|
|
!is_cleaned
|
|
|
|
});
|
|
|
|
cleaned_roots
|
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn is_uncleaned_root(&self, slot: Slot) -> bool {
|
|
|
|
self.roots_tracker
|
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.uncleaned_roots
|
|
|
|
.contains(&slot)
|
|
|
|
}
|
|
|
|
|
2020-12-21 21:33:37 -08:00
|
|
|
pub fn num_roots(&self) -> usize {
|
|
|
|
self.roots_tracker.read().unwrap().roots.len()
|
|
|
|
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
pub fn all_roots(&self) -> Vec<Slot> {
|
2021-04-12 10:11:33 -07:00
|
|
|
let tracker = self.roots_tracker.read().unwrap();
|
|
|
|
tracker.roots.get_all()
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
pub fn clear_roots(&self) {
|
|
|
|
self.roots_tracker.write().unwrap().roots.clear()
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
pub fn uncleaned_roots_len(&self) -> usize {
|
|
|
|
self.roots_tracker.read().unwrap().uncleaned_roots.len()
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|
2020-12-31 18:06:03 -08:00
|
|
|
|
|
|
|
    #[cfg(test)]
    // filter any rooted entries and return them along with a bool that indicates
    // if this account has no more entries. Note this does not update the secondary
    // indexes!
    pub fn purge_roots(&self, pubkey: &Pubkey) -> (SlotList<T>, bool) {
        // Panics if `pubkey` is not in the index (acceptable: test-only helper).
        let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap();
        write_account_map_entry.slot_list_mut(|slot_list| {
            // Collect the rooted entries first, then drop them from the list.
            let reclaims = self.get_rooted_entries(slot_list, None);
            slot_list.retain(|(slot, _)| !self.is_root(*slot));
            (reclaims, slot_list.is_empty())
        })
    }
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
2020-12-31 18:06:03 -08:00
|
|
|
pub mod tests {
|
2019-04-15 17:15:50 -07:00
|
|
|
use super::*;
|
2020-02-20 13:28:55 -08:00
|
|
|
use solana_sdk::signature::{Keypair, Signer};
|
2021-07-15 08:26:50 -07:00
|
|
|
use std::ops::RangeInclusive;
|
2019-04-15 17:15:50 -07:00
|
|
|
|
2020-12-31 18:06:03 -08:00
|
|
|
    /// Borrowed reference to one of the two secondary-index backing stores.
    /// Used by tests to statically assert which variant a given index field is.
    pub enum SecondaryIndexTypes<'a> {
        RwLock(&'a SecondaryIndex<RwLockSecondaryIndexEntry>),
        DashMap(&'a SecondaryIndex<DashMapSecondaryIndexEntry>),
    }
|
|
|
|
|
2021-05-10 07:22:48 -07:00
|
|
|
pub fn spl_token_mint_index_enabled() -> AccountSecondaryIndexes {
|
2020-12-31 18:06:03 -08:00
|
|
|
let mut account_indexes = HashSet::new();
|
|
|
|
account_indexes.insert(AccountIndex::SplTokenMint);
|
2021-05-11 15:06:22 -07:00
|
|
|
AccountSecondaryIndexes {
|
|
|
|
indexes: account_indexes,
|
|
|
|
keys: None,
|
|
|
|
}
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
|
|
|
|
2021-05-10 07:22:48 -07:00
|
|
|
pub fn spl_token_owner_index_enabled() -> AccountSecondaryIndexes {
|
2020-12-31 18:06:03 -08:00
|
|
|
let mut account_indexes = HashSet::new();
|
|
|
|
account_indexes.insert(AccountIndex::SplTokenOwner);
|
2021-05-11 15:06:22 -07:00
|
|
|
AccountSecondaryIndexes {
|
|
|
|
indexes: account_indexes,
|
|
|
|
keys: None,
|
|
|
|
}
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
impl<'a, T: IndexValue> AccountIndexGetResult<'a, T> {
|
2021-04-23 07:33:14 -07:00
|
|
|
pub fn unwrap(self) -> (ReadAccountMapEntry<T>, usize) {
|
|
|
|
match self {
|
|
|
|
AccountIndexGetResult::Found(lock, size) => (lock, size),
|
|
|
|
_ => {
|
|
|
|
panic!("trying to unwrap AccountIndexGetResult with non-Success result");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_none(&self) -> bool {
|
|
|
|
!self.is_some()
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_some(&self) -> bool {
|
|
|
|
matches!(self, AccountIndexGetResult::Found(_lock, _size))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn map<V, F: FnOnce((ReadAccountMapEntry<T>, usize)) -> V>(self, f: F) -> Option<V> {
|
|
|
|
match self {
|
|
|
|
AccountIndexGetResult::Found(lock, size) => Some(f((lock, size))),
|
|
|
|
_ => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-10 07:22:48 -07:00
|
|
|
fn create_dashmap_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) {
|
2020-12-31 18:06:03 -08:00
|
|
|
{
|
|
|
|
// Check that we're actually testing the correct variant
|
2021-08-05 09:38:53 -07:00
|
|
|
let index = AccountsIndex::<bool>::default_for_tests();
|
2020-12-31 18:06:03 -08:00
|
|
|
let _type_check = SecondaryIndexTypes::DashMap(&index.spl_token_mint_index);
|
|
|
|
}
|
|
|
|
|
|
|
|
(0, PUBKEY_BYTES, spl_token_mint_index_enabled())
|
|
|
|
}
|
|
|
|
|
2021-05-10 07:22:48 -07:00
|
|
|
fn create_rwlock_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) {
|
2020-12-31 18:06:03 -08:00
|
|
|
{
|
|
|
|
// Check that we're actually testing the correct variant
|
2021-08-05 09:38:53 -07:00
|
|
|
let index = AccountsIndex::<bool>::default_for_tests();
|
2020-12-31 18:06:03 -08:00
|
|
|
let _type_check = SecondaryIndexTypes::RwLock(&index.spl_token_owner_index);
|
|
|
|
}
|
|
|
|
|
|
|
|
(
|
|
|
|
SPL_TOKEN_ACCOUNT_OWNER_OFFSET,
|
|
|
|
SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES,
|
|
|
|
spl_token_owner_index_enabled(),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2021-05-28 09:33:07 -07:00
|
|
|
    #[test]
    fn test_bitfield_delete_non_excess() {
        solana_logger::setup();
        let len = 16;
        let mut bitfield = RollingBitField::new(len);
        assert_eq!(bitfield.min(), None);

        bitfield.insert(0);
        assert_eq!(bitfield.min(), Some(0));
        // inserting past the window pushes the earlier value into `excess`
        let too_big = len + 1;
        bitfield.insert(too_big);
        assert!(bitfield.contains(&0));
        assert!(bitfield.contains(&too_big));
        assert_eq!(bitfield.len(), 2);
        assert_eq!(bitfield.excess.len(), 1);
        assert_eq!(bitfield.min, too_big);
        // min() reports the overall minimum, including excess entries
        assert_eq!(bitfield.min(), Some(0));
        assert_eq!(bitfield.max, too_big + 1);

        // delete the thing that is NOT in excess
        bitfield.remove(&too_big);
        assert_eq!(bitfield.min, too_big + 1);
        assert_eq!(bitfield.max, too_big + 1);
        let too_big_times_2 = too_big * 2;
        bitfield.insert(too_big_times_2);
        assert!(bitfield.contains(&0));
        assert!(bitfield.contains(&too_big_times_2));
        assert_eq!(bitfield.len(), 2);
        assert_eq!(bitfield.excess.len(), 1);
        assert_eq!(bitfield.min(), bitfield.excess.iter().min().copied());
        assert_eq!(bitfield.min, too_big_times_2);
        assert_eq!(bitfield.max, too_big_times_2 + 1);

        // removing everything empties both the window and excess
        bitfield.remove(&0);
        bitfield.remove(&too_big_times_2);
        assert!(bitfield.is_empty());
        let other = 5;
        bitfield.insert(other);
        assert!(bitfield.contains(&other));
        assert!(bitfield.excess.is_empty());
        assert_eq!(bitfield.min, other);
        assert_eq!(bitfield.max, other + 1);
    }
|
|
|
|
|
|
|
|
    #[test]
    fn test_bitfield_insert_excess() {
        solana_logger::setup();
        let len = 16;
        let mut bitfield = RollingBitField::new(len);

        bitfield.insert(0);
        // inserting past the window pushes 0 into `excess`
        let too_big = len + 1;
        bitfield.insert(too_big);
        assert!(bitfield.contains(&0));
        assert!(bitfield.contains(&too_big));
        assert_eq!(bitfield.len(), 2);
        assert_eq!(bitfield.excess.len(), 1);
        assert!(bitfield.excess.contains(&0));
        assert_eq!(bitfield.min, too_big);
        assert_eq!(bitfield.max, too_big + 1);

        // delete the thing that IS in excess
        // this does NOT affect min/max
        bitfield.remove(&0);
        assert_eq!(bitfield.min, too_big);
        assert_eq!(bitfield.max, too_big + 1);
        // re-add to excess
        bitfield.insert(0);
        assert!(bitfield.contains(&0));
        assert!(bitfield.contains(&too_big));
        assert_eq!(bitfield.len(), 2);
        assert_eq!(bitfield.excess.len(), 1);
        assert_eq!(bitfield.min, too_big);
        assert_eq!(bitfield.max, too_big + 1);
    }
|
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
    // Populates a wide bitfield and a mirror HashSet with the same slots, then
    // verifies `contains` agrees across a range that overhangs both ends.
    #[test]
    fn test_bitfield_permutations() {
        solana_logger::setup();
        let mut bitfield = RollingBitField::new(2097152);
        let mut hash = HashSet::new();

        let min = 101_000;
        let width = 400_000;
        let dead = 19;

        // insert `width` slots, skipping every `dead`-th one to leave gaps
        let mut slot = min;
        while hash.len() < width {
            slot += 1;
            if slot % dead == 0 {
                continue;
            }
            hash.insert(slot);
            bitfield.insert(slot);
        }
        compare(&hash, &bitfield);

        let max = slot + 1;

        // time membership queries against the HashSet...
        let mut time = Measure::start("");
        let mut count = 0;
        for slot in (min - 10)..max + 100 {
            if hash.contains(&slot) {
                count += 1;
            }
        }
        time.stop();

        // ...and against the bitfield, over the same over-hanging range
        let mut time2 = Measure::start("");
        let mut count2 = 0;
        for slot in (min - 10)..max + 100 {
            if bitfield.contains(&slot) {
                count2 += 1;
            }
        }
        time2.stop();
        info!(
            "{}ms, {}ms, {} ratio",
            time.as_ms(),
            time2.as_ms(),
            time.as_ns() / time2.as_ns()
        );
        assert_eq!(count, count2);
    }
|
|
|
|
|
|
|
|
    #[test]
    #[should_panic(expected = "assertion failed: max_width.is_power_of_two()")]
    fn test_bitfield_power_2() {
        // constructor asserts the width is a power of two
        let _ = RollingBitField::new(3);
    }
|
|
|
|
|
|
|
|
    #[test]
    #[should_panic(expected = "assertion failed: max_width > 0")]
    fn test_bitfield_0() {
        // constructor asserts the width is non-zero
        let _ = RollingBitField::new(0);
    }
|
|
|
|
|
2021-04-29 07:11:28 -07:00
|
|
|
fn setup_empty(width: u64) -> RollingBitFieldTester {
|
|
|
|
let bitfield = RollingBitField::new(width);
|
|
|
|
let hash_set = HashSet::new();
|
|
|
|
RollingBitFieldTester { bitfield, hash_set }
|
|
|
|
}
|
2021-04-12 10:11:33 -07:00
|
|
|
|
2021-04-29 07:11:28 -07:00
|
|
|
    /// Test harness that mirrors every bitfield mutation into a `HashSet` so
    /// the two containers can be compared after each operation.
    struct RollingBitFieldTester {
        // the structure under test
        pub bitfield: RollingBitField,
        // reference model holding the same slots
        pub hash_set: HashSet<u64>,
    }
|
2021-04-12 10:11:33 -07:00
|
|
|
|
2021-04-29 07:11:28 -07:00
|
|
|
impl RollingBitFieldTester {
|
|
|
|
fn insert(&mut self, slot: u64) {
|
|
|
|
self.bitfield.insert(slot);
|
|
|
|
self.hash_set.insert(slot);
|
|
|
|
assert!(self.bitfield.contains(&slot));
|
|
|
|
compare(&self.hash_set, &self.bitfield);
|
|
|
|
}
|
|
|
|
fn remove(&mut self, slot: &u64) -> bool {
|
|
|
|
let result = self.bitfield.remove(slot);
|
|
|
|
assert_eq!(result, self.hash_set.remove(slot));
|
2021-06-18 06:34:46 -07:00
|
|
|
assert!(!self.bitfield.contains(slot));
|
2021-04-29 07:11:28 -07:00
|
|
|
self.compare();
|
|
|
|
result
|
|
|
|
}
|
|
|
|
fn compare(&self) {
|
|
|
|
compare(&self.hash_set, &self.bitfield);
|
|
|
|
}
|
|
|
|
}
|
2021-04-12 10:11:33 -07:00
|
|
|
|
2021-04-29 07:11:28 -07:00
|
|
|
fn setup_wide(width: u64, start: u64) -> RollingBitFieldTester {
|
|
|
|
let mut tester = setup_empty(width);
|
2021-04-12 10:11:33 -07:00
|
|
|
|
2021-04-29 07:11:28 -07:00
|
|
|
tester.compare();
|
|
|
|
tester.insert(start);
|
|
|
|
tester.insert(start + 1);
|
|
|
|
tester
|
2021-04-12 10:11:33 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
    #[test]
    fn test_bitfield_insert_wide() {
        solana_logger::setup();
        let width = 16;
        let start = 0;
        let mut tester = setup_wide(width, start);

        let slot = start + width;
        let all = tester.bitfield.get_all();
        // higher than max range by 1
        tester.insert(slot);
        let bitfield = tester.bitfield;
        // earlier slots must survive the window shift
        for slot in all {
            assert!(bitfield.contains(&slot));
        }
        assert_eq!(bitfield.excess.len(), 1);
        assert_eq!(bitfield.count, 3);
    }
|
|
|
|
|
|
|
|
    #[test]
    fn test_bitfield_insert_wide_before() {
        solana_logger::setup();
        let width = 16;
        let start = 100;
        let mut bitfield = setup_wide(width, start).bitfield;

        let slot = start + 1 - width;
        // assert here - would make min too low, causing too wide of a range
        bitfield.insert(slot);
        // the out-of-window value lands in excess rather than widening the range
        assert_eq!(1, bitfield.excess.len());
        assert_eq!(3, bitfield.count);
        assert!(bitfield.contains(&slot));
    }
|
|
|
|
|
|
|
|
    #[test]
    fn test_bitfield_insert_wide_before_ok() {
        solana_logger::setup();
        let width = 16;
        let start = 100;
        let mut bitfield = setup_wide(width, start).bitfield;

        let slot = start + 2 - width; // this item would make our width exactly equal to what is allowed, but it is also inserting prior to min
        bitfield.insert(slot);
        // pre-min insert still goes to excess
        assert_eq!(1, bitfield.excess.len());
        assert!(bitfield.contains(&slot));
        assert_eq!(3, bitfield.count);
    }
|
|
|
|
|
|
|
|
    // `contains` on slots outside the populated range must return false
    // without panicking, both above and below the window.
    #[test]
    fn test_bitfield_contains_wide_no_assert() {
        {
            let width = 16;
            let start = 0;
            let bitfield = setup_wide(width, start).bitfield;

            let mut slot = width;
            assert!(!bitfield.contains(&slot));
            slot += 1;
            assert!(!bitfield.contains(&slot));
        }
        {
            let width = 16;
            let start = 100;
            let bitfield = setup_wide(width, start).bitfield;

            // too large
            let mut slot = width;
            assert!(!bitfield.contains(&slot));
            slot += 1;
            assert!(!bitfield.contains(&slot));
            // too small, before min
            slot = 0;
            assert!(!bitfield.contains(&slot));
        }
    }
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_remove_wide() {
    // Removing a slot that was never inserted must report `false`.
    let (width, start) = (16, 0);
    let mut tester = setup_wide(width, start);
    let missing = width;
    assert!(!tester.remove(&missing));
}
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_excess2() {
    // Exercises the interaction between the in-range bit vector and the
    // `excess` set when slots are inserted/removed out of ascending order.
    solana_logger::setup();
    let width = 16;
    let mut tester = setup_empty(width);
    let slot = 100;
    // insert 1st slot
    tester.insert(slot);
    assert!(tester.bitfield.excess.is_empty());

    // insert a slot before the previous one. this is 'excess' since we don't use this pattern in normal operation
    let slot2 = slot - 1;
    tester.insert(slot2);
    assert_eq!(tester.bitfield.excess.len(), 1);

    // remove the 1st slot. we will be left with only excess
    tester.remove(&slot);
    assert!(tester.bitfield.contains(&slot2));
    assert_eq!(tester.bitfield.excess.len(), 1);

    // re-insert at valid range, making sure we don't insert into excess
    tester.insert(slot);
    assert_eq!(tester.bitfield.excess.len(), 1);

    // remove the excess slot.
    tester.remove(&slot2);
    assert!(tester.bitfield.contains(&slot));
    assert!(tester.bitfield.excess.is_empty());

    // re-insert the excess slot
    tester.insert(slot2);
    assert_eq!(tester.bitfield.excess.len(), 1);
}
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_excess() {
    // Exhaustively covers excess-tracking across combinations of bitfield
    // width, starting slot, re-use vs. recreation, slot ranges, and
    // forward/reverse insertion order.
    solana_logger::setup();
    // start at slot 0 or a separate, higher slot
    for width in [16, 4194304].iter() {
        let width = *width;
        let mut tester = setup_empty(width);
        for start in [0, width * 5].iter().cloned() {
            // recreate means create empty bitfield with each iteration, otherwise re-use
            for recreate in [false, true].iter().cloned() {
                let max = start + 3;
                // first root to add
                for slot in start..max {
                    // subsequent roots to add
                    for slot2 in (slot + 1)..max {
                        // reverse_slots = 1 means add slots in reverse order (max to min). This causes us to add second and later slots to excess.
                        for reverse_slots in [false, true].iter().cloned() {
                            // maps an ascending loop index into descending order when reversing
                            let maybe_reverse = |slot| {
                                if reverse_slots {
                                    max - slot
                                } else {
                                    slot
                                }
                            };
                            if recreate {
                                let recreated = setup_empty(width);
                                tester = recreated;
                            }

                            // insert
                            for slot in slot..=slot2 {
                                let slot_use = maybe_reverse(slot);
                                tester.insert(slot_use);
                                debug!(
                                    "slot: {}, bitfield: {:?}, reverse: {}, len: {}, excess: {:?}",
                                    slot_use,
                                    tester.bitfield,
                                    reverse_slots,
                                    tester.bitfield.len(),
                                    tester.bitfield.excess
                                );
                                // XOR: excess is non-empty exactly when we are
                                // inserting in reverse and hold more than one slot
                                assert!(
                                    (reverse_slots && tester.bitfield.len() > 1)
                                        ^ tester.bitfield.excess.is_empty()
                                );
                            }
                            // slots far outside the populated range are absent
                            if start > width * 2 {
                                assert!(!tester.bitfield.contains(&(start - width * 2)));
                            }
                            assert!(!tester.bitfield.contains(&(start + width * 2)));
                            let len = (slot2 - slot + 1) as usize;
                            assert_eq!(tester.bitfield.len(), len);
                            assert_eq!(tester.bitfield.count, len);

                            // remove
                            for slot in slot..=slot2 {
                                let slot_use = maybe_reverse(slot);
                                assert!(tester.remove(&slot_use));
                                // excess drains in step with the bitfield when removing in reverse
                                assert!(
                                    (reverse_slots && !tester.bitfield.is_empty())
                                        ^ tester.bitfield.excess.is_empty()
                                );
                            }
                            assert!(tester.bitfield.is_empty());
                            assert_eq!(tester.bitfield.count, 0);
                            if start > width * 2 {
                                assert!(!tester.bitfield.contains(&(start - width * 2)));
                            }
                            assert!(!tester.bitfield.contains(&(start + width * 2)));
                        }
                    }
                }
            }
        }
    }
}
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_remove_wide_before() {
    // A slot lying before the populated range was never inserted, so
    // removing it must report `false`.
    let (width, start) = (16, 100);
    let mut tester = setup_wide(width, start);
    let before_range = start + 1 - width;
    assert!(!tester.remove(&before_range));
}
|
|
|
|
|
2021-05-28 09:33:07 -07:00
|
|
|
// Asserts that `bitfield` holds exactly the same slots as the reference
// `hashset`, and that its min()/range_width() bookkeeping is consistent.
fn compare_internal(hashset: &HashSet<u64>, bitfield: &RollingBitField) {
    assert_eq!(hashset.len(), bitfield.len());
    assert_eq!(hashset.is_empty(), bitfield.is_empty());
    if !bitfield.is_empty() {
        // min/max over in-range items only; overall_min also covers excess
        let mut min = Slot::MAX;
        let mut overall_min = Slot::MAX;
        let mut max = Slot::MIN;
        for item in bitfield.get_all() {
            assert!(hashset.contains(&item));
            if !bitfield.excess.contains(&item) {
                min = std::cmp::min(min, item);
                max = std::cmp::max(max, item);
            }
            overall_min = std::cmp::min(overall_min, item);
        }
        // bitfield.min() reflects the smallest item, excess included
        assert_eq!(bitfield.min(), Some(overall_min));
        assert_eq!(bitfield.get_all().len(), hashset.len());
        // range isn't tracked for excess items
        if bitfield.excess.len() != bitfield.len() {
            let width = if bitfield.is_empty() {
                0
            } else {
                max + 1 - min
            };
            // the tracked range must cover at least the observed span
            assert!(
                bitfield.range_width() >= width,
                "hashset: {:?}, bitfield: {:?}, bitfield.range_width: {}, width: {}",
                hashset,
                bitfield.get_all(),
                bitfield.range_width(),
                width,
            );
        }
    } else {
        assert_eq!(bitfield.min(), None);
    }
}
|
|
|
|
|
2021-05-28 09:33:07 -07:00
|
|
|
// Validates the bitfield against the reference set, then repeats the
// validation on a clone and confirms clone equality both ways.
fn compare(hashset: &HashSet<u64>, bitfield: &RollingBitField) {
    compare_internal(hashset, bitfield);
    let duplicate = bitfield.clone();
    compare_internal(hashset, &duplicate);
    assert!(duplicate.eq(bitfield));
    assert_eq!(duplicate, *bitfield);
}
|
|
|
|
|
2021-04-12 10:11:33 -07:00
|
|
|
#[test]
fn test_bitfield_functionality() {
    // End-to-end check of insert/contains/remove against a parallel HashSet,
    // across bitfield widths 1..=512 and 0..=2 populated entries.
    solana_logger::setup();

    // bitfield sizes are powers of 2, cycle through values of 1, 2, 4, .. 2^9
    for power in 0..10 {
        let max_bitfield_width = 2u64.pow(power) as u64;
        let width_iteration_max = if max_bitfield_width > 1 {
            // add up to 2 items so we can test out multiple items
            3
        } else {
            // 0 or 1 items is all we can fit with a width of 1 item
            2
        };
        for width in 0..width_iteration_max {
            let mut tester = setup_empty(max_bitfield_width);

            let min = 101_000;
            let dead = 19;

            // populate `width` slots, skipping every `dead`-th slot
            let mut slot = min;
            while tester.hash_set.len() < width {
                slot += 1;
                if max_bitfield_width > 2 && slot % dead == 0 {
                    // with max_bitfield_width of 1 and 2, there is no room for dead slots
                    continue;
                }
                tester.insert(slot);
            }
            let max = slot + 1;

            // contains() must agree with the reference set across and beyond the range
            for slot in (min - 10)..max + 100 {
                assert_eq!(
                    tester.bitfield.contains(&slot),
                    tester.hash_set.contains(&slot)
                );
            }

            // removing an existing slot succeeds once, then fails
            if width > 0 {
                assert!(tester.remove(&slot));
                assert!(!tester.remove(&slot));
            }

            let all = tester.bitfield.get_all();

            // remove the rest, including a call that removes slot again
            for item in all.iter() {
                assert!(tester.remove(item));
                assert!(!tester.remove(item));
            }

            // re-populate well past the previous range and re-verify contains()
            let min = max + ((width * 2) as u64) + 3;
            let slot = min; // several widths past previous min
            let max = slot + 1;
            tester.insert(slot);

            for slot in (min - 10)..max + 100 {
                assert_eq!(
                    tester.bitfield.contains(&slot),
                    tester.hash_set.contains(&slot)
                );
            }
        }
    }
}
|
|
|
|
|
|
|
|
// Inserts `slot` (assumed not already present) and asserts every invariant
// that should hold afterwards: min/max tracking, length, membership, bit
// aliasing, and preservation of previously-held slots.
fn bitfield_insert_and_test(bitfield: &mut RollingBitField, slot: Slot) {
    let len = bitfield.len();
    let old_all = bitfield.get_all();
    // expected min/max after the insert
    let (new_min, new_max) = if bitfield.is_empty() {
        (slot, slot + 1)
    } else {
        (
            std::cmp::min(bitfield.min, slot),
            std::cmp::max(bitfield.max, slot + 1),
        )
    };
    bitfield.insert(slot);
    assert_eq!(bitfield.min, new_min);
    assert_eq!(bitfield.max, new_max);
    assert_eq!(bitfield.len(), len + 1);
    assert!(!bitfield.is_empty());
    assert!(bitfield.contains(&slot));
    // verify aliasing is what we expect
    assert!(bitfield.contains_assume_in_range(&(slot + bitfield.max_width)));
    // all previously-present slots remain, plus exactly the new one
    let get_all = bitfield.get_all();
    old_all
        .into_iter()
        .for_each(|slot| assert!(get_all.contains(&slot)));
    assert!(get_all.contains(&slot));
    assert!(get_all.len() == len + 1);
}
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_clear() {
    let mut bitfield = RollingBitField::new(4);
    assert_eq!(bitfield.len(), 0);
    assert!(bitfield.is_empty());
    // Insert then clear; after each clear the field must be fully empty.
    for slot in [0, 1].iter().cloned() {
        bitfield_insert_and_test(&mut bitfield, slot);
        bitfield.clear();
        assert_eq!(bitfield.len(), 0);
        assert!(bitfield.is_empty());
        assert!(bitfield.get_all().is_empty());
    }
    // Inserting at slot 4 on a cleared field still satisfies all invariants.
    bitfield_insert_and_test(&mut bitfield, 4);
}
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_wrapping() {
    // Walks a width-4 bitfield through a fixed insert/remove script,
    // checking min/max/len/get_all at each step, including the case where a
    // new slot maps onto the same underlying bit as an old one.
    let mut bitfield = RollingBitField::new(4);
    assert_eq!(bitfield.len(), 0);
    assert!(bitfield.is_empty());
    bitfield_insert_and_test(&mut bitfield, 0);
    assert_eq!(bitfield.get_all(), vec![0]);
    bitfield_insert_and_test(&mut bitfield, 2);
    assert_eq!(bitfield.get_all(), vec![0, 2]);
    bitfield_insert_and_test(&mut bitfield, 3);
    bitfield.insert(3); // redundant insert
    assert_eq!(bitfield.get_all(), vec![0, 2, 3]);
    assert!(bitfield.remove(&0));
    assert!(!bitfield.remove(&0));
    // removing the old min advances min to the next present slot
    assert_eq!(bitfield.min, 2);
    assert_eq!(bitfield.max, 4);
    assert_eq!(bitfield.len(), 2);
    assert!(!bitfield.remove(&0)); // redundant remove
    assert_eq!(bitfield.len(), 2);
    assert_eq!(bitfield.get_all(), vec![2, 3]);
    bitfield.insert(4); // wrapped around value - same bit as '0'
    assert_eq!(bitfield.min, 2);
    assert_eq!(bitfield.max, 5);
    assert_eq!(bitfield.len(), 3);
    assert_eq!(bitfield.get_all(), vec![2, 3, 4]);
    assert!(bitfield.remove(&2));
    assert_eq!(bitfield.min, 3);
    assert_eq!(bitfield.max, 5);
    assert_eq!(bitfield.len(), 2);
    assert_eq!(bitfield.get_all(), vec![3, 4]);
    assert!(bitfield.remove(&3));
    assert_eq!(bitfield.min, 4);
    assert_eq!(bitfield.max, 5);
    assert_eq!(bitfield.len(), 1);
    assert_eq!(bitfield.get_all(), vec![4]);
    assert!(bitfield.remove(&4));
    assert_eq!(bitfield.len(), 0);
    assert!(bitfield.is_empty());
    assert!(bitfield.get_all().is_empty());
    // inserting into an emptied field works at slots past the original range
    bitfield_insert_and_test(&mut bitfield, 8);
    assert!(bitfield.remove(&8));
    assert_eq!(bitfield.len(), 0);
    assert!(bitfield.is_empty());
    assert!(bitfield.get_all().is_empty());
    bitfield_insert_and_test(&mut bitfield, 9);
    assert!(bitfield.remove(&9));
    assert_eq!(bitfield.len(), 0);
    assert!(bitfield.is_empty());
    assert!(bitfield.get_all().is_empty());
}
|
|
|
|
|
|
|
|
#[test]
fn test_bitfield_smaller() {
    // smaller bitfield, fewer entries, including 0
    // Compares membership counts between a HashSet and the bitfield over the
    // same slot range, and logs relative lookup timing.
    solana_logger::setup();

    for width in 0..34 {
        let mut bitfield = RollingBitField::new(4096);
        let mut hash_set = HashSet::new();

        let min = 1_010_000;
        let dead = 19;

        // populate `width` slots into both containers, skipping every 19th
        let mut slot = min;
        while hash_set.len() < width {
            slot += 1;
            if slot % dead == 0 {
                continue;
            }
            hash_set.insert(slot);
            bitfield.insert(slot);
        }

        let max = slot + 1;

        // count hits via the HashSet
        let mut time = Measure::start("");
        let mut count = 0;
        for slot in (min - 10)..max + 100 {
            if hash_set.contains(&slot) {
                count += 1;
            }
        }
        time.stop();

        // count hits via the bitfield over the identical range
        let mut time2 = Measure::start("");
        let mut count2 = 0;
        for slot in (min - 10)..max + 100 {
            if bitfield.contains(&slot) {
                count2 += 1;
            }
        }
        time2.stop();
        info!(
            "{}, {}, {}",
            time.as_ms(),
            time2.as_ms(),
            time.as_ns() / time2.as_ns()
        );
        // both containers must agree on membership
        assert_eq!(count, count2);
    }
}
|
|
|
|
|
2021-08-26 16:12:43 -07:00
|
|
|
// Named value for the `collect_all_unsorted` argument of
// `unchecked_scan_accounts`; these tests always pass `false`.
const COLLECT_ALL_UNSORTED_FALSE: bool = false;
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
#[test]
fn test_get_empty() {
    // A brand-new index must report no entry for any key.
    let pubkey = Keypair::new().pubkey();
    let index = AccountsIndex::<bool>::default_for_tests();
    let ancestors = Ancestors::default();
    assert!(index.get(&pubkey, Some(&ancestors), None).is_none());
    assert!(index.get(&pubkey, None, None).is_none());

    // A scan over the empty index must visit nothing.
    let mut visited = 0;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| visited += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(visited, 0);
}
|
|
|
|
|
2021-05-11 15:06:22 -07:00
|
|
|
#[test]
fn test_secondary_index_include_exclude() {
    let pk1 = Pubkey::new_unique();
    let pk2 = Pubkey::new_unique();
    let mut index = AccountSecondaryIndexes::default();

    // Enabling an index kind is reflected by `contains`.
    assert!(!index.contains(&AccountIndex::ProgramId));
    index.indexes.insert(AccountIndex::ProgramId);
    assert!(index.contains(&AccountIndex::ProgramId));

    // With no key filter configured, every key is included.
    assert!(index.include_key(&pk1));
    assert!(index.include_key(&pk2));

    // Helper: build a key filter over `keys` with the given exclude flag.
    let filter = |keys: &[Pubkey], exclude: bool| {
        Some(AccountSecondaryIndexesIncludeExclude {
            keys: keys.iter().cloned().collect::<HashSet<_>>(),
            exclude,
        })
    };

    // Include-list holding only pk1: pk2 is filtered out.
    index.keys = filter(&[pk1], false);
    assert!(index.include_key(&pk1));
    assert!(!index.include_key(&pk2));

    // Exclude-list holding only pk1: only pk2 passes.
    index.keys = filter(&[pk1], true);
    assert!(!index.include_key(&pk1));
    assert!(index.include_key(&pk2));

    // Exclude-list holding both keys: neither passes.
    index.keys = filter(&[pk1, pk2], true);
    assert!(!index.include_key(&pk1));
    assert!(!index.include_key(&pk2));

    // Include-list holding both keys: both pass.
    index.keys = filter(&[pk1, pk2], false);
    assert!(index.include_key(&pk1));
    assert!(index.include_key(&pk2));
}
|
|
|
|
|
2021-08-09 06:58:59 -07:00
|
|
|
// Named value for the `previous_slot_entry_was_cached` argument of
// `upsert`/`update_key_if_exists`; these tests always pass `false`.
const UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE: bool = false;
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
#[test]
fn test_insert_no_ancestors() {
    // An upserted entry is not visible via get() or scans when the ancestor
    // set does not include the entry's slot (and the slot is not rooted).
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let mut gc = Vec::new();
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    // a fresh insert should not have reclaimed anything into gc
    assert!(gc.is_empty());

    // empty ancestors: the slot-0 entry is not visible
    let ancestors = Ancestors::default();
    assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none());
    assert!(index.get(&key.pubkey(), None, None).is_none());

    // scanning with empty ancestors visits no accounts
    let mut num = 0;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 0);
}
|
|
|
|
|
2021-05-17 09:58:33 -07:00
|
|
|
// Minimal account-info type for tests: an f64 that reports itself as cached
// and as zero-lamport.
type AccountInfoTest = f64;

impl IndexValue for AccountInfoTest {}
impl IsCached for AccountInfoTest {
    // Always "cached"; tests below rely on this yielding a ref_count of 0.
    fn is_cached(&self) -> bool {
        true
    }
}

impl ZeroLamport for AccountInfoTest {
    // Always reports zero lamports for test purposes.
    fn is_zero_lamport(&self) -> bool {
        true
    }
}
|
|
|
|
#[test]
fn test_insert_new_with_lock_no_ancestors() {
    // Inserts via insert_new_if_missing_into_primary_index and verifies
    // visibility and ref counts for an uncached (bool) and a cached
    // (AccountInfoTest) info type.
    let key = Keypair::new();
    let pubkey = &key.pubkey();
    let slot = 0;

    // ---- uncached info type (bool) ----
    let index = AccountsIndex::<bool>::default_for_tests();
    let account_info = true;
    let items = vec![(*pubkey, account_info)];
    index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter());

    // not visible until slot 0 is in the ancestors
    let mut ancestors = Ancestors::default();
    assert!(index.get(pubkey, Some(&ancestors), None).is_none());
    assert!(index.get(pubkey, None, None).is_none());

    let mut num = 0;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 0);
    ancestors.insert(slot, 0);
    assert!(index.get(pubkey, Some(&ancestors), None).is_some());
    // uncached entries hold a storage ref count of 1
    assert_eq!(index.ref_count_from_storage(pubkey), 1);
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 1);

    // not zero lamports
    // ---- cached info type (AccountInfoTest) ----
    let index = AccountsIndex::<AccountInfoTest>::default_for_tests();
    let account_info: AccountInfoTest = 0 as AccountInfoTest;
    let items = vec![(*pubkey, account_info)];
    index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter());

    let mut ancestors = Ancestors::default();
    assert!(index.get(pubkey, Some(&ancestors), None).is_none());
    assert!(index.get(pubkey, None, None).is_none());

    let mut num = 0;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 0);
    ancestors.insert(slot, 0);
    assert!(index.get(pubkey, Some(&ancestors), None).is_some());
    assert_eq!(index.ref_count_from_storage(pubkey), 0); // cached, so 0
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 1);
}
|
|
|
|
|
2021-05-19 14:21:24 -07:00
|
|
|
#[test]
fn test_new_entry() {
    // new_entry_after_update must produce a single-element slot list, with
    // ref_count 0 for a cached info type and 1 for an uncached one.
    let slot = 0;
    // account_info type that IS cached
    let account_info = AccountInfoTest::default();
    let index = AccountsIndex::default_for_tests();

    let new_entry = WriteAccountMapEntry::new_entry_after_update(
        slot,
        account_info,
        &index.storage.storage,
    );
    assert_eq!(new_entry.ref_count.load(Ordering::Relaxed), 0);
    // slot list is allocated for exactly one element
    assert_eq!(new_entry.slot_list.read().unwrap().capacity(), 1);
    assert_eq!(
        new_entry.slot_list.read().unwrap().to_vec(),
        vec![(slot, account_info)]
    );

    // account_info type that is NOT cached
    let account_info = true;
    let index = AccountsIndex::default_for_tests();

    let new_entry = WriteAccountMapEntry::new_entry_after_update(
        slot,
        account_info,
        &index.storage.storage,
    );
    assert_eq!(new_entry.ref_count.load(Ordering::Relaxed), 1);
    assert_eq!(new_entry.slot_list.read().unwrap().capacity(), 1);
    assert_eq!(
        new_entry.slot_list.read().unwrap().to_vec(),
        vec![(slot, account_info)]
    );
}
|
|
|
|
|
2021-05-20 08:29:13 -07:00
|
|
|
#[test]
fn test_batch_insert() {
    // Insert two keys in one batch, then verify each landed with its own
    // account info and a ref count of 1.
    let slot0 = 0;
    let keys = [Keypair::new().pubkey(), Keypair::new().pubkey()];
    let account_infos = [true, false];

    let index = AccountsIndex::<bool>::default_for_tests();
    let items: Vec<_> = keys
        .iter()
        .cloned()
        .zip(account_infos.iter().cloned())
        .collect();
    index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter());

    for (key, info) in keys.iter().zip(account_infos.iter()) {
        let entry = index.get_account_read_entry(key).unwrap();
        assert_eq!(entry.ref_count(), 1);
        assert_eq!(entry.slot_list().to_vec(), vec![(slot0, *info)]);
    }
}
|
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
// Drives both insertion paths (upsert vs. batch insert) for a generic info
// type and verifies slot lists and ref counts after a first and a second
// insert, reading back with and without an explicit bin read lock.
// `is_cached` states the expected caching behavior of `T` (cached types keep
// ref_count at 0).
fn test_new_entry_code_paths_helper<T: IndexValue>(
    account_infos: [T; 2],
    is_cached: bool,
    upsert: bool,
) {
    let slot0 = 0;
    let slot1 = 1;
    let key = Keypair::new().pubkey();

    let index = AccountsIndex::<T>::default_for_tests();
    let mut gc = Vec::new();

    if upsert {
        // insert first entry for pubkey. This will use new_entry_after_update and not call update.
        index.upsert(
            slot0,
            &key,
            &Pubkey::default(),
            &[],
            &AccountSecondaryIndexes::default(),
            account_infos[0],
            &mut gc,
            UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
        );
    } else {
        let items = vec![(key, account_infos[0])];
        index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter());
    }
    assert!(gc.is_empty());

    // verify the added entry matches expected
    {
        let entry = index.get_account_read_entry(&key).unwrap();
        assert_eq!(entry.ref_count(), if is_cached { 0 } else { 1 });
        let expected = vec![(slot0, account_infos[0])];
        assert_eq!(entry.slot_list().to_vec(), expected);
        // a freshly-built entry for the same update must carry the same list
        let new_entry = WriteAccountMapEntry::new_entry_after_update(
            slot0,
            account_infos[0],
            &index.storage.storage,
        );
        assert_eq!(
            entry.slot_list().to_vec(),
            new_entry.slot_list.read().unwrap().to_vec(),
        );
    }

    // insert second entry for pubkey. This will use update and NOT use new_entry_after_update.
    if upsert {
        index.upsert(
            slot1,
            &key,
            &Pubkey::default(),
            &[],
            &AccountSecondaryIndexes::default(),
            account_infos[1],
            &mut gc,
            UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
        );
    } else {
        let items = vec![(key, account_infos[1])];
        index.insert_new_if_missing_into_primary_index(slot1, items.len(), items.into_iter());
    }
    assert!(gc.is_empty());

    // read the entry back both without and with an explicit bin read lock
    for lock in &[false, true] {
        let read_lock = if *lock {
            Some(index.get_account_maps_read_lock(&key))
        } else {
            None
        };

        let entry = if *lock {
            index
                .get_account_read_entry_with_lock(&key, read_lock.as_ref().unwrap())
                .unwrap()
        } else {
            index.get_account_read_entry(&key).unwrap()
        };

        // two uncached updates => ref_count 2; cached stays at 0
        assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 });
        assert_eq!(
            entry.slot_list().to_vec(),
            vec![(slot0, account_infos[0]), (slot1, account_infos[1])]
        );

        // second slot-list element equals a freshly-built entry for slot1
        let new_entry = WriteAccountMapEntry::new_entry_after_update(
            slot1,
            account_infos[1],
            &index.storage.storage,
        );
        assert_eq!(entry.slot_list()[1], new_entry.slot_list.read().unwrap()[0],);
    }
}
|
|
|
|
|
|
|
|
#[test]
fn test_new_entry_and_update_code_paths() {
    // Exercise both insertion paths (batch insert and upsert) for both a
    // cached (f64) and an uncached (bool) account-info type.
    for &is_upsert in &[false, true] {
        // account_info type that IS cached
        test_new_entry_code_paths_helper([1.0, 2.0], true, is_upsert);

        // account_info type that is NOT cached
        test_new_entry_code_paths_helper([true, false], false, is_upsert);
    }
}
|
|
|
|
|
2021-05-17 09:58:33 -07:00
|
|
|
#[test]
fn test_insert_with_lock_no_ancestors() {
    // Builds an entry directly, shows update_key_if_exists fails for a
    // missing key, then upserts through an explicit bin write lock and
    // verifies visibility only once the slot is in the ancestors.
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let slot = 0;
    let account_info = true;

    let new_entry = WriteAccountMapEntry::new_entry_after_update(
        slot,
        account_info,
        &index.storage.storage,
    );
    assert_eq!(0, account_maps_len_expensive(&index));

    // will fail because key doesn't exist
    let r_account_maps = index.get_account_maps_read_lock(&key.pubkey());
    assert!(!r_account_maps.update_key_if_exists(
        &key.pubkey(),
        &new_entry,
        &mut SlotList::default(),
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    ));
    // release the read lock before taking the write lock below
    drop(r_account_maps);
    // the failed update left the new entry's slot list untouched
    assert_eq!(
        (slot, account_info),
        new_entry.slot_list.read().as_ref().unwrap()[0]
    );

    assert_eq!(0, account_maps_len_expensive(&index));
    // now actually insert the entry under the bin write lock
    let w_account_maps = index.get_account_maps_write_lock(&key.pubkey());
    w_account_maps.upsert(
        &key.pubkey(),
        new_entry,
        &mut SlotList::default(),
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    drop(w_account_maps);
    assert_eq!(1, account_maps_len_expensive(&index));

    // invisible with empty ancestors...
    let mut ancestors = Ancestors::default();
    assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none());
    assert!(index.get(&key.pubkey(), None, None).is_none());

    let mut num = 0;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 0);
    // ...and visible once the slot is an ancestor
    ancestors.insert(slot, 0);
    assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_some());
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 1);
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
#[test]
fn test_insert_wrong_ancestors() {
    // An entry upserted at slot 0 must be invisible when queried with an
    // ancestor set that contains a different slot.
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let mut gc = Vec::new();
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    // a fresh insert should not have reclaimed anything into gc
    assert!(gc.is_empty());

    // ancestors contain only slot 1, so the slot-0 entry is not visible
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none());

    // scanning with these ancestors likewise visits no accounts
    let mut num = 0;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |_pubkey, _index| num += 1,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 0);
}
|
|
|
|
|
|
|
|
#[test]
fn test_insert_with_ancestors() {
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let mut gc = Vec::new();
    // Insert the key at slot 0.
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(gc.is_empty());

    // Ancestors include slot 0, so the lookup must resolve to the (0, true) entry.
    let ancestors = vec![(0, 0)].into_iter().collect();
    let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
    assert_eq!(list.slot_list()[idx], (0, true));

    // Scanning with the same ancestors must visit exactly the one inserted key.
    let mut num = 0;
    let mut found_key = false;
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |pubkey, _index| {
            if pubkey == &key.pubkey() {
                found_key = true
            };
            num += 1
        },
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 1);
    assert!(found_key);
}
|
|
|
|
|
2020-10-21 17:05:27 -07:00
|
|
|
fn setup_accounts_index_keys(num_pubkeys: usize) -> (AccountsIndex<bool>, Vec<Pubkey>) {
|
2021-08-05 09:38:53 -07:00
|
|
|
let index = AccountsIndex::<bool>::default_for_tests();
|
2020-10-21 17:05:27 -07:00
|
|
|
let root_slot = 0;
|
|
|
|
|
|
|
|
let mut pubkeys: Vec<Pubkey> = std::iter::repeat_with(|| {
|
2020-10-19 12:23:14 -07:00
|
|
|
let new_pubkey = solana_sdk::pubkey::new_rand();
|
2020-12-31 18:06:03 -08:00
|
|
|
index.upsert(
|
|
|
|
root_slot,
|
|
|
|
&new_pubkey,
|
|
|
|
&Pubkey::default(),
|
|
|
|
&[],
|
2021-05-10 07:22:48 -07:00
|
|
|
&AccountSecondaryIndexes::default(),
|
2020-12-31 18:06:03 -08:00
|
|
|
true,
|
|
|
|
&mut vec![],
|
2021-08-09 06:58:59 -07:00
|
|
|
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
|
2020-12-31 18:06:03 -08:00
|
|
|
);
|
2020-10-21 17:05:27 -07:00
|
|
|
new_pubkey
|
|
|
|
})
|
|
|
|
.take(num_pubkeys.saturating_sub(1))
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
if num_pubkeys != 0 {
|
|
|
|
pubkeys.push(Pubkey::default());
|
2020-12-31 18:06:03 -08:00
|
|
|
index.upsert(
|
|
|
|
root_slot,
|
|
|
|
&Pubkey::default(),
|
|
|
|
&Pubkey::default(),
|
|
|
|
&[],
|
2021-05-10 07:22:48 -07:00
|
|
|
&AccountSecondaryIndexes::default(),
|
2020-12-31 18:06:03 -08:00
|
|
|
true,
|
|
|
|
&mut vec![],
|
2021-08-09 06:58:59 -07:00
|
|
|
UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
|
2020-12-31 18:06:03 -08:00
|
|
|
);
|
2020-10-21 17:05:27 -07:00
|
|
|
}
|
|
|
|
|
2021-01-11 17:00:23 -08:00
|
|
|
index.add_root(root_slot, false);
|
2020-10-21 17:05:27 -07:00
|
|
|
|
|
|
|
(index, pubkeys)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn run_test_range(
|
|
|
|
index: &AccountsIndex<bool>,
|
|
|
|
pubkeys: &[Pubkey],
|
|
|
|
start_bound: Bound<usize>,
|
|
|
|
end_bound: Bound<usize>,
|
|
|
|
) {
|
|
|
|
// Exclusive `index_start`
|
|
|
|
let (pubkey_start, index_start) = match start_bound {
|
|
|
|
Unbounded => (Unbounded, 0),
|
|
|
|
Included(i) => (Included(pubkeys[i]), i),
|
|
|
|
Excluded(i) => (Excluded(pubkeys[i]), i + 1),
|
|
|
|
};
|
|
|
|
|
|
|
|
// Exclusive `index_end`
|
|
|
|
let (pubkey_end, index_end) = match end_bound {
|
|
|
|
Unbounded => (Unbounded, pubkeys.len()),
|
|
|
|
Included(i) => (Included(pubkeys[i]), i + 1),
|
|
|
|
Excluded(i) => (Excluded(pubkeys[i]), i),
|
|
|
|
};
|
|
|
|
let pubkey_range = (pubkey_start, pubkey_end);
|
|
|
|
|
2021-04-12 08:51:57 -07:00
|
|
|
let ancestors = Ancestors::default();
|
2020-10-21 17:05:27 -07:00
|
|
|
let mut scanned_keys = HashSet::new();
|
2021-08-26 16:12:43 -07:00
|
|
|
index.range_scan_accounts(
|
|
|
|
"",
|
|
|
|
&ancestors,
|
|
|
|
pubkey_range,
|
|
|
|
COLLECT_ALL_UNSORTED_FALSE,
|
|
|
|
|pubkey, _index| {
|
|
|
|
scanned_keys.insert(*pubkey);
|
|
|
|
},
|
|
|
|
);
|
2020-10-21 17:05:27 -07:00
|
|
|
|
|
|
|
let mut expected_len = 0;
|
|
|
|
for key in &pubkeys[index_start..index_end] {
|
|
|
|
expected_len += 1;
|
|
|
|
assert!(scanned_keys.contains(key));
|
|
|
|
}
|
|
|
|
|
|
|
|
assert_eq!(scanned_keys.len(), expected_len);
|
|
|
|
}
|
|
|
|
|
|
|
|
fn run_test_range_indexes(
|
|
|
|
index: &AccountsIndex<bool>,
|
|
|
|
pubkeys: &[Pubkey],
|
|
|
|
start: Option<usize>,
|
|
|
|
end: Option<usize>,
|
|
|
|
) {
|
|
|
|
let start_options = start
|
|
|
|
.map(|i| vec![Included(i), Excluded(i)])
|
|
|
|
.unwrap_or_else(|| vec![Unbounded]);
|
|
|
|
let end_options = end
|
|
|
|
.map(|i| vec![Included(i), Excluded(i)])
|
|
|
|
.unwrap_or_else(|| vec![Unbounded]);
|
|
|
|
|
|
|
|
for start in &start_options {
|
|
|
|
for end in &end_options {
|
|
|
|
run_test_range(index, pubkeys, *start, *end);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn test_range_scan_accounts() {
    // Three batches of keys so ranges can align with and straddle batch
    // boundaries. NOTE: `ITER_BATCH_SIZE` is already `usize`, so the former
    // `as usize` casts were redundant and have been dropped.
    let (index, mut pubkeys) = setup_accounts_index_keys(3 * ITER_BATCH_SIZE);
    pubkeys.sort();

    // Fully unbounded range.
    run_test_range_indexes(&index, &pubkeys, None, None);

    // Half-open ranges on either side.
    run_test_range_indexes(&index, &pubkeys, Some(ITER_BATCH_SIZE), None);

    run_test_range_indexes(&index, &pubkeys, None, Some(2 * ITER_BATCH_SIZE));

    // Range aligned exactly to batch boundaries.
    run_test_range_indexes(
        &index,
        &pubkeys,
        Some(ITER_BATCH_SIZE),
        Some(2 * ITER_BATCH_SIZE),
    );

    // Range one key short of a batch boundary.
    run_test_range_indexes(
        &index,
        &pubkeys,
        Some(ITER_BATCH_SIZE),
        Some(2 * ITER_BATCH_SIZE - 1),
    );

    // Range straddling batch boundaries by one key on both sides.
    run_test_range_indexes(
        &index,
        &pubkeys,
        Some(ITER_BATCH_SIZE - 1),
        Some(2 * ITER_BATCH_SIZE + 1),
    );
}
|
|
|
|
|
|
|
|
/// Populate an index with `num_pubkeys` accounts and verify an unrestricted
/// scan visits each of them exactly once.
fn run_test_scan_accounts(num_pubkeys: usize) {
    let (index, _) = setup_accounts_index_keys(num_pubkeys);
    let ancestors = Ancestors::default();

    let mut scanned_keys = HashSet::new();
    index.unchecked_scan_accounts(
        "",
        &ancestors,
        |pubkey, _index| {
            scanned_keys.insert(*pubkey);
        },
        COLLECT_ALL_UNSORTED_FALSE,
    );
    // Set semantics also guarantee no key was visited under two identities.
    assert_eq!(scanned_keys.len(), num_pubkeys);
}
|
|
|
|
|
|
|
|
#[test]
fn test_scan_accounts() {
    // Cover the empty/singleton edge cases and sizes just around a
    // multiple of the iteration batch size.
    let sizes = [
        0,
        1,
        ITER_BATCH_SIZE * 10,
        ITER_BATCH_SIZE * 10 - 1,
        ITER_BATCH_SIZE * 10 + 1,
    ];
    for &num_pubkeys in &sizes {
        run_test_scan_accounts(num_pubkeys);
    }
}
|
|
|
|
|
|
|
|
#[test]
fn test_accounts_iter_finished() {
    let (index, _) = setup_accounts_index_keys(0);
    let mut iter = index.iter(None::<&Range<Pubkey>>, COLLECT_ALL_UNSORTED_FALSE);
    // Empty index: the iterator is exhausted immediately.
    assert!(iter.next().is_none());
    let mut gc = vec![];
    // Insert a key *after* the iterator has already finished.
    index.upsert(
        0,
        &solana_sdk::pubkey::new_rand(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    // A finished iterator stays finished; it must not see the new entry.
    assert!(iter.next().is_none());
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
#[test]
fn test_is_root() {
    let index = AccountsIndex::<bool>::default_for_tests();
    // Nothing is a root until explicitly added.
    assert!(!index.is_root(0));
    index.add_root(0, false);
    assert!(index.is_root(0));
}
|
|
|
|
|
|
|
|
#[test]
fn test_insert_with_root() {
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let mut gc = Vec::new();
    // Insert the key at slot 0.
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(gc.is_empty());

    // Once slot 0 is rooted, the entry is visible even with no ancestors.
    index.add_root(0, false);
    let (list, idx) = index.get(&key.pubkey(), None, None).unwrap();
    assert_eq!(list.slot_list()[idx], (0, true));
}
|
|
|
|
|
2019-04-28 10:27:37 -07:00
|
|
|
#[test]
fn test_clean_first() {
    let index = AccountsIndex::<bool>::default_for_tests();
    index.add_root(0, false);
    index.add_root(1, false);
    // Cleaning the older root removes it while the newer root survives.
    index.clean_dead_slot(0);
    assert!(index.is_root(1));
    assert!(!index.is_root(0));
}
|
|
|
|
|
|
|
|
#[test]
fn test_clean_last() {
    //this behavior might be undefined, clean up should only occur on older slots
    let index = AccountsIndex::<bool>::default_for_tests();
    index.add_root(0, false);
    index.add_root(1, false);
    // Cleaning the newest root removes it while the older root survives.
    index.clean_dead_slot(1);
    assert!(!index.is_root(1));
    assert!(index.is_root(0));
}
|
|
|
|
|
2020-03-02 21:57:25 -08:00
|
|
|
#[test]
fn test_clean_and_unclean_slot() {
    // Verifies the bookkeeping of `roots`, `uncleaned_roots`, and
    // `previous_uncleaned_roots` across add_root / reset_uncleaned_roots /
    // clean_dead_slot transitions.
    let index = AccountsIndex::<bool>::default_for_tests();

    // Local helpers to cut down the repeated lock-and-read boilerplate.
    let roots_len = || index.roots_tracker.read().unwrap().roots.len();
    let uncleaned_len = || index.roots_tracker.read().unwrap().uncleaned_roots.len();
    let previous_uncleaned_len = || {
        index
            .roots_tracker
            .read()
            .unwrap()
            .previous_uncleaned_roots
            .len()
    };

    assert_eq!(0, uncleaned_len());
    index.add_root(0, false);
    index.add_root(1, false);
    // Newly added roots start out in the uncleaned set.
    assert_eq!(2, uncleaned_len());

    assert_eq!(0, previous_uncleaned_len());
    // Resetting moves the uncleaned set into `previous_uncleaned_roots`.
    index.reset_uncleaned_roots(None);
    assert_eq!(2, roots_len());
    assert_eq!(0, uncleaned_len());
    assert_eq!(2, previous_uncleaned_len());

    index.add_root(2, false);
    index.add_root(3, false);
    assert_eq!(4, roots_len());
    assert_eq!(2, uncleaned_len());
    assert_eq!(2, previous_uncleaned_len());

    // Cleaning slot 1 removes it from `roots` and from the previous
    // uncleaned set; slots 2 and 3 stay in the uncleaned set.
    index.clean_dead_slot(1);
    assert_eq!(3, roots_len());
    assert_eq!(2, uncleaned_len());
    assert_eq!(1, previous_uncleaned_len());

    // Cleaning slot 2 removes it from `roots` and the uncleaned set.
    index.clean_dead_slot(2);
    assert_eq!(2, roots_len());
    assert_eq!(1, uncleaned_len());
    assert_eq!(1, previous_uncleaned_len());
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
#[test]
fn test_update_last_wins() {
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let ancestors = vec![(0, 0)].into_iter().collect();
    let mut gc = Vec::new();
    // First insert at slot 0 with value `true`.
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(gc.is_empty());
    let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
    assert_eq!(list.slot_list()[idx], (0, true));
    // Release the lock on the entry before upserting the same key again.
    drop(list);

    // Second insert at the *same* slot with value `false` replaces the
    // first; the displaced entry is handed back through `gc`.
    let mut gc = Vec::new();
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        false,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert_eq!(gc, vec![(0, true)]);
    let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
    assert_eq!(list.slot_list()[idx], (0, false));
}
|
|
|
|
|
|
|
|
#[test]
fn test_update_new_slot() {
    solana_logger::setup();
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let ancestors = vec![(0, 0)].into_iter().collect();
    let mut gc = Vec::new();
    // Insert at slot 0 with value `true`.
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(gc.is_empty());
    // Insert at a *different* slot (1); the slot-0 entry is kept, so
    // nothing is reclaimed.
    index.upsert(
        1,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        false,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(gc.is_empty());
    // The ancestors set picks which slot's entry a lookup resolves to.
    let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
    assert_eq!(list.slot_list()[idx], (0, true));
    let ancestors = vec![(1, 0)].into_iter().collect();
    let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap();
    assert_eq!(list.slot_list()[idx], (1, false));
}
|
|
|
|
|
|
|
|
#[test]
fn test_update_gc_purged_slot() {
    let key = Keypair::new();
    let index = AccountsIndex::<bool>::default_for_tests();
    let mut gc = Vec::new();
    // Build up a slot list for the key across slots 0..=3.
    index.upsert(
        0,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(gc.is_empty());
    index.upsert(
        1,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        false,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    index.upsert(
        2,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    index.upsert(
        3,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    // Root slots 0, 1, and 3 (slot 2 is deliberately left unrooted).
    index.add_root(0, false);
    index.add_root(1, false);
    index.add_root(3, false);
    // One more insert at an unrooted slot after roots exist.
    index.upsert(
        4,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        true,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );

    // Updating index should not purge older roots, only purges
    // previous updates within the same slot
    assert_eq!(gc, vec![]);
    // With no ancestors, the lookup resolves to the latest root: slot 3.
    let (list, idx) = index.get(&key.pubkey(), None, None).unwrap();
    assert_eq!(list.slot_list()[idx], (3, true));

    // A scan with empty ancestors must also surface the slot-3 entry, once.
    let mut num = 0;
    let mut found_key = false;
    index.unchecked_scan_accounts(
        "",
        &Ancestors::default(),
        |pubkey, _index| {
            if pubkey == &key.pubkey() {
                found_key = true;
                assert_eq!(_index, (&true, 3));
            };
            num += 1
        },
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(num, 1);
    assert!(found_key);
}
|
2019-12-11 11:11:31 -08:00
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
/// Total number of entries across all bins of the index. Read-locks every
/// bin in turn, hence "expensive"; intended for test assertions only.
fn account_maps_len_expensive<T: IndexValue>(index: &AccountsIndex<T>) -> usize {
    let mut total = 0;
    for bin_map in index.account_maps.iter() {
        total += bin_map.read().unwrap().len();
    }
    total
}
|
|
|
|
|
2019-12-11 11:11:31 -08:00
|
|
|
#[test]
fn test_purge() {
    let key = Keypair::new();
    let index = AccountsIndex::<u64>::default_for_tests();
    let mut gc = Vec::new();
    assert_eq!(0, account_maps_len_expensive(&index));
    // Insert value 12 at slot 1.
    index.upsert(
        1,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        12,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert_eq!(1, account_maps_len_expensive(&index));

    // Overwrite the same slot with value 10; still only one map entry.
    index.upsert(
        1,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        10,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert_eq!(1, account_maps_len_expensive(&index));

    // Slot 1 is not yet a root, so there is nothing to purge.
    let purges = index.purge_roots(&key.pubkey());
    assert_eq!(purges, (vec![], false));
    index.add_root(1, false);

    // Now that slot 1 is rooted, the (1, 10) entry is reported as purged.
    let purges = index.purge_roots(&key.pubkey());
    assert_eq!(purges, (vec![(1, 10)], true));

    // Purging affects slot lists; the key itself stays in the account map.
    assert_eq!(1, account_maps_len_expensive(&index));
    index.upsert(
        1,
        &key.pubkey(),
        &Pubkey::default(),
        &[],
        &AccountSecondaryIndexes::default(),
        9,
        &mut gc,
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert_eq!(1, account_maps_len_expensive(&index));
}
|
2020-09-28 16:04:46 -07:00
|
|
|
|
|
|
|
#[test]
fn test_latest_slot() {
    // NOTE: `latest_slot` returns an *index into `slot_slice`*, not a slot.
    let slot_slice = vec![(0, true), (5, true), (3, true), (7, true)];
    let index = AccountsIndex::<bool>::default_for_tests();

    // No ancestors, no root, should return None
    assert!(index.latest_slot(None, &slot_slice, None).is_none());

    // Given a root, should return the root (index 1 is the (5, _) entry)
    index.add_root(5, false);
    assert_eq!(index.latest_slot(None, &slot_slice, None).unwrap(), 1);

    // Given a max_root == root, should still return the root
    assert_eq!(index.latest_slot(None, &slot_slice, Some(5)).unwrap(), 1);

    // Given a max_root < root, should filter out the root
    assert!(index.latest_slot(None, &slot_slice, Some(4)).is_none());

    // Given a max_root, should filter out roots < max_root, but specified
    // ancestors should not be affected (index 3 is the (7, _) entry)
    let ancestors = vec![(3, 1), (7, 1)].into_iter().collect();
    assert_eq!(
        index
            .latest_slot(Some(&ancestors), &slot_slice, Some(4))
            .unwrap(),
        3
    );
    assert_eq!(
        index
            .latest_slot(Some(&ancestors), &slot_slice, Some(7))
            .unwrap(),
        3
    );

    // Given no max_root, should just return the greatest ancestor or root
    assert_eq!(
        index
            .latest_slot(Some(&ancestors), &slot_slice, None)
            .unwrap(),
        3
    );
}
|
2020-10-03 15:18:58 -07:00
|
|
|
|
2020-12-31 18:06:03 -08:00
|
|
|
/// Insert one account (carrying one secondary-index key) at several slots,
/// then purge exactly those slots and verify both directions of the
/// secondary index end up empty.
fn run_test_purge_exact_secondary_index<
    SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send,
>(
    index: &AccountsIndex<bool>,
    secondary_index: &SecondaryIndex<SecondaryIndexEntryType>,
    key_start: usize,
    key_end: usize,
    secondary_indexes: &AccountSecondaryIndexes,
) {
    // No roots, should be no reclaims
    let slots = vec![1, 2, 5, 9];
    let index_key = Pubkey::new_unique();
    let account_key = Pubkey::new_unique();

    // Place `index_key` at the token-account field spanning
    // [key_start, key_end) so the secondary index picks it up.
    let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()];
    account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes()));

    // Insert slots into secondary index
    for slot in &slots {
        index.upsert(
            *slot,
            &account_key,
            // Make sure these accounts are added to secondary index
            &inline_spl_token_v2_0::id(),
            &account_data,
            secondary_indexes,
            true,
            &mut vec![],
            UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
        );
    }

    // Only one top level index entry exists
    assert_eq!(secondary_index.index.get(&index_key).unwrap().len(), 1);

    // In the reverse index, one account maps across multiple slots
    // to the same top level key
    assert_eq!(
        secondary_index
            .reverse_index
            .get(&account_key)
            .unwrap()
            .value()
            .read()
            .unwrap()
            .len(),
        1
    );

    // Purge every slot the account was written at...
    index.purge_exact(
        &account_key,
        &slots.into_iter().collect::<HashSet<Slot>>(),
        &mut vec![],
    );

    // ...then dropping the dead key must clear both index directions.
    index.handle_dead_keys(&[&account_key], secondary_indexes);
    assert!(secondary_index.index.is_empty());
    assert!(secondary_index.reverse_index.is_empty());
}
|
|
|
|
|
|
|
|
#[test]
fn test_purge_exact_dashmap_secondary_index() {
    // Exercise purge_exact against the DashMap-backed (mint) secondary index.
    let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state();
    let index = AccountsIndex::<bool>::default_for_tests();
    run_test_purge_exact_secondary_index(
        &index,
        &index.spl_token_mint_index,
        key_start,
        key_end,
        &secondary_indexes,
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_purge_exact_rwlock_secondary_index() {
    // Exercise purge_exact against the RwLock-backed (owner) secondary index.
    let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state();
    let index = AccountsIndex::<bool>::default_for_tests();
    run_test_purge_exact_secondary_index(
        &index,
        &index.spl_token_owner_index,
        key_start,
        key_end,
        &secondary_indexes,
    );
}
|
|
|
|
|
2020-10-03 15:18:58 -07:00
|
|
|
#[test]
fn test_purge_older_root_entries() {
    // No roots, should be no reclaims
    let index = AccountsIndex::<bool>::default_for_tests();
    let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    let mut reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);
    assert!(reclaims.is_empty());
    assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]);

    // Add a later root, earlier slots should be reclaimed
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    index.add_root(1, false);
    // Note 2 is not a root
    index.add_root(5, false);
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);
    assert_eq!(reclaims, vec![(1, true), (2, true)]);
    assert_eq!(slot_list, vec![(5, true), (9, true)]);

    // Add a later root that is not in the list, should not affect the outcome
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    index.add_root(6, false);
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);
    assert_eq!(reclaims, vec![(1, true), (2, true)]);
    assert_eq!(slot_list, vec![(5, true), (9, true)]);

    // Pass a max root >= than any root in the slot list, should not affect
    // outcome
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(6));
    assert_eq!(reclaims, vec![(1, true), (2, true)]);
    assert_eq!(slot_list, vec![(5, true), (9, true)]);

    // Pass a max root, earlier slots should be reclaimed
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(5));
    assert_eq!(reclaims, vec![(1, true), (2, true)]);
    assert_eq!(slot_list, vec![(5, true), (9, true)]);

    // Pass a max root 2. This means the latest root <= 2 is 1 because 2 is not a root,
    // and no entry is older than root 1, so nothing will be purged
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(2));
    assert!(reclaims.is_empty());
    assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]);

    // Pass a max root 1. The latest root <= 1 is 1 itself, and no entry is
    // older than that root, so nothing will be purged
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(1));
    assert!(reclaims.is_empty());
    assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]);

    // Pass a max root that doesn't exist in the list but is greater than
    // some of the roots in the list, shouldn't return those smaller roots
    slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
    reclaims = vec![];
    index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(7));
    assert_eq!(reclaims, vec![(1, true), (2, true)]);
    assert_eq!(slot_list, vec![(5, true), (9, true)]);
}
|
2020-12-31 18:06:03 -08:00
|
|
|
|
2021-05-12 15:29:30 -07:00
|
|
|
fn check_secondary_index_mapping_correct<SecondaryIndexEntryType>(
|
2020-12-31 18:06:03 -08:00
|
|
|
secondary_index: &SecondaryIndex<SecondaryIndexEntryType>,
|
2021-05-12 15:29:30 -07:00
|
|
|
secondary_index_keys: &[Pubkey],
|
2020-12-31 18:06:03 -08:00
|
|
|
account_key: &Pubkey,
|
|
|
|
) where
|
|
|
|
SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send,
|
|
|
|
{
|
|
|
|
// Check secondary index has unique mapping from secondary index key
|
|
|
|
// to the account key and slot
|
2021-05-12 15:29:30 -07:00
|
|
|
for secondary_index_key in secondary_index_keys {
|
|
|
|
assert_eq!(secondary_index.index.len(), secondary_index_keys.len());
|
|
|
|
let account_key_map = secondary_index.get(secondary_index_key);
|
|
|
|
assert_eq!(account_key_map.len(), 1);
|
|
|
|
assert_eq!(account_key_map, vec![*account_key]);
|
|
|
|
}
|
|
|
|
// Check reverse index contains all of the `secondary_index_keys`
|
|
|
|
let secondary_index_key_map = secondary_index.reverse_index.get(account_key).unwrap();
|
|
|
|
assert_eq!(
|
|
|
|
&*secondary_index_key_map.value().read().unwrap(),
|
|
|
|
secondary_index_keys
|
|
|
|
);
|
2020-12-31 18:06:03 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Generic driver for the spl-token secondary-index tests.
///
/// `key_start..key_end` is the byte range inside packed spl-token account
/// data where the indexed field (mint or owner) lives; `secondary_index`
/// must be the index owned by `index` that tracks that field.
/// NOTE(review): assumes `key_end - key_start` equals a pubkey's byte length —
/// confirm against the `create_*_secondary_index_state()` callers.
fn run_test_secondary_indexes<
    SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send,
>(
    index: &AccountsIndex<bool>,
    secondary_index: &SecondaryIndex<SecondaryIndexEntryType>,
    key_start: usize,
    key_end: usize,
    secondary_indexes: &AccountSecondaryIndexes,
) {
    // Local mutable copy so the key include/exclude filter can be varied below.
    let mut secondary_indexes = secondary_indexes.clone();
    let account_key = Pubkey::new_unique();
    let index_key = Pubkey::new_unique();
    let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()];
    // Plant `index_key` at the field offset that the secondary index reads.
    account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes()));

    // Wrong program id: owner is not the spl-token program, so nothing is indexed.
    index.upsert(
        0,
        &account_key,
        &Pubkey::default(),
        &account_data,
        &secondary_indexes,
        true,
        &mut vec![],
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(secondary_index.index.is_empty());
    assert!(secondary_index.reverse_index.is_empty());

    // Wrong account data size: truncated data cannot be a token account, so
    // nothing is indexed either.
    index.upsert(
        0,
        &account_key,
        &inline_spl_token_v2_0::id(),
        &account_data[1..],
        &secondary_indexes,
        true,
        &mut vec![],
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert!(secondary_index.index.is_empty());
    assert!(secondary_index.reverse_index.is_empty());

    // No key filter at all: every key is eligible for indexing.
    secondary_indexes.keys = None;

    // Just right. Inserting the same index multiple times should be ok
    for _ in 0..2 {
        index.update_secondary_indexes(
            &account_key,
            &inline_spl_token_v2_0::id(),
            &account_data,
            &secondary_indexes,
        );
        check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key);
    }

    // included: an include-list filter that names `index_key` must still index it.
    assert!(!secondary_index.index.is_empty());
    assert!(!secondary_index.reverse_index.is_empty());
    secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude {
        keys: [index_key].iter().cloned().collect::<HashSet<_>>(),
        exclude: false,
    });
    secondary_index.index.clear();
    secondary_index.reverse_index.clear();
    index.update_secondary_indexes(
        &account_key,
        &inline_spl_token_v2_0::id(),
        &account_data,
        &secondary_indexes,
    );
    assert!(!secondary_index.index.is_empty());
    assert!(!secondary_index.reverse_index.is_empty());
    check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key);

    // not-excluded: an empty exclude-list filter must not block `index_key`.
    secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude {
        keys: [].iter().cloned().collect::<HashSet<_>>(),
        exclude: true,
    });
    secondary_index.index.clear();
    secondary_index.reverse_index.clear();
    index.update_secondary_indexes(
        &account_key,
        &inline_spl_token_v2_0::id(),
        &account_data,
        &secondary_indexes,
    );
    assert!(!secondary_index.index.is_empty());
    assert!(!secondary_index.reverse_index.is_empty());
    check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key);

    secondary_indexes.keys = None;

    // Empty the primary index entry's slot list so the account key becomes dead.
    index
        .get_account_write_entry(&account_key)
        .unwrap()
        .slot_list_mut(|slot_list| slot_list.clear());

    // Everything should be deleted
    index.handle_dead_keys(&[&account_key], &secondary_indexes);
    assert!(secondary_index.index.is_empty());
    assert!(secondary_index.reverse_index.is_empty());
}
|
|
|
|
|
|
|
|
#[test]
fn test_dashmap_secondary_index() {
    // Run the generic secondary-index scenario against the DashMap-backed
    // spl-token mint index.
    let (start, end, indexes) = create_dashmap_secondary_index_state();
    let accounts_index = AccountsIndex::<bool>::default_for_tests();
    run_test_secondary_indexes(
        &accounts_index,
        &accounts_index.spl_token_mint_index,
        start,
        end,
        &indexes,
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_rwlock_secondary_index() {
    // Run the generic secondary-index scenario against the RwLock-backed
    // spl-token owner index.
    let (start, end, indexes) = create_rwlock_secondary_index_state();
    let accounts_index = AccountsIndex::<bool>::default_for_tests();
    run_test_secondary_indexes(
        &accounts_index,
        &accounts_index.spl_token_owner_index,
        start,
        end,
        &indexes,
    );
}
|
|
|
|
|
|
|
|
/// Generic driver verifying secondary-index behavior when the same account is
/// overwritten in the same slot and updated again on a later (rooted) slot.
///
/// `index_key_start..index_key_end` is the byte range inside packed spl-token
/// account data where the indexed field (mint or owner) lives.
fn run_test_secondary_indexes_same_slot_and_forks<
    SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send,
>(
    index: &AccountsIndex<bool>,
    secondary_index: &SecondaryIndex<SecondaryIndexEntryType>,
    index_key_start: usize,
    index_key_end: usize,
    secondary_indexes: &AccountSecondaryIndexes,
) {
    let account_key = Pubkey::new_unique();
    let secondary_key1 = Pubkey::new_unique();
    let secondary_key2 = Pubkey::new_unique();
    let slot = 1;
    // Two payloads for the same account, differing only in the indexed field.
    let mut account_data1 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()];
    account_data1[index_key_start..index_key_end]
        .clone_from_slice(&(secondary_key1.to_bytes()));
    let mut account_data2 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()];
    account_data2[index_key_start..index_key_end]
        .clone_from_slice(&(secondary_key2.to_bytes()));

    // First write one mint index
    index.upsert(
        slot,
        &account_key,
        &inline_spl_token_v2_0::id(),
        &account_data1,
        secondary_indexes,
        true,
        &mut vec![],
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );

    // Now write a different mint index for the same account (same slot)
    index.upsert(
        slot,
        &account_key,
        &inline_spl_token_v2_0::id(),
        &account_data2,
        secondary_indexes,
        true,
        &mut vec![],
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );

    // Both pubkeys will now be present in the index
    check_secondary_index_mapping_correct(
        secondary_index,
        &[secondary_key1, secondary_key2],
        &account_key,
    );

    // If a later slot also introduces secondary_key1, then it should still exist in the index
    let later_slot = slot + 1;
    index.upsert(
        later_slot,
        &account_key,
        &inline_spl_token_v2_0::id(),
        &account_data1,
        secondary_indexes,
        true,
        &mut vec![],
        UPSERT_PREVIOUS_SLOT_ENTRY_WAS_CACHED_FALSE,
    );
    assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]);

    // If we set a root at `later_slot`, and clean, then even though the account with secondary_key1
    // was outdated by the update in the later slot, the primary account key is still alive,
    // so both secondary keys will still be kept alive.
    index.add_root(later_slot, false);
    index
        .get_account_write_entry(&account_key)
        .unwrap()
        .slot_list_mut(|slot_list| {
            index.purge_older_root_entries(slot_list, &mut vec![], None)
        });

    check_secondary_index_mapping_correct(
        secondary_index,
        &[secondary_key1, secondary_key2],
        &account_key,
    );

    // Removing the remaining entry for this pubkey in the index should mark the
    // pubkey as dead and finally remove all the secondary indexes
    let mut reclaims = vec![];
    index.purge_exact(&account_key, &later_slot, &mut reclaims);
    index.handle_dead_keys(&[&account_key], secondary_indexes);
    assert!(secondary_index.index.is_empty());
    assert!(secondary_index.reverse_index.is_empty());
}
|
|
|
|
|
|
|
|
#[test]
fn test_dashmap_secondary_index_same_slot_and_forks() {
    // Same-slot overwrite / later-fork scenario against the DashMap-backed
    // spl-token mint index.
    let (start, end, indexes) = create_dashmap_secondary_index_state();
    let accounts_index = AccountsIndex::<bool>::default_for_tests();
    run_test_secondary_indexes_same_slot_and_forks(
        &accounts_index,
        &accounts_index.spl_token_mint_index,
        start,
        end,
        &indexes,
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_rwlock_secondary_index_same_slot_and_forks() {
    // Same-slot overwrite / later-fork scenario against the RwLock-backed
    // spl-token owner index.
    let (start, end, indexes) = create_rwlock_secondary_index_state();
    let accounts_index = AccountsIndex::<bool>::default_for_tests();
    run_test_secondary_indexes_same_slot_and_forks(
        &accounts_index,
        &accounts_index.spl_token_owner_index,
        start,
        end,
        &indexes,
    );
}
|
2021-01-24 09:50:19 -08:00
|
|
|
|
2021-09-14 15:51:07 -07:00
|
|
|
// Marker impls so plain `bool`/`u64` can serve as accounts-index values in tests.
impl IndexValue for bool {}
impl IndexValue for u64 {}
|
2021-08-06 06:39:34 -07:00
|
|
|
impl IsCached for bool {
|
|
|
|
fn is_cached(&self) -> bool {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
impl IsCached for u64 {
|
|
|
|
fn is_cached(&self) -> bool {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
2021-01-24 09:50:19 -08:00
|
|
|
impl ZeroLamport for bool {
|
|
|
|
fn is_zero_lamport(&self) -> bool {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl ZeroLamport for u64 {
|
|
|
|
fn is_zero_lamport(&self) -> bool {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
2021-07-15 08:26:50 -07:00
|
|
|
|
|
|
|
#[test]
fn test_bin_start_and_range() {
    // Verify how `AccountsIndexIterator` maps pubkey range bounds onto a
    // (start_bin, bin_count) pair.
    let index = AccountsIndex::<bool>::default_for_tests();
    // No range: start at bin 0 and cover "all" bins (usize::MAX saturates).
    let iter = AccountsIndexIterator::new(
        &index,
        None::<&RangeInclusive<Pubkey>>,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!((0, usize::MAX), iter.bin_start_and_range());

    let key_0 = Pubkey::new(&[0; 32]);
    let key_ff = Pubkey::new(&[0xff; 32]);

    // Full inclusive range: every bin.
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&RangeInclusive::new(key_0, key_ff)),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    let bins = index.bins();
    assert_eq!((0, bins), iter.bin_start_and_range());
    // Reversed (empty) range: starts at the last bin, covers zero bins.
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&RangeInclusive::new(key_ff, key_0)),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!((bins - 1, 0), iter.bin_start_and_range());
    // Unbounded upper end: covers "all" bins from the start bin onward.
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&(Included(key_0), Unbounded)),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!((0, usize::MAX), iter.bin_start_and_range());
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&(Included(key_ff), Unbounded)),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!((bins - 1, usize::MAX), iter.bin_start_and_range());

    // Sanity-check the consumption pattern for (start, range): `take(usize::MAX)`
    // saturates instead of overflowing, so a huge range value is safe.
    // (`(0..2)` is already an Iterator; the former `.into_iter()` was redundant.)
    assert_eq!(
        (0..2).skip(1).take(usize::MAX).collect::<Vec<_>>(),
        vec![1]
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_start_end_bin() {
    // Verify how `AccountsIndexIterator` maps the lower/upper pubkey bounds of
    // a range onto `start_bin()` / `end_bin_inclusive()`.
    let index = AccountsIndex::<bool>::default_for_tests();
    assert_eq!(index.bins(), BINS_FOR_TESTING);
    let iter = AccountsIndexIterator::new(
        &index,
        None::<&RangeInclusive<Pubkey>>,
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(iter.start_bin(), 0); // no range, so 0
    assert_eq!(iter.end_bin_inclusive(), usize::MAX); // no range, so max

    // Lowest possible pubkey: all bound flavors land in bin 0.
    // NOTE(review): even an (Excluded, Excluded) bound pair still reports bin 0 —
    // the bin bounds are computed from the keys, not tightened by exclusivity.
    let key = Pubkey::new(&[0; 32]);
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&RangeInclusive::new(key, key)),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(iter.start_bin(), 0); // start at pubkey 0, so 0
    assert_eq!(iter.end_bin_inclusive(), 0); // end at pubkey 0, so 0
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&(Included(key), Excluded(key))),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(iter.start_bin(), 0); // start at pubkey 0, so 0
    assert_eq!(iter.end_bin_inclusive(), 0); // end at pubkey 0, so 0
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&(Excluded(key), Excluded(key))),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(iter.start_bin(), 0); // start at pubkey 0, so 0
    assert_eq!(iter.end_bin_inclusive(), 0); // end at pubkey 0, so 0

    // Highest possible pubkey: all bound flavors land in the last bin.
    let key = Pubkey::new(&[0xff; 32]);
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&RangeInclusive::new(key, key)),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    let bins = index.bins();
    assert_eq!(iter.start_bin(), bins - 1); // start at highest possible pubkey, so bins - 1
    assert_eq!(iter.end_bin_inclusive(), bins - 1);
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&(Included(key), Excluded(key))),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(iter.start_bin(), bins - 1); // start at highest possible pubkey, so bins - 1
    assert_eq!(iter.end_bin_inclusive(), bins - 1);
    let iter = AccountsIndexIterator::new(
        &index,
        Some(&(Excluded(key), Excluded(key))),
        COLLECT_ALL_UNSORTED_FALSE,
    );
    assert_eq!(iter.start_bin(), bins - 1); // start at highest possible pubkey, so bins - 1
    assert_eq!(iter.end_bin_inclusive(), bins - 1);
}
|
2021-08-05 09:15:26 -07:00
|
|
|
|
|
|
|
#[test]
#[should_panic(expected = "bins.is_power_of_two()")]
fn test_illegal_bins() {
    // `bins` must be a power of two; 3 must trip the constructor's assertion.
    // Struct-update syntax avoids the default-then-reassign pattern (and the
    // `clippy::field_reassign_with_default` allow it required).
    let config = AccountsIndexConfig {
        bins: Some(3),
        ..AccountsIndexConfig::default()
    };
    AccountsIndex::<bool>::new(Some(config));
}
|
2019-04-15 17:15:50 -07:00
|
|
|
}
|