Revert "Add an RPC API that can query the list of Top N secondary index keys and their sizes (#28887)" (#33121)
* Revert "Add an RPC API that can query the list of Top N secondary index keys and their sizes (#28887)" This reverts commit1e3d6349aa
. * Revert "Add Admin RPC Front End for Top N Secondary Index Key Sizes Query. (#29352)" This reverts commitaa353e4b83
. * fix test uses * fmt
parent 330d6200e2
commit fb1ba216f5
@@ -2691,14 +2691,6 @@ impl AccountsDb {
         AccountsDb::new_for_tests_with_caching(Vec::new(), &ClusterType::Development)
     }
 
-    pub fn new_single_for_tests_with_secondary_indexes(
-        secondary_indexes: AccountSecondaryIndexes,
-    ) -> Self {
-        let mut accounts_db = AccountsDb::new_single_for_tests();
-        accounts_db.account_indexes = secondary_indexes;
-        accounts_db
-    }
-
     fn next_id(&self) -> AppendVecId {
         let next_id = self.next_id.fetch_add(1, Ordering::AcqRel);
         assert!(next_id != AppendVecId::MAX, "We've run out of storage ids!");
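The removed constructor was a thin test-only convenience: build a default single-node AccountsDb, then overwrite its `account_indexes`. The deleted `test_get_largest_keys` further down used it like this (excerpted from that test):

    let account_indexes = AccountSecondaryIndexes {
        keys: None,
        indexes: HashSet::from([AccountIndex::ProgramId]),
    };
    let accounts_db = AccountsDb::new_single_for_tests_with_secondary_indexes(account_indexes);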
@@ -9928,27 +9920,23 @@ pub mod tests {
         crate::{
             account_info::StoredSize,
             account_storage::meta::{AccountMeta, StoredMeta},
-            accounts::Accounts,
             accounts_hash::MERKLE_FANOUT,
             accounts_index::{
-                tests::*, AccountIndex, AccountSecondaryIndexes,
-                AccountSecondaryIndexesIncludeExclude, ReadAccountMapEntry, RefCount,
+                tests::*, AccountSecondaryIndexesIncludeExclude, ReadAccountMapEntry, RefCount,
             },
             append_vec::{test_utils::TempFile, AppendVecStoredAccountMeta},
             cache_hash_data::CacheHashDataFile,
             inline_spl_token,
-            secondary_index::MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
         },
         assert_matches::assert_matches,
-        itertools::Itertools,
-        rand::{distributions::Uniform, prelude::SliceRandom, thread_rng, Rng},
+        rand::{prelude::SliceRandom, thread_rng, Rng},
         solana_sdk::{
             account::{
                 accounts_equal, Account, AccountSharedData, ReadableAccount, WritableAccount,
             },
             hash::HASH_BYTES,
             pubkey::PUBKEY_BYTES,
             system_program,
         },
         std::{
             iter::FromIterator,
@@ -18111,215 +18099,4 @@ pub mod tests {
         let hashes = hashes.into_iter().collect();
         AccountsHasher::compute_merkle_root_recurse(hashes, MERKLE_FANOUT)
     }
-
-    #[test]
-    fn test_get_largest_keys() {
-        solana_logger::setup();
-        // Constants
-        const NUM_DUMMY_ACCOUNTS: usize = 50;
-        const MAX_CHILD_ACCOUNTS: usize = 100;
-        let mut slot = 0;
-
-        // Set secondary indexes
-        let account_indexes = AccountSecondaryIndexes {
-            keys: None,
-            indexes: HashSet::from([AccountIndex::ProgramId]),
-        };
-
-        // AccountsDb setup
-        let accounts_db = AccountsDb::new_single_for_tests_with_secondary_indexes(account_indexes);
-
-        // Assert that the list is empty; no accounts have been added yet.
-        let mut test_largest_keys = accounts_db.accounts_index.get_largest_keys(
-            &AccountIndex::ProgramId,
-            MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        );
-        assert_eq!(0, test_largest_keys.len());
-
-        // Add some basic system-owned accounts
-        let mut dummy_account_pubkeys = Vec::with_capacity(NUM_DUMMY_ACCOUNTS);
-        let mut num_generator = thread_rng();
-        let key_size_range = Uniform::new_inclusive(0, MAX_CHILD_ACCOUNTS);
-        for i in 1..=NUM_DUMMY_ACCOUNTS {
-            let pubkey = Pubkey::new_unique();
-            dummy_account_pubkeys.push(pubkey);
-            let account = AccountSharedData::from(Account {
-                lamports: 11111111,
-                owner: system_program::id(),
-                ..Account::default()
-            });
-            // Store the account in the AccountsDb
-            accounts_db.store_for_tests(slot, &[(&dummy_account_pubkeys[i - 1], &account)]);
-            slot += 1;
-            // Check that the system-owned key count increments each time
-            test_largest_keys = accounts_db.accounts_index.get_largest_keys(
-                &AccountIndex::ProgramId,
-                MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-            );
-            assert_eq!(test_largest_keys.len(), 1);
-            let number_system_owned_accounts = test_largest_keys[0].0;
-            assert_eq!(i, number_system_owned_accounts);
-        }
-
-        // Now add a random number of accounts, each owned by one of the newly
-        // created dummy pubkeys
-        for dummy_account in &dummy_account_pubkeys {
-            // Add child accounts to each dummy account
-            let num_children = (&mut num_generator).sample_iter(key_size_range).next();
-            for j in 0..num_children.unwrap_or(0) {
-                let child_pubkey = Pubkey::new_unique();
-                let child_account = AccountSharedData::from(Account {
-                    lamports: ((j as u64) + 1) * 1000,
-                    owner: *dummy_account,
-                    ..Account::default()
-                });
-                accounts_db.store_for_tests(slot, &[(&child_pubkey, &child_account)]);
-                slot += 1;
-            }
-            // Check for entries with the same key size, for sub-sorting by pubkey
-            let existing_key_size_position = test_largest_keys
-                .iter()
-                .position(|(x, _)| *x == num_children.unwrap_or(0));
-            // Find where it should go and insert it
-            let key_position = match test_largest_keys
-                .binary_search_by_key(&num_children.unwrap_or(0), |(size, _)| *size)
-            {
-                Ok(found_position) => found_position,
-                Err(would_be_position) => would_be_position,
-            };
-            test_largest_keys.insert(key_position, (num_children.unwrap_or(0), *dummy_account));
-            // If there were indeed more elements with the same key size, sort them by pubkey
-            if existing_key_size_position.is_some() {
-                // Obtain a slice of mutable references to all elements with the same key size
-                let mut sub_slice = test_largest_keys
-                    .split_mut(|(k, _)| *k != num_children.unwrap_or(0))
-                    .flatten()
-                    .collect_vec();
-                // Sort them...
-                let mut sorting_buffer = sub_slice.iter().map(|x| *(*x)).collect_vec();
-                sorting_buffer.sort_unstable_by_key(|(_, v)| *v);
-                // Copy back into the list
-                for i in 0..sub_slice.len() {
-                    *(sub_slice[i]) = (sorting_buffer[i].0, sorting_buffer[i].1);
-                }
-            }
-            // Prune the list
-            while test_largest_keys.len() > MAX_NUM_LARGEST_INDEX_KEYS_RETURNED {
-                test_largest_keys.remove(0);
-            }
-        }
-
-        // Verify that the secondary index list matches the expected list built above.
-        let largest_keys = accounts_db.accounts_index.get_largest_keys(
-            &AccountIndex::ProgramId,
-            MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        );
-        // Reverse the tracking vector and check for equality.
-        // Note: the backend stores the `key_size_index` in ascending key size, but
-        // `get_largest_keys` returns the data in descending order, i.e. the largest first.
-        test_largest_keys = test_largest_keys.into_iter().rev().collect_vec();
-        assert_eq!(test_largest_keys, largest_keys);
-
-        // Test queries for a partial list and past the max return size
-        let mut largest_program_id_keys = Vec::<(usize, Pubkey)>::new();
-        for i in 0..=MAX_NUM_LARGEST_INDEX_KEYS_RETURNED + 1 {
-            largest_program_id_keys = accounts_db
-                .accounts_index
-                .get_largest_keys(&AccountIndex::ProgramId, i);
-            if i <= MAX_NUM_LARGEST_INDEX_KEYS_RETURNED {
-                assert_eq!(largest_program_id_keys.len(), i);
-            } else {
-                assert_eq!(
-                    largest_program_id_keys.len(),
-                    MAX_NUM_LARGEST_INDEX_KEYS_RETURNED
-                );
-            }
-        }
-
-        // Root the bank, preparing for removal
-        (0..slot).for_each(|slot| {
-            accounts_db.calculate_accounts_delta_hash(slot);
-            accounts_db.add_root_and_flush_write_cache(slot);
-        });
-
-        // Test removal of keys
-        // First just remove a single key
-        let mut smallest_key: Pubkey;
-        let mut smallest_key_size: usize;
-        let mut smallest_key_inner_keys: Vec<Pubkey>;
-        let mut try_again = 0;
-        let zero_lamport_account =
-            AccountSharedData::new(0, 0, AccountSharedData::default().owner());
-        loop {
-            smallest_key = largest_program_id_keys[largest_program_id_keys.len() - 1 - try_again].1;
-            smallest_key_size =
-                largest_program_id_keys[largest_program_id_keys.len() - 1 - try_again].0;
-            let mut collector = Vec::new();
-            accounts_db
-                .scan_accounts(
-                    &Ancestors::default(),
-                    0,
-                    |some_account_tuple| {
-                        if let Some(mapped_account_tuple) = some_account_tuple
-                            .filter(|(_, account, _)| {
-                                Accounts::is_loadable(account.lamports())
-                                    && account.owner() == &smallest_key
-                            })
-                            .map(|(pubkey, account, _slot)| (*pubkey, account))
-                        {
-                            collector.push(mapped_account_tuple)
-                        }
-                    },
-                    &ScanConfig::new(true),
-                )
-                .ok();
-            smallest_key_inner_keys = collector.into_iter().map(|(k, _)| k).collect_vec();
-            let single_inner_key = smallest_key_inner_keys.pop().unwrap();
-            // Overwrite the account as a 0-lamport account and clean.
-            accounts_db.store_for_tests(slot, &[(&single_inner_key, &zero_lamport_account)]);
-            accounts_db.calculate_accounts_delta_hash(slot);
-            accounts_db.add_root_and_flush_write_cache(slot);
-            slot += 1;
-            accounts_db.clean_accounts_for_tests();
-            // Read back
-            largest_program_id_keys = accounts_db.accounts_index.get_largest_keys(
-                &AccountIndex::ProgramId,
-                MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-            );
-            // Ensure the check below compares the same pubkey, in case there were ties in the list for key size.
-            if largest_program_id_keys[largest_program_id_keys.len() - 1 - try_again].1
-                == smallest_key
-            {
-                break;
-            }
-            // If there was a duplicate key size, just move up in the largest-key list;
-            // worst case, use the second-largest key and keep removing until the tie is broken.
-            else if try_again < largest_program_id_keys.len() - 2 {
-                try_again += 1;
-            }
-        }
-        // Make sure the outer key's size decreased
-        assert_eq!(
-            smallest_key_size - 1,
-            largest_program_id_keys[largest_program_id_keys.len() - 1 - try_again].0
-        );
-
-        // Test removal of multiple keys
-        for key in smallest_key_inner_keys {
-            accounts_db.store_for_tests(slot, &[(&key, &zero_lamport_account)]);
-        }
-        accounts_db.calculate_accounts_delta_hash(slot);
-        accounts_db.add_root_and_flush_write_cache(slot);
-        accounts_db.clean_accounts_for_tests();
-        // Read back
-        largest_program_id_keys = accounts_db.accounts_index.get_largest_keys(
-            &AccountIndex::ProgramId,
-            MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        );
-        // Since all inner keys were removed, make sure the outer key is gone too.
-        let outer_key_removed = !largest_program_id_keys
-            .iter()
-            .any(|(_, v)| *v == smallest_key);
-        assert!(outer_key_removed);
-    }
 }
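One detail worth calling out from the assertions above: the cache keeps its entries in ascending key-size order, while `get_largest_keys` answers in descending order, which is why the test reverses its tracking vector before comparing. A standalone sketch of that relationship (plain tuples, with `u8` stand-ins for pubkeys):

    let cache: Vec<(usize, u8)> = vec![(1, 7), (2, 3), (5, 9)]; // ascending, as stored
    let top2: Vec<(usize, u8)> = cache.iter().rev().take(2).copied().collect();
    assert_eq!(top2, vec![(5, 9), (2, 3)]); // descending, as returned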
@@ -1504,27 +1504,6 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         }
     }
 
-    pub fn get_largest_keys(
-        &self,
-        index: &AccountIndex,
-        max_entries: usize,
-    ) -> Vec<(usize, Pubkey)> {
-        match index {
-            AccountIndex::ProgramId => self
-                .program_id_index
-                .key_size_index
-                .get_largest_keys(max_entries),
-            AccountIndex::SplTokenOwner => self
-                .spl_token_owner_index
-                .key_size_index
-                .get_largest_keys(max_entries),
-            AccountIndex::SplTokenMint => self
-                .spl_token_mint_index
-                .key_size_index
-                .get_largest_keys(max_entries),
-        }
-    }
-
     /// log any secondary index counts, if non-zero
     pub(crate) fn log_secondary_indexes(&self) {
         if !self.program_id_index.index.is_empty() {
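For orientation, the reverted query surface was small: a caller holding an `AccountsIndex` fetched the top keys per secondary-index kind. A hypothetical call site (sketch; `accounts_index` is assumed to be an `AccountsIndex` with the ProgramId secondary index enabled):

    // Up to 10 (key size, outer pubkey) pairs, largest key size first.
    let top_keys: Vec<(usize, Pubkey)> =
        accounts_index.get_largest_keys(&AccountIndex::ProgramId, 10);
    for (num_inner_keys, program_id) in top_keys {
        println!("{program_id}: {num_inner_keys} indexed accounts");
    }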
@@ -12,9 +12,6 @@ use {
     },
 };
 
-pub const MAX_NUM_LARGEST_INDEX_KEYS_RETURNED: usize = 20;
-pub const NUM_LARGEST_INDEX_KEYS_CACHED: usize = 200;
-
 // The only cases where an inner key should map to a different outer key is
 // if the key had different account data for the indexed key across different
 // slots. As this is rare, it should be ok to use a Vec here over a HashSet, even
@@ -102,112 +99,12 @@ impl SecondaryIndexEntry for RwLockSecondaryIndexEntry {
     }
 }
 
-#[derive(Debug, Default)]
-struct HierarchicalOrderedMap<K, V>
-where
-    K: Default + PartialEq + Ord + Clone,
-    V: Default + PartialEq + Ord + Clone,
-{
-    capacity: usize,
-    map: Vec<(K, V)>,
-}
-
-impl<K, V> HierarchicalOrderedMap<K, V>
-where
-    K: Default + PartialEq + Ord + Clone,
-    V: Default + PartialEq + Ord + Clone,
-{
-    pub fn new(capacity: usize) -> Self {
-        Self {
-            capacity,
-            map: Vec::new(),
-        }
-    }
-    fn get_map(&self) -> &Vec<(K, V)> {
-        &self.map
-    }
-    fn sort_slice_by_value(&mut self, slice_key: &K) {
-        // Obtain a slice of mutable references to all elements with the same key
-        for sub_slice in self.map.split_mut(|(k, _)| k != slice_key) {
-            // Sort them
-            if !sub_slice.is_empty() {
-                sub_slice.sort_unstable_by_key(|(_, v)| v.clone());
-            }
-        }
-    }
-    fn update_map(&mut self, key: &K, value: &V) {
-        // Check if the value already exists.
-        let existing_value_position = self.map.iter().position(|(_, y)| y == value);
-        // Remove it if it does.
-        // Note: removal maintains sorted order; updating in place would require a re-sort.
-        // Since we have to search for the new position anyway,
-        // just throw the entry away and re-insert it as if it were new.
-        if let Some(position) = existing_value_position {
-            self.map.remove(position);
-        }
-        // If it's a new value...
-        else {
-            // If the list is full and the key is less than the smallest element, exit early.
-            if self.map.len() >= self.capacity && self.map[0].0 > *key {
-                return;
-            }
-        };
-        // Find where the new entry goes and insert it.
-        // Also report whether there are more elements in the list with the same key => they need sorting.
-        let (key_position, needs_sort) =
-            match self.map.binary_search_by_key(key, |(k, _)| k.clone()) {
-                Ok(found_position) => (found_position, true),
-                Err(would_be_position) => (would_be_position, false),
-            };
-        self.map.insert(key_position, (key.clone(), value.clone()));
-        // If there were indeed more elements with the same key, sort them by value
-        if needs_sort {
-            self.sort_slice_by_value(key);
-        }
-        // Prune the list if it is too big
-        while self.map.len() > self.capacity {
-            self.map.remove(0);
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct SecondaryIndexLargestKeys(RwLock<HierarchicalOrderedMap<usize, Pubkey>>);
-impl Default for SecondaryIndexLargestKeys {
-    fn default() -> Self {
-        let container = HierarchicalOrderedMap::<usize, Pubkey>::new(NUM_LARGEST_INDEX_KEYS_CACHED);
-        SecondaryIndexLargestKeys(RwLock::new(container))
-    }
-}
-impl SecondaryIndexLargestKeys {
-    pub fn get_largest_keys(&self, max_entries: usize) -> Vec<(usize, Pubkey)> {
-        // Obtain the shared resource.
-        let largest_key_list = self.0.read().unwrap();
-        // Collect elements into a vector.
-        let num_entries = std::cmp::min(MAX_NUM_LARGEST_INDEX_KEYS_RETURNED, max_entries);
-        largest_key_list
-            .get_map()
-            .iter()
-            .rev()
-            .take(num_entries)
-            .copied()
-            .collect::<Vec<(usize, Pubkey)>>()
-    }
-    pub fn update(&self, key_size: &usize, pubkey: &Pubkey) {
-        // Obtain the shared resource.
-        let mut largest_key_list = self.0.write().unwrap();
-        // Update the list
-        largest_key_list.update_map(key_size, pubkey);
-    }
-}
-
 #[derive(Debug, Default)]
 pub struct SecondaryIndex<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send> {
     metrics_name: &'static str,
     // Map from index keys to index values
     pub index: DashMap<Pubkey, SecondaryIndexEntryType>,
     pub reverse_index: DashMap<Pubkey, SecondaryReverseIndexEntry>,
-    pub key_size_index: SecondaryIndexLargestKeys,
     stats: SecondaryIndexStats,
 }
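The reverted `HierarchicalOrderedMap` is a fixed-capacity list kept sorted ascending by key, with ties sub-sorted by value and pruning from the front. The subtle case is an update: when a tracked value's key changes, its old entry is removed and the pair is re-inserted at the new position rather than sorted in place. A minimal standalone sketch of those semantics (simplified types; ties handled via tuple ordering instead of the post-insert slice sort above; not the reverted code itself):

    // Stand-in: key = key size, value = u8 "pubkey"; list sorted ascending by (key, value).
    fn update_map(map: &mut Vec<(usize, u8)>, capacity: usize, key: usize, value: u8) {
        if let Some(pos) = map.iter().position(|&(_, v)| v == value) {
            // Already tracked: drop the old entry and re-insert at the new position.
            map.remove(pos);
        } else if map.len() >= capacity && map.first().map_or(false, |&(k, _)| k > key) {
            // List is full and the new key is smaller than everything tracked: ignore it.
            return;
        }
        let pos = map.partition_point(|&e| e < (key, value));
        map.insert(pos, (key, value));
        // Prune the smallest entries if over capacity.
        while map.len() > capacity {
            map.remove(0);
        }
    }

    let mut map = Vec::new();
    update_map(&mut map, 2, 1, 10); // [(1, 10)]
    update_map(&mut map, 2, 5, 20); // [(1, 10), (5, 20)]
    update_map(&mut map, 2, 3, 10); // value 10 moves: [(3, 10), (5, 20)]
    update_map(&mut map, 2, 1, 30); // too small for a full list: dropped
    assert_eq!(map, vec![(3, 10), (5, 20)]);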
@@ -228,11 +125,7 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
             .get(key)
             .unwrap_or_else(|| self.index.entry(*key).or_default().downgrade());
 
-        let key_size_cache = pubkeys_map.len();
         pubkeys_map.insert_if_not_exists(inner_key, &self.stats.num_inner_keys);
-        if key_size_cache != pubkeys_map.len() {
-            self.key_size_index.update(&pubkeys_map.len(), key);
-        }
     }
 
     {
@@ -280,7 +173,6 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
             // If we deleted a pubkey from the reverse_index, then the corresponding entry
             // better exist in this index as well or the two indexes are out of sync!
             assert!(inner_key_map.value().remove_inner_key(removed_inner_key));
-            self.key_size_index.update(&inner_key_map.len(), outer_key);
             inner_key_map.is_empty()
         };
 
@@ -220,14 +220,6 @@ pub trait AdminRpc {
         pubkey_str: String,
     ) -> Result<HashMap<RpcAccountIndex, usize>>;
 
-    #[rpc(meta, name = "getLargestIndexKeys")]
-    fn get_largest_index_keys(
-        &self,
-        meta: Self::Metadata,
-        secondary_index: RpcAccountIndex,
-        max_entries: usize,
-    ) -> Result<Vec<(String, usize)>>;
-
     #[rpc(meta, name = "setPublicTpuAddress")]
     fn set_public_tpu_address(
         &self,
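On the wire this was an ordinary JSON-RPC method on the validator's admin RPC endpoint. The request shape below is taken verbatim from the deleted test; the response shape follows from the `Vec<(String, usize)>` return type, since Rust tuples serialize to JSON arrays (the key and size values shown are illustrative, not real output):

    --> {"jsonrpc":"2.0","id":1,"method":"getLargestIndexKeys","params":["programId", 20]}
    <-- {"jsonrpc":"2.0","result":[["Vote111111111111111111111111111111111111111", 120], ["Stake11111111111111111111111111111111111111", 97]],"id":1}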
@@ -576,34 +568,6 @@ impl AdminRpc for AdminRpcImpl {
         })
     }
 
-    fn get_largest_index_keys(
-        &self,
-        meta: Self::Metadata,
-        secondary_index: RpcAccountIndex,
-        max_entries: usize,
-    ) -> Result<Vec<(String, usize)>> {
-        debug!(
-            "get_largest_index_keys rpc request received: {:?}",
-            max_entries
-        );
-        let secondary_index = account_index_from_rpc_account_index(&secondary_index);
-        meta.with_post_init(|post_init| {
-            let bank = post_init.bank_forks.read().unwrap().root_bank();
-            let enabled_account_indexes = &bank.accounts().accounts_db.account_indexes;
-            if enabled_account_indexes.is_empty() {
-                debug!("get_largest_index_keys: secondary index not enabled.");
-                return Ok(Vec::new());
-            };
-            let accounts_index = &bank.accounts().accounts_db.accounts_index;
-            let largest_keys = accounts_index
-                .get_largest_keys(&secondary_index, max_entries)
-                .iter()
-                .map(|&(x, y)| (y.to_string(), x))
-                .collect::<Vec<_>>();
-            Ok(largest_keys)
-        })
-    }
-
     fn set_public_tpu_address(
         &self,
         meta: Self::Metadata,
@@ -736,14 +700,6 @@ fn rpc_account_index_from_account_index(account_index: &AccountIndex) -> RpcAccountIndex {
     }
 }
 
-fn account_index_from_rpc_account_index(rpc_account_index: &RpcAccountIndex) -> AccountIndex {
-    match rpc_account_index {
-        RpcAccountIndex::ProgramId => AccountIndex::ProgramId,
-        RpcAccountIndex::SplTokenOwner => AccountIndex::SplTokenOwner,
-        RpcAccountIndex::SplTokenMint => AccountIndex::SplTokenMint,
-    }
-}
-
 // Start the Admin RPC interface
 pub fn run(ledger_path: &Path, metadata: AdminRpcRequestMetadata) {
     let admin_rpc_path = admin_rpc_path(ledger_path);
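This deleted helper was the inverse of the `rpc_account_index_from_account_index` mapping kept above; together the two round-tripped the enum variants. A property sketch one could write against them (assumes both functions in scope and that `AccountIndex` derives `Debug` and `PartialEq`, which its use in test assertions suggests):

    for idx in [
        AccountIndex::ProgramId,
        AccountIndex::SplTokenOwner,
        AccountIndex::SplTokenMint,
    ] {
        let rpc = rpc_account_index_from_account_index(&idx);
        assert_eq!(idx, account_index_from_rpc_account_index(&rpc));
    }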
@@ -862,12 +818,8 @@ pub fn load_staked_nodes_overrides(
 mod tests {
     use {
         super::*,
-        rand::{distributions::Uniform, thread_rng, Rng},
         serde_json::Value,
-        solana_accounts_db::{
-            accounts_index::AccountSecondaryIndexes, inline_spl_token,
-            secondary_index::MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        },
+        solana_accounts_db::{accounts_index::AccountSecondaryIndexes, inline_spl_token},
         solana_core::consensus::tower_storage::NullTowerStorage,
         solana_gossip::cluster_info::ClusterInfo,
         solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo},
@@ -886,7 +838,7 @@ mod tests {
             solana_program::{program_option::COption, program_pack::Pack},
             state::{Account as TokenAccount, AccountState as TokenAccountState, Mint},
         },
-        std::{collections::HashSet, str::FromStr, sync::atomic::AtomicBool},
+        std::{collections::HashSet, sync::atomic::AtomicBool},
     };
 
     #[derive(Default)]
@@ -1276,183 +1228,4 @@ mod tests {
             }
         }
     }
-
-    #[test]
-    fn test_get_largest_index_keys() {
-        // Constants
-        const NUM_DUMMY_ACCOUNTS: usize = 50;
-        const MAX_CHILD_ACCOUNTS: usize = 5; // Set low because it induces lots of same-key-size entries in the ProgramId list
-        const MAX_MINT_ACCOUNTS: usize = 50;
-        const MAX_TOKEN_ACCOUNTS: usize = 100;
-
-        // Set secondary indexes
-        let account_indexes = AccountSecondaryIndexes {
-            keys: None,
-            indexes: HashSet::from([
-                AccountIndex::ProgramId,
-                AccountIndex::SplTokenMint,
-                AccountIndex::SplTokenOwner,
-            ]),
-        };
-
-        // RPC & Bank setup
-        let rpc = RpcHandler::start_with_config(TestConfig { account_indexes });
-
-        let bank = rpc.root_bank();
-        let RpcHandler { io, meta, .. } = rpc;
-
-        // Add some basic system-owned accounts
-        let mut dummy_account_pubkeys = Vec::with_capacity(NUM_DUMMY_ACCOUNTS);
-        let mut num_generator = thread_rng();
-        let key_size_range = Uniform::new_inclusive(0, MAX_CHILD_ACCOUNTS);
-        for _i in 1..=NUM_DUMMY_ACCOUNTS {
-            let pubkey = Pubkey::new_unique();
-            dummy_account_pubkeys.push(pubkey);
-            let account = AccountSharedData::from(Account {
-                lamports: 11111111,
-                owner: system_program::id(),
-                ..Account::default()
-            });
-            bank.store_account(&pubkey, &account);
-        }
-
-        // Now add a random number of accounts, each owned by one of the newly
-        // created dummy accounts
-        for dummy_account in &dummy_account_pubkeys {
-            // Add child accounts to each dummy account
-            let num_children = (&mut num_generator).sample_iter(key_size_range).next();
-            for _j in 0..num_children.unwrap_or(0) {
-                let child_pubkey = Pubkey::new_unique();
-                let child_account = AccountSharedData::from(Account {
-                    lamports: bank.get_minimum_balance_for_rent_exemption(0),
-                    owner: *dummy_account,
-                    ..Account::default()
-                });
-                bank.store_account(&child_pubkey, &child_account);
-            }
-        }
-
-        let num_token_accounts_range = Uniform::new_inclusive(1, MAX_TOKEN_ACCOUNTS);
-        let num_mint_accounts_range = Uniform::new_inclusive(NUM_DUMMY_ACCOUNTS, MAX_MINT_ACCOUNTS);
-        let dummy_account_pubkey_index_range = Uniform::new(0, NUM_DUMMY_ACCOUNTS);
-
-        let num_token_accounts = (&mut num_generator)
-            .sample_iter(num_token_accounts_range)
-            .next();
-        let num_mint_accounts = (&mut num_generator)
-            .sample_iter(num_mint_accounts_range)
-            .next();
-
-        let mut account_data = vec![0; TokenAccount::get_packed_len()];
-        let mut mint_data = vec![0; Mint::get_packed_len()];
-
-        // Make a bunch of SPL tokens, each with some random number of SPL token accounts that hold the token
-        for _i in 0..num_mint_accounts.unwrap_or(NUM_DUMMY_ACCOUNTS) {
-            let mint_pubkey = Pubkey::new_unique();
-            for _j in 0..num_token_accounts.unwrap_or(1) {
-                let owner_pubkey = dummy_account_pubkeys[(&mut num_generator)
-                    .sample_iter(dummy_account_pubkey_index_range)
-                    .next()
-                    .unwrap()];
-                let delegate_pubkey = dummy_account_pubkeys[(&mut num_generator)
-                    .sample_iter(dummy_account_pubkey_index_range)
-                    .next()
-                    .unwrap()];
-                let account_pubkey = Pubkey::new_unique();
-                // Add a token account
-                let token_state = TokenAccount {
-                    mint: mint_pubkey,
-                    owner: owner_pubkey,
-                    delegate: COption::Some(delegate_pubkey),
-                    amount: 100,
-                    state: TokenAccountState::Initialized,
-                    is_native: COption::None,
-                    delegated_amount: 10,
-                    close_authority: COption::Some(owner_pubkey),
-                };
-                TokenAccount::pack(token_state, &mut account_data).unwrap();
-                let token_account = AccountSharedData::from(Account {
-                    lamports: 22222222,
-                    data: account_data.to_vec(),
-                    owner: inline_spl_token::id(),
-                    ..Account::default()
-                });
-                bank.store_account(&account_pubkey, &token_account);
-            }
-            // Add the mint
-            let mint_authority_pubkey = dummy_account_pubkeys[(&mut num_generator)
-                .sample_iter(dummy_account_pubkey_index_range)
-                .next()
-                .unwrap()];
-            let mint_state = Mint {
-                mint_authority: COption::Some(mint_authority_pubkey),
-                supply: 100 * (num_token_accounts.unwrap_or(1) as u64),
-                decimals: 2,
-                is_initialized: true,
-                freeze_authority: COption::Some(mint_authority_pubkey),
-            };
-            Mint::pack(mint_state, &mut mint_data).unwrap();
-            let mint_account = AccountSharedData::from(Account {
-                lamports: 33333333,
-                data: mint_data.to_vec(),
-                owner: inline_spl_token::id(),
-                ..Account::default()
-            });
-            bank.store_account(&mint_pubkey, &mint_account);
-        }
-
-        // Collect the largest-key list for ProgramIds
-        let req = format!(
-            r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestIndexKeys","params":["{}", {}]}}"#,
-            "programId", MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        );
-        let res = io.handle_request_sync(&req, meta.clone());
-        let result: Value = serde_json::from_str(&res.expect("actual response"))
-            .expect("actual response deserialization");
-        let largest_program_id_keys: Vec<(String, usize)> =
-            serde_json::from_value(result["result"].clone()).unwrap();
-        // Collect the largest-key list for SplTokenOwners
-        let req = format!(
-            r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestIndexKeys","params":["{}", {}]}}"#,
-            "splTokenOwner", MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        );
-        let res = io.handle_request_sync(&req, meta.clone());
-        let result: Value = serde_json::from_str(&res.expect("actual response"))
-            .expect("actual response deserialization");
-        let largest_spl_token_owner_keys: Vec<(String, usize)> =
-            serde_json::from_value(result["result"].clone()).unwrap();
-        // Collect the largest-key list for SplTokenMints
-        let req = format!(
-            r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestIndexKeys","params":["{}", {}]}}"#,
-            "splTokenMint", MAX_NUM_LARGEST_INDEX_KEYS_RETURNED,
-        );
-        let res = io.handle_request_sync(&req, meta);
-        let result: Value = serde_json::from_str(&res.expect("actual response"))
-            .expect("actual response deserialization");
-        let largest_spl_token_mint_keys: Vec<(String, usize)> =
-            serde_json::from_value(result["result"].clone()).unwrap();
-
-        let largest_keys = vec![
-            largest_program_id_keys,
-            largest_spl_token_owner_keys,
-            largest_spl_token_mint_keys,
-        ];
-
-        // Make sure the key lists conform to the expected output
-        for key_list in largest_keys {
-            // No longer than the max
-            assert!(key_list.len() <= MAX_NUM_LARGEST_INDEX_KEYS_RETURNED);
-            let key_list_pubkeys = key_list
-                .iter()
-                .map(|(k, _)| Pubkey::from_str(k).unwrap())
-                .collect::<Vec<Pubkey>>();
-            // In sorted order: descending key size, with ties sorted by descending pubkey
-            for i in 0..key_list.len() - 1 {
-                assert!(key_list[i].1 >= key_list[i + 1].1);
-                if key_list[i].1 == key_list[i + 1].1 {
-                    assert!(key_list_pubkeys[i] >= key_list_pubkeys[i + 1]);
-                }
-            }
-        }
-    }
 }