2019-04-15 17:15:50 -07:00
|
|
|
//! Persistent accounts are stored in below path location:
|
|
|
|
//! <path>/<pid>/data/
|
|
|
|
//!
|
|
|
|
//! The persistent store would allow for this mode of operation:
|
|
|
|
//! - Concurrent single thread append with many concurrent readers.
|
|
|
|
//!
|
|
|
|
//! The underlying memory is memory mapped to a file. The accounts would be
|
|
|
|
//! stored across multiple files and the mappings of file and offset of a
|
|
|
|
//! particular account would be stored in a shared index. This will allow for
|
|
|
|
//! concurrent commits without blocking reads, which will sequentially write
|
|
|
|
//! to memory, ssd or disk, and should be as fast as the hardware allows.
|
|
|
|
//! The only required in memory data structure with a write lock is the index,
|
|
|
|
//! which should be fast to update.
|
|
|
|
//!
|
|
|
|
//! AppendVec's only store accounts for single forks. To bootstrap the
|
|
|
|
//! index from a persistent store of AppendVec's, the entries include
|
|
|
|
//! a "write_version". A single global atomic `AccountsDB::write_version`
|
|
|
|
//! tracks the number of commits to the entire data store. So the latest
|
|
|
|
//! commit for each fork entry would be indexed.
|
|
|
|
|
|
|
|
use crate::accounts_index::{AccountsIndex, Fork};
|
|
|
|
use crate::append_vec::{AppendVec, StoredAccount};
|
2019-04-02 08:35:38 -07:00
|
|
|
use crate::message_processor::has_duplicates;
|
2018-12-17 12:41:23 -08:00
|
|
|
use bincode::serialize;
|
|
|
|
use hashbrown::{HashMap, HashSet};
|
2019-02-27 20:57:01 -08:00
|
|
|
use log::*;
|
2019-03-07 12:41:00 -08:00
|
|
|
use rand::{thread_rng, Rng};
|
2019-04-15 17:15:50 -07:00
|
|
|
use rayon::prelude::*;
|
2019-02-18 22:26:22 -08:00
|
|
|
use solana_metrics::counter::Counter;
|
2018-12-17 12:41:23 -08:00
|
|
|
use solana_sdk::account::Account;
|
2019-03-29 15:11:21 -07:00
|
|
|
use solana_sdk::fee_calculator::FeeCalculator;
|
2019-04-15 17:15:50 -07:00
|
|
|
use solana_sdk::hash::{hash, Hash, Hasher};
|
2019-02-07 08:03:40 -08:00
|
|
|
use solana_sdk::native_loader;
|
2018-12-17 12:41:23 -08:00
|
|
|
use solana_sdk::pubkey::Pubkey;
|
2019-02-20 21:31:24 -08:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2019-04-05 09:42:54 -07:00
|
|
|
use solana_sdk::transaction::Result;
|
2019-03-13 13:37:24 -07:00
|
|
|
use solana_sdk::transaction::{Transaction, TransactionError};
|
2019-02-23 14:23:55 -08:00
|
|
|
use std::env;
|
|
|
|
use std::fs::{create_dir_all, remove_dir_all};
|
2019-04-15 17:15:50 -07:00
|
|
|
use std::ops::Neg;
|
2018-12-24 16:11:20 -08:00
|
|
|
use std::path::Path;
|
|
|
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
2019-04-15 17:15:50 -07:00
|
|
|
use std::sync::{Arc, Mutex, RwLock};
|
2018-12-17 12:41:23 -08:00
|
|
|
|
2019-01-06 22:06:55 -08:00
|
|
|
/// Accounts referenced directly by a transaction's instructions.
pub type InstructionAccounts = Vec<Account>;
/// For each instruction of a transaction: the loader chain as
/// (pubkey, account) pairs.
pub type InstructionLoaders = Vec<Vec<(Pubkey, Account)>>;
|
|
|
|
|
2019-01-08 09:20:25 -08:00
|
|
|
/// Per-batch tally of transaction load/lock failures, reported to metrics.
#[derive(Debug, Default)]
pub struct ErrorCounters {
    pub account_not_found: usize,
    pub account_in_use: usize,
    pub account_loaded_twice: usize,
    pub blockhash_not_found: usize,
    pub blockhash_too_old: usize,
    pub reserve_blockhash: usize,
    pub insufficient_funds: usize,
    pub invalid_account_index: usize,
    pub duplicate_signature: usize,
    pub call_chain_too_deep: usize,
    pub missing_signature_for_fee: usize,
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
/// Initial size, in bytes, of each backing AppendVec file (64 MiB).
const ACCOUNT_DATA_FILE_SIZE: u64 = 64 * 1024 * 1024;
/// File name of the account data file inside each storage directory.
const ACCOUNT_DATA_FILE: &str = "data";
/// Directory component used when generating default storage paths.
const ACCOUNTSDB_DIR: &str = "accountsdb";
/// Number of directories generated by the default path layout.
const NUM_ACCOUNT_DIRS: usize = 4;
|
2018-12-24 16:11:20 -08:00
|
|
|
|
|
|
|
/// An offset into the AccountsDB::storage vector
type AppendVecId = usize;
|
|
|
|
|
|
|
|
/// Whether a storage entry can accept further appended accounts.
/// Stored in an AtomicUsize, hence the explicit discriminants.
#[derive(Debug, PartialEq)]
enum AccountStorageStatus {
    StorageAvailable = 0,
    StorageFull = 1,
}
|
|
|
|
|
|
|
|
impl From<usize> for AccountStorageStatus {
|
|
|
|
fn from(status: usize) -> Self {
|
|
|
|
use self::AccountStorageStatus::*;
|
|
|
|
match status {
|
|
|
|
0 => StorageAvailable,
|
|
|
|
1 => StorageFull,
|
|
|
|
_ => unreachable!(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-16 23:42:32 -07:00
|
|
|
/// Location of one account version inside the storage, plus a cached balance.
#[derive(Default, Clone)]
struct AccountInfo {
    /// index identifying the append storage
    id: AppendVecId,

    /// offset into the storage
    offset: usize,

    /// lamports in the account used when squashing kept for optimization
    /// purposes to remove accounts with zero balance.
    lamports: u64,
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
/// Persistent storage structure holding the accounts
struct AccountStorageEntry {
    /// fork this storage belongs to; each entry stores accounts for one fork
    fork_id: Fork,

    /// storage holding the accounts
    accounts: AppendVec,

    /// Keeps track of the number of accounts stored in a specific AppendVec.
    /// This is periodically checked to reuse the stores that do not have
    /// any accounts in it.
    count: AtomicUsize,

    /// status corresponding to the storage, a raw AccountStorageStatus value
    status: AtomicUsize,
}
|
|
|
|
|
2019-03-06 15:12:50 -08:00
|
|
|
impl AccountStorageEntry {
|
2019-04-15 17:15:50 -07:00
|
|
|
pub fn new(path: &str, fork_id: Fork, id: usize, file_size: u64) -> Self {
|
2019-03-06 15:12:50 -08:00
|
|
|
let p = format!("{}/{}", path, id);
|
|
|
|
let path = Path::new(&p);
|
|
|
|
let _ignored = remove_dir_all(path);
|
|
|
|
create_dir_all(path).expect("Create directory failed");
|
2019-04-12 04:30:17 -07:00
|
|
|
let accounts = AppendVec::new(&path.join(ACCOUNT_DATA_FILE), true, file_size as usize);
|
2019-03-06 15:12:50 -08:00
|
|
|
|
|
|
|
AccountStorageEntry {
|
2019-04-15 17:15:50 -07:00
|
|
|
fork_id,
|
2019-03-06 15:12:50 -08:00
|
|
|
accounts,
|
|
|
|
count: AtomicUsize::new(0),
|
|
|
|
status: AtomicUsize::new(AccountStorageStatus::StorageAvailable as usize),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
pub fn set_status(&self, status: AccountStorageStatus) {
|
|
|
|
self.status.store(status as usize, Ordering::Relaxed);
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn get_status(&self) -> AccountStorageStatus {
|
|
|
|
self.status.load(Ordering::Relaxed).into()
|
|
|
|
}
|
2019-03-06 15:12:50 -08:00
|
|
|
|
|
|
|
fn add_account(&self) {
|
|
|
|
self.count.fetch_add(1, Ordering::Relaxed);
|
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
fn remove_account(&self) -> bool {
|
2019-03-06 15:12:50 -08:00
|
|
|
if self.count.fetch_sub(1, Ordering::Relaxed) == 1 {
|
2019-04-12 04:30:17 -07:00
|
|
|
self.accounts.reset();
|
2019-03-06 15:12:50 -08:00
|
|
|
self.set_status(AccountStorageStatus::StorageAvailable);
|
2019-04-15 17:15:50 -07:00
|
|
|
true
|
|
|
|
} else {
|
|
|
|
false
|
2019-03-06 15:12:50 -08:00
|
|
|
}
|
|
|
|
}
|
2018-12-24 16:11:20 -08:00
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
/// All storage entries; each entry is Arc-shared so scans can clone handles
/// and release the list lock.
type AccountStorage = Vec<Arc<AccountStorageEntry>>;
/// Borrowed view of the storage list.
type AccountStorageSlice = [Arc<AccountStorageEntry>];
|
2019-03-06 15:12:50 -08:00
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
// This structure handles the load/store of the accounts
#[derive(Default)]
pub struct AccountsDB {
    /// Keeps tracks of index into AppendVec on a per fork basis
    accounts_index: RwLock<AccountsIndex<AccountInfo>>,

    /// Account storage
    storage: RwLock<AccountStorage>,

    /// distribute the accounts across storage lists
    next_id: AtomicUsize,

    /// write version: global append counter used to order account versions
    /// (newest wins when deduplicating)
    write_version: AtomicUsize,

    /// Set of storage paths to pick from
    paths: Vec<String>,

    /// Starting file size of appendvecs
    file_size: u64,
}
|
|
|
|
|
|
|
|
/// This structure handles synchronization for db
#[derive(Default)]
pub struct Accounts {
    /// Single global AccountsDB
    pub accounts_db: Arc<AccountsDB>,

    /// set of accounts which are currently in the pipeline
    account_locks: Mutex<HashSet<Pubkey>>,

    /// List of persistent stores, as a comma-separated path string
    paths: String,

    /// set to true if object created the directories in paths
    /// when true, delete parents of 'paths' on drop
    own_paths: bool,
}
|
|
|
|
|
2019-02-23 14:23:55 -08:00
|
|
|
/// Split a comma-separated list of storage paths into owned strings.
fn get_paths_vec(paths: &str) -> Vec<String> {
    paths.split(',').map(String::from).collect()
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
impl Drop for Accounts {
    /// Best-effort removal of the on-disk stores; errors are deliberately
    /// ignored since the process is tearing this object down anyway.
    fn drop(&mut self) {
        let paths = get_paths_vec(&self.paths);
        paths.iter().for_each(|p| {
            let _ignored = remove_dir_all(p);

            // it is safe to delete the parent
            if self.own_paths {
                let path = Path::new(p);
                let _ignored = remove_dir_all(path.parent().unwrap());
            }
        });
    }
}
|
|
|
|
|
|
|
|
impl AccountsDB {
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Create a database over the comma-separated `paths`, with each newly
    /// allocated AppendVec sized to `file_size` bytes.
    pub fn new_with_file_size(paths: &str, file_size: u64) -> Self {
        let paths = get_paths_vec(&paths);
        AccountsDB {
            accounts_index: RwLock::new(AccountsIndex::default()),
            storage: RwLock::new(vec![]),
            next_id: AtomicUsize::new(0),
            write_version: AtomicUsize::new(0),
            paths,
            file_size,
        }
    }
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Create a database using the default AppendVec file size.
    pub fn new(paths: &str) -> Self {
        Self::new_with_file_size(paths, ACCOUNT_DATA_FILE_SIZE)
    }
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Allocate a new storage entry for `fork_id` under `path`, giving it a
    /// process-unique id drawn from `next_id`.
    fn new_storage_entry(&self, fork_id: Fork, path: &str) -> AccountStorageEntry {
        AccountStorageEntry::new(
            path,
            fork_id,
            self.next_id.fetch_add(1, Ordering::Relaxed),
            self.file_size,
        )
    }
|
|
|
|
|
2019-03-16 23:42:32 -07:00
|
|
|
pub fn has_accounts(&self, fork: Fork) -> bool {
|
2019-04-15 17:15:50 -07:00
|
|
|
for x in self.storage.read().unwrap().iter() {
|
|
|
|
if x.fork_id == fork && x.count.load(Ordering::Relaxed) > 0 {
|
2019-02-20 21:31:24 -08:00
|
|
|
return true;
|
2019-02-20 12:17:32 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Scan a specific fork through all the account storage in parallel with sequential read
    // PERF: Sequentially read each storage entry in parallel
    pub fn scan_account_storage<F, B>(&self, fork_id: Fork, scan_func: F) -> Vec<B>
    where
        F: Fn(&StoredAccount, &mut B) -> (),
        F: Send + Sync,
        B: Send + Default,
    {
        // Clone the Arc handles under the read lock, then release the lock
        // before the (potentially long) parallel scan.
        let storage_maps: Vec<Arc<AccountStorageEntry>> = self
            .storage
            .read()
            .unwrap()
            .iter()
            .filter(|store| store.fork_id == fork_id)
            .cloned()
            .collect();
        // One rayon task per storage entry; each reads its AppendVec
        // sequentially from offset 0 and folds every stored account into a
        // fresh accumulator of type B.
        storage_maps
            .into_par_iter()
            .map(|storage| {
                let accounts = storage.accounts.accounts(0);
                let mut retval = B::default();
                accounts
                    .iter()
                    .for_each(|stored_account| scan_func(stored_account, &mut retval));
                retval
            })
            .collect()
    }
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Hash the latest version of every account stored for `fork_id` into a
    /// single fork-state hash. Returns None when the fork has no accounts.
    pub fn hash_internal_state(&self, fork_id: Fork) -> Option<Hash> {
        // Collect (pubkey, write_version, account hash) for every stored
        // account version on this fork.
        let accumulator: Vec<Vec<(Pubkey, u64, Hash)>> = self.scan_account_storage(
            fork_id,
            |stored_account: &StoredAccount, accum: &mut Vec<(Pubkey, u64, Hash)>| {
                accum.push((
                    stored_account.pubkey,
                    stored_account.write_version,
                    hash(&serialize(&stored_account.account).unwrap()),
                ));
            },
        );
        let mut account_hashes: Vec<_> = accumulator.into_iter().flat_map(|x| x).collect();
        // Sort by pubkey with the newest write_version first, so dedup keeps
        // only the latest version of each account.
        account_hashes.sort_by_key(|s| (s.0, (s.1 as i64).neg()));
        account_hashes.dedup_by_key(|s| s.0);
        if account_hashes.is_empty() {
            None
        } else {
            // Fold the per-account hashes, in pubkey order, into one digest.
            let mut hasher = Hasher::default();
            for (_, _, hash) in account_hashes {
                hasher.hash(hash.as_ref());
            }
            Some(hasher.result())
        }
    }
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Look up `pubkey` in the index (restricted to forks in `ancestors`)
    /// and clone the account out of its storage entry.
    /// Returns None when the key is not indexed for any ancestor fork.
    fn load(
        storage: &AccountStorageSlice,
        ancestors: &HashMap<Fork, usize>,
        accounts_index: &AccountsIndex<AccountInfo>,
        pubkey: &Pubkey,
    ) -> Option<Account> {
        let info = accounts_index.get(pubkey, ancestors)?;
        //TODO: thread this as a ref
        storage
            .get(info.id)
            .map(|store| store.accounts.get_account(info.offset).account.clone())
    }
|
2018-12-24 16:11:20 -08:00
|
|
|
|
|
|
|
    /// Load every account named by `tx`'s message, deducting `fee` from the
    /// first (payer) account. Missing accounts are filled with defaults so
    /// the payer checks below always see a full vector.
    ///
    /// Errors: MissingSignatureForFee, AccountLoadedTwice, AccountNotFound,
    /// InsufficientFundsForFee; each bumps the matching counter.
    fn load_tx_accounts(
        storage: &AccountStorageSlice,
        ancestors: &HashMap<Fork, usize>,
        accounts_index: &AccountsIndex<AccountInfo>,
        tx: &Transaction,
        fee: u64,
        error_counters: &mut ErrorCounters,
    ) -> Result<Vec<Account>> {
        // Copy all the accounts
        let message = tx.message();
        if tx.signatures.is_empty() && fee != 0 {
            Err(TransactionError::MissingSignatureForFee)
        } else {
            // Check for unique account keys
            if has_duplicates(&message.account_keys) {
                error_counters.account_loaded_twice += 1;
                return Err(TransactionError::AccountLoadedTwice);
            }

            // There is no way to predict what program will execute without an error
            // If a fee can pay for execution then the program will be scheduled
            let mut called_accounts: Vec<Account> = vec![];
            for key in &message.account_keys {
                called_accounts
                    .push(Self::load(storage, ancestors, accounts_index, key).unwrap_or_default());
            }
            // A zero-lamport payer is indistinguishable from a missing one.
            if called_accounts.is_empty() || called_accounts[0].lamports == 0 {
                error_counters.account_not_found += 1;
                Err(TransactionError::AccountNotFound)
            } else if called_accounts[0].lamports < fee {
                error_counters.insufficient_funds += 1;
                Err(TransactionError::InsufficientFundsForFee)
            } else {
                called_accounts[0].lamports -= fee;
                Ok(called_accounts)
            }
        }
    }
|
2019-01-04 16:39:04 -08:00
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
fn load_executable_accounts(
|
2019-04-15 17:15:50 -07:00
|
|
|
storage: &AccountStorageSlice,
|
|
|
|
ancestors: &HashMap<Fork, usize>,
|
|
|
|
accounts_index: &AccountsIndex<AccountInfo>,
|
2019-03-09 19:28:43 -08:00
|
|
|
program_id: &Pubkey,
|
2019-01-08 09:20:25 -08:00
|
|
|
error_counters: &mut ErrorCounters,
|
2018-12-24 16:11:20 -08:00
|
|
|
) -> Result<Vec<(Pubkey, Account)>> {
|
2019-01-06 22:06:55 -08:00
|
|
|
let mut accounts = Vec::new();
|
|
|
|
let mut depth = 0;
|
2019-03-09 19:28:43 -08:00
|
|
|
let mut program_id = *program_id;
|
2019-01-06 22:06:55 -08:00
|
|
|
loop {
|
2019-02-07 08:03:40 -08:00
|
|
|
if native_loader::check_id(&program_id) {
|
2019-01-06 22:06:55 -08:00
|
|
|
// at the root of the chain, ready to dispatch
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if depth >= 5 {
|
2019-01-08 09:20:25 -08:00
|
|
|
error_counters.call_chain_too_deep += 1;
|
2019-03-13 12:58:44 -07:00
|
|
|
return Err(TransactionError::CallChainTooDeep);
|
2019-01-06 22:06:55 -08:00
|
|
|
}
|
|
|
|
depth += 1;
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
let program = match Self::load(storage, ancestors, accounts_index, &program_id) {
|
2019-01-06 22:06:55 -08:00
|
|
|
Some(program) => program,
|
2019-01-08 09:20:25 -08:00
|
|
|
None => {
|
|
|
|
error_counters.account_not_found += 1;
|
2019-03-13 12:58:44 -07:00
|
|
|
return Err(TransactionError::AccountNotFound);
|
2019-01-08 09:20:25 -08:00
|
|
|
}
|
2019-01-06 22:06:55 -08:00
|
|
|
};
|
2019-02-14 09:57:54 -08:00
|
|
|
if !program.executable || program.owner == Pubkey::default() {
|
2019-01-08 09:20:25 -08:00
|
|
|
error_counters.account_not_found += 1;
|
2019-03-13 12:58:44 -07:00
|
|
|
return Err(TransactionError::AccountNotFound);
|
2019-01-06 22:06:55 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// add loader to chain
|
2019-02-14 09:57:54 -08:00
|
|
|
program_id = program.owner;
|
2019-04-15 17:15:50 -07:00
|
|
|
accounts.insert(0, (program_id, program));
|
2019-01-06 22:06:55 -08:00
|
|
|
}
|
|
|
|
Ok(accounts)
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// For each program_id in the transaction, load its loaders.
    fn load_loaders(
        storage: &AccountStorageSlice,
        ancestors: &HashMap<Fork, usize>,
        accounts_index: &AccountsIndex<AccountInfo>,
        tx: &Transaction,
        error_counters: &mut ErrorCounters,
    ) -> Result<Vec<Vec<(Pubkey, Account)>>> {
        let message = tx.message();
        message
            .instructions
            .iter()
            .map(|ix| {
                // Bounds-check the instruction's program_ids_index before
                // indexing into program_ids().
                if message.program_ids().len() <= ix.program_ids_index as usize {
                    error_counters.account_not_found += 1;
                    return Err(TransactionError::AccountNotFound);
                }
                let program_id = message.program_ids()[ix.program_ids_index as usize];
                Self::load_executable_accounts(
                    storage,
                    ancestors,
                    accounts_index,
                    &program_id,
                    error_counters,
                )
            })
            .collect()
    }
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
    /// For each transaction that passed locking, load its instruction
    /// accounts (with the fee already deducted from the payer) and the
    /// loader chains for its programs. Lock failures pass through unchanged.
    fn load_accounts(
        &self,
        ancestors: &HashMap<Fork, usize>,
        txs: &[Transaction],
        lock_results: Vec<Result<()>>,
        fee_calculator: &FeeCalculator,
        error_counters: &mut ErrorCounters,
    ) -> Vec<Result<(InstructionAccounts, InstructionLoaders)>> {
        //PERF: hold the lock to scan for the references, but not to clone the accounts
        //TODO: two locks usually leads to deadlocks, should this be one structure?
        let accounts_index = self.accounts_index.read().unwrap();
        let storage = self.storage.read().unwrap();
        txs.iter()
            .zip(lock_results.into_iter())
            .map(|etx| match etx {
                (tx, Ok(())) => {
                    let fee = fee_calculator.calculate_fee(tx.message());
                    let accounts = Self::load_tx_accounts(
                        &storage,
                        ancestors,
                        &accounts_index,
                        tx,
                        fee,
                        error_counters,
                    )?;
                    let loaders = Self::load_loaders(
                        &storage,
                        ancestors,
                        &accounts_index,
                        tx,
                        error_counters,
                    )?;
                    Ok((accounts, loaders))
                }
                // Locking already failed; propagate that error unchanged.
                (_, Err(e)) => Err(e),
            })
            .collect()
    }
|
2019-01-04 16:04:31 -08:00
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
fn load_slow(&self, ancestors: &HashMap<Fork, usize>, pubkey: &Pubkey) -> Option<Account> {
|
|
|
|
let accounts_index = self.accounts_index.read().unwrap();
|
|
|
|
let storage = self.storage.read().unwrap();
|
|
|
|
Self::load(&storage, ancestors, &accounts_index, pubkey)
|
2019-02-23 23:43:10 -08:00
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Pick a storage entry for `fork_id` that can accept an append.
    ///
    /// `start` seeds the probe position; `current` is the storage id that
    /// just failed an append (std::usize::MAX on the first attempt). The
    /// failed store is marked StorageFull, the list is probed round-robin,
    /// and when no available store for this fork is found a new one is
    /// created on a randomly chosen path.
    fn get_storage_id(&self, fork_id: Fork, start: usize, current: usize) -> usize {
        let mut id = current;
        let len: usize;
        {
            let stores = self.storage.read().unwrap();
            len = stores.len();
            if len > 0 {
                if id == std::usize::MAX {
                    // First attempt: try the seeded slot directly.
                    id = start % len;
                    if stores[id].get_status() == AccountStorageStatus::StorageAvailable {
                        return id;
                    }
                } else {
                    // Retry after a failed append: this store is full.
                    stores[id].set_status(AccountStorageStatus::StorageFull);
                }

                // Round-robin probe; stops on an available store for this
                // fork or after one full wrap back to the seed slot.
                loop {
                    id = (id + 1) % len;
                    if fork_id == stores[id].fork_id
                        && stores[id].get_status() == AccountStorageStatus::StorageAvailable
                    {
                        break;
                    }
                    if id == start % len {
                        break;
                    }
                }
            }
        }
        // NOTE(review): if the probe found an available store exactly at
        // start % len, this still allocates a new store — harmless but
        // wasteful; confirm before tightening.
        if len == 0 || id == start % len {
            let mut stores = self.storage.write().unwrap();
            // check if new store was already created
            if stores.len() == len {
                let path_idx = thread_rng().gen_range(0, self.paths.len());
                let storage = self.new_storage_entry(fork_id, &self.paths[path_idx]);
                stores.push(Arc::new(storage));
            }
            id = stores.len() - 1;
        }
        id
    }
|
|
|
|
|
|
|
|
fn append_account(&self, fork_id: Fork, pubkey: &Pubkey, account: &Account) -> (usize, usize) {
|
|
|
|
let offset: usize;
|
|
|
|
let start = self.next_id.fetch_add(1, Ordering::Relaxed);
|
|
|
|
let mut id = self.get_storage_id(fork_id, start, std::usize::MAX);
|
|
|
|
|
|
|
|
// Even if no lamports, need to preserve the account owner so
|
|
|
|
// we can update the vote_accounts correctly if this account is purged
|
|
|
|
// when squashing.
|
|
|
|
let acc = &mut account.clone();
|
|
|
|
if account.lamports == 0 {
|
|
|
|
acc.data.resize(0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
loop {
|
|
|
|
let result: Option<usize>;
|
|
|
|
{
|
|
|
|
let accounts = &self.storage.read().unwrap()[id];
|
|
|
|
let write_version = self.write_version.fetch_add(1, Ordering::Relaxed) as u64;
|
|
|
|
let stored_account = StoredAccount {
|
|
|
|
write_version,
|
|
|
|
pubkey: *pubkey,
|
|
|
|
//TODO: fix all this copy
|
|
|
|
account: account.clone(),
|
|
|
|
};
|
|
|
|
result = accounts.accounts.append_account(&stored_account);
|
|
|
|
accounts.add_account();
|
|
|
|
}
|
|
|
|
if let Some(val) = result {
|
|
|
|
offset = val;
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
id = self.get_storage_id(fork_id, start, id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
(id, offset)
|
|
|
|
}
|
2019-03-16 23:42:32 -07:00
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
pub fn purge_fork(&self, fork: Fork) {
|
|
|
|
//add_root should be called first
|
|
|
|
let is_root = self.accounts_index.read().unwrap().is_root(fork);
|
|
|
|
trace!("PURGING {} {}", fork, is_root);
|
|
|
|
if !is_root {
|
|
|
|
self.storage.write().unwrap().retain(|x| {
|
|
|
|
trace!("PURGING {} {}", x.fork_id, fork);
|
|
|
|
x.fork_id != fork
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Store the account update.
    ///
    /// Three phases: append every account version to storage, insert the
    /// resulting locations into the index (collecting displaced old
    /// versions), then release the displaced versions and clean up any fork
    /// whose last storage entry was emptied.
    pub fn store(&self, fork_id: Fork, accounts: &[(&Pubkey, &Account)]) {
        //TODO; these blocks should be separate functions and unit tested
        // Phase 1: append each account, recording where it landed.
        let infos: Vec<_> = accounts
            .iter()
            .map(|(pubkey, account)| {
                let (id, offset) = self.append_account(fork_id, pubkey, account);
                AccountInfo {
                    id,
                    offset,
                    lamports: account.lamports,
                }
            })
            .collect();

        // Phase 2: update the index; `reclaims` are the displaced versions.
        let reclaims: Vec<(Fork, AccountInfo)> = {
            let mut index = self.accounts_index.write().unwrap();
            let mut reclaims = vec![];
            for (i, info) in infos.into_iter().enumerate() {
                let key = &accounts[i].0;
                reclaims.extend(index.insert(fork_id, key, info).into_iter())
            }
            reclaims
        };

        // Phase 3: decrement storage counts for the displaced versions and
        // find forks that no longer have any storage entries.
        let dead_forks: HashSet<Fork> = {
            let stores = self.storage.read().unwrap();
            let mut cleared_forks: HashSet<Fork> = HashSet::new();
            for (fork_id, account_info) in reclaims {
                let cleared = stores[account_info.id].remove_account();
                if cleared {
                    cleared_forks.insert(fork_id);
                }
            }
            let live_forks: HashSet<Fork> = stores.iter().map(|x| x.fork_id).collect();
            cleared_forks.difference(&live_forks).cloned().collect()
        };
        {
            let mut index = self.accounts_index.write().unwrap();
            for fork in dead_forks {
                index.cleanup_dead_fork(fork);
            }
        }
    }
|
|
|
|
|
|
|
|
    /// Write back the loaded instruction accounts for every transaction that
    /// both locked (`res`) and loaded (`loaded`) successfully.
    pub fn store_accounts(
        &self,
        fork: Fork,
        txs: &[Transaction],
        res: &[Result<()>],
        loaded: &[Result<(InstructionAccounts, InstructionLoaders)>],
    ) {
        let mut accounts: Vec<(&Pubkey, &Account)> = vec![];
        for (i, raccs) in loaded.iter().enumerate() {
            if res[i].is_err() || raccs.is_err() {
                continue;
            }

            // Pair each message key with the corresponding loaded account.
            let message = &txs[i].message();
            let acc = raccs.as_ref().unwrap();
            for (key, account) in message.account_keys.iter().zip(acc.0.iter()) {
                accounts.push((key, account));
            }
        }
        self.store(fork, &accounts);
    }
|
|
|
|
|
|
|
|
    /// Mark `fork` as rooted in the index so purge_fork will not remove it.
    pub fn add_root(&self, fork: Fork) {
        self.accounts_index.write().unwrap().add_root(fork)
    }
|
2019-01-29 16:33:28 -08:00
|
|
|
}
|
2018-12-17 12:41:23 -08:00
|
|
|
|
2019-01-29 16:33:28 -08:00
|
|
|
impl Accounts {
|
2019-02-20 12:17:32 -08:00
|
|
|
    /// Build a unique storage directory name:
    /// `<OUT_DIR or "target">/accountsdb/<random keypair pubkey>/<counter>`.
    fn make_new_dir() -> String {
        // Process-wide counter keeps names unique across calls.
        static ACCOUNT_DIR: AtomicUsize = AtomicUsize::new(0);
        let dir = ACCOUNT_DIR.fetch_add(1, Ordering::Relaxed);
        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
        let keypair = Keypair::new();
        format!(
            "{}/{}/{}/{}",
            out_dir,
            ACCOUNTSDB_DIR,
            keypair.pubkey(),
            dir.to_string()
        )
    }
|
|
|
|
|
|
|
|
fn make_default_paths() -> String {
|
2019-02-20 21:31:24 -08:00
|
|
|
let mut paths = "".to_string();
|
|
|
|
for index in 0..NUM_ACCOUNT_DIRS {
|
|
|
|
if index > 0 {
|
|
|
|
paths.push_str(",");
|
|
|
|
}
|
2019-02-20 12:17:32 -08:00
|
|
|
paths.push_str(&Self::make_new_dir());
|
|
|
|
}
|
|
|
|
paths
|
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
pub fn new(in_paths: Option<String>) -> Self {
|
2019-03-02 14:02:11 -08:00
|
|
|
let (paths, own_paths) = if in_paths.is_none() {
|
|
|
|
(Self::make_default_paths(), true)
|
2019-02-25 21:22:00 -08:00
|
|
|
} else {
|
2019-03-02 14:02:11 -08:00
|
|
|
(in_paths.unwrap(), false)
|
2018-12-24 16:11:20 -08:00
|
|
|
};
|
2019-04-15 17:15:50 -07:00
|
|
|
let accounts_db = Arc::new(AccountsDB::new(&paths));
|
2018-12-24 16:11:20 -08:00
|
|
|
Accounts {
|
|
|
|
accounts_db,
|
2019-04-15 17:15:50 -07:00
|
|
|
account_locks: Mutex::new(HashSet::new()),
|
2018-12-24 16:11:20 -08:00
|
|
|
paths,
|
2019-03-02 14:02:11 -08:00
|
|
|
own_paths,
|
2018-12-24 16:11:20 -08:00
|
|
|
}
|
|
|
|
}
|
2019-04-15 17:15:50 -07:00
|
|
|
pub fn new_from_parent(parent: &Accounts) -> Self {
|
|
|
|
let accounts_db = parent.accounts_db.clone();
|
|
|
|
Accounts {
|
|
|
|
accounts_db,
|
|
|
|
account_locks: Mutex::new(HashSet::new()),
|
|
|
|
paths: parent.paths.clone(),
|
|
|
|
own_paths: parent.own_paths,
|
|
|
|
}
|
2019-02-20 21:31:24 -08:00
|
|
|
}
|
|
|
|
|
2019-03-07 14:58:11 -08:00
|
|
|
    /// Slow because lock is held for 1 operation instead of many
    pub fn load_slow(&self, ancestors: &HashMap<Fork, usize>, pubkey: &Pubkey) -> Option<Account> {
        // Zero-lamport entries are tombstones; hide them from callers.
        self.accounts_db
            .load_slow(ancestors, pubkey)
            .filter(|acc| acc.lamports != 0)
    }
|
2018-12-24 16:11:20 -08:00
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
    /// Return the newest version of every account on `fork` whose owner is
    /// `program_id`.
    pub fn load_by_program(&self, fork: Fork, program_id: &Pubkey) -> Vec<(Pubkey, Account)> {
        let accumulator: Vec<Vec<StoredAccount>> = self.accounts_db.scan_account_storage(
            fork,
            |stored_account: &StoredAccount, accum: &mut Vec<StoredAccount>| {
                if stored_account.account.owner == *program_id {
                    accum.push(stored_account.clone())
                }
            },
        );
        let mut versions: Vec<StoredAccount> = accumulator.into_iter().flat_map(|x| x).collect();
        // Sort newest write_version first per pubkey so dedup keeps the
        // latest version of each account.
        versions.sort_by_key(|s| (s.pubkey, (s.write_version as i64).neg()));
        versions.dedup_by_key(|s| s.pubkey);
        versions
            .into_iter()
            .map(|s| (s.pubkey, s.account))
            .collect()
    }
|
|
|
|
|
2019-03-07 14:58:11 -08:00
|
|
|
    /// Slow because lock is held for 1 operation instead of many
    pub fn store_slow(&self, fork: Fork, pubkey: &Pubkey, account: &Account) {
        self.accounts_db.store(fork, &[(pubkey, account)]);
    }
|
2019-01-04 16:04:31 -08:00
|
|
|
|
2018-12-17 12:41:23 -08:00
|
|
|
fn lock_account(
|
2019-04-15 17:15:50 -07:00
|
|
|
locks: &mut HashSet<Pubkey>,
|
2018-12-17 12:41:23 -08:00
|
|
|
keys: &[Pubkey],
|
|
|
|
error_counters: &mut ErrorCounters,
|
|
|
|
) -> Result<()> {
|
|
|
|
// Copy all the accounts
|
|
|
|
for k in keys {
|
2019-02-20 21:31:24 -08:00
|
|
|
if locks.contains(k) {
|
2018-12-17 12:41:23 -08:00
|
|
|
error_counters.account_in_use += 1;
|
2019-03-28 11:45:34 -07:00
|
|
|
debug!("Account in use: {:?}", k);
|
2019-03-13 12:58:44 -07:00
|
|
|
return Err(TransactionError::AccountInUse);
|
2018-12-17 12:41:23 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for k in keys {
|
2019-02-20 21:31:24 -08:00
|
|
|
locks.insert(*k);
|
2018-12-17 12:41:23 -08:00
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
fn unlock_account(tx: &Transaction, result: &Result<()>, locks: &mut HashSet<Pubkey>) {
|
2018-12-17 12:41:23 -08:00
|
|
|
match result {
|
2019-03-13 12:58:44 -07:00
|
|
|
Err(TransactionError::AccountInUse) => (),
|
2018-12-17 12:41:23 -08:00
|
|
|
_ => {
|
2019-04-15 17:15:50 -07:00
|
|
|
for k in &tx.message().account_keys {
|
|
|
|
locks.remove(k);
|
2018-12-17 12:41:23 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-01-04 16:04:31 -08:00
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
// Hash of the stored account state for `fork`; `None` handling is
// delegated to AccountsDB (presumably when the fork has no state —
// TODO confirm against AccountsDB::hash_internal_state).
pub fn hash_internal_state(&self, fork: Fork) -> Option<Hash> {
    self.accounts_db.hash_internal_state(fork)
}
|
|
|
|
|
|
|
|
/// This function will prevent multiple threads from modifying the same account state at the
|
|
|
|
/// same time
|
|
|
|
#[must_use]
|
2019-04-15 17:15:50 -07:00
|
|
|
pub fn lock_accounts(&self, txs: &[Transaction]) -> Vec<Result<()>> {
|
2018-12-17 12:41:23 -08:00
|
|
|
let mut account_locks = self.account_locks.lock().unwrap();
|
|
|
|
let mut error_counters = ErrorCounters::default();
|
|
|
|
let rv = txs
|
|
|
|
.iter()
|
2019-02-20 21:31:24 -08:00
|
|
|
.map(|tx| {
|
|
|
|
Self::lock_account(
|
|
|
|
&mut account_locks,
|
2019-03-29 09:05:06 -07:00
|
|
|
&tx.message().account_keys,
|
2019-02-20 21:31:24 -08:00
|
|
|
&mut error_counters,
|
|
|
|
)
|
|
|
|
})
|
2018-12-17 12:41:23 -08:00
|
|
|
.collect();
|
|
|
|
if error_counters.account_in_use != 0 {
|
|
|
|
inc_new_counter_info!(
|
|
|
|
"bank-process_transactions-account_in_use",
|
|
|
|
error_counters.account_in_use
|
|
|
|
);
|
|
|
|
}
|
|
|
|
rv
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Once accounts are unlocked, new transactions that modify that state can enter the pipeline
|
2019-04-15 17:15:50 -07:00
|
|
|
pub fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) {
|
2018-12-17 12:41:23 -08:00
|
|
|
let mut account_locks = self.account_locks.lock().unwrap();
|
|
|
|
debug!("bank unlock accounts");
|
|
|
|
txs.iter()
|
|
|
|
.zip(results.iter())
|
2019-04-15 17:15:50 -07:00
|
|
|
.for_each(|(tx, result)| Self::unlock_account(tx, result, &mut account_locks));
|
2018-12-17 12:41:23 -08:00
|
|
|
}
|
|
|
|
|
2019-02-20 12:17:32 -08:00
|
|
|
// Returns true when `fork` holds any stored accounts (delegated).
pub fn has_accounts(&self, fork: Fork) -> bool {
    self.accounts_db.has_accounts(fork)
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
/// Resolve the accounts and loaders for each transaction in `txs`,
/// delegating to AccountsDB. `results` carries per-transaction
/// pre-existing statuses and `error_counters` tallies failures.
pub fn load_accounts(
    &self,
    ancestors: &HashMap<Fork, usize>,
    txs: &[Transaction],
    results: Vec<Result<()>>,
    fee_calculator: &FeeCalculator,
    error_counters: &mut ErrorCounters,
) -> Vec<Result<(InstructionAccounts, InstructionLoaders)>> {
    self.accounts_db
        .load_accounts(ancestors, txs, results, fee_calculator, error_counters)
}
|
|
|
|
|
2019-01-29 16:33:28 -08:00
|
|
|
/// Store the accounts into the DB
// Thin delegation: `res` is the per-transaction execution status and
// `loaded` the accounts previously resolved by `load_accounts`.
pub fn store_accounts(
    &self,
    fork: Fork,
    txs: &[Transaction],
    res: &[Result<()>],
    loaded: &[Result<(InstructionAccounts, InstructionLoaders)>],
) {
    self.accounts_db.store_accounts(fork, txs, res, loaded)
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
/// Purge a fork if it is not a root
/// Root forks cannot be purged
pub fn purge_fork(&self, fork: Fork) {
    self.accounts_db.purge_fork(fork);
}
|
2019-04-15 17:15:50 -07:00
|
|
|
/// Add a fork to root. Root forks cannot be purged
pub fn add_root(&self, fork: Fork) {
    self.accounts_db.add_root(fork)
}
|
2018-12-17 12:41:23 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
|
|
|
// TODO: all the bank tests are bank specific, issue: 2194
|
2019-01-08 09:20:25 -08:00
|
|
|
|
|
|
|
use super::*;
|
2018-12-24 16:11:20 -08:00
|
|
|
use rand::{thread_rng, Rng};
|
2019-01-08 09:20:25 -08:00
|
|
|
use solana_sdk::account::Account;
|
|
|
|
use solana_sdk::hash::Hash;
|
2019-03-23 20:12:27 -07:00
|
|
|
use solana_sdk::instruction::CompiledInstruction;
|
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2019-01-08 09:20:25 -08:00
|
|
|
use solana_sdk::transaction::Transaction;
|
|
|
|
|
2019-03-02 14:02:11 -08:00
|
|
|
fn cleanup_paths(paths: &str) {
|
|
|
|
let paths = get_paths_vec(&paths);
|
|
|
|
paths.iter().for_each(|p| {
|
|
|
|
let _ignored = remove_dir_all(p);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2019-03-29 15:11:21 -07:00
|
|
|
fn load_accounts_with_fee(
|
2019-01-08 09:20:25 -08:00
|
|
|
tx: Transaction,
|
|
|
|
ka: &Vec<(Pubkey, Account)>,
|
2019-03-29 15:11:21 -07:00
|
|
|
fee_calculator: &FeeCalculator,
|
2019-01-08 09:20:25 -08:00
|
|
|
error_counters: &mut ErrorCounters,
|
|
|
|
) -> Vec<Result<(InstructionAccounts, InstructionLoaders)>> {
|
2019-04-15 17:15:50 -07:00
|
|
|
let accounts = Accounts::new(None);
|
2019-01-08 09:20:25 -08:00
|
|
|
for ka in ka.iter() {
|
2019-02-27 16:06:06 -08:00
|
|
|
accounts.store_slow(0, &ka.0, &ka.1);
|
2019-01-08 09:20:25 -08:00
|
|
|
}
|
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
let ancestors = vec![(0, 0)].into_iter().collect();
|
|
|
|
let res = accounts.load_accounts(
|
|
|
|
&ancestors,
|
|
|
|
&[tx],
|
|
|
|
vec![Ok(())],
|
|
|
|
&fee_calculator,
|
|
|
|
error_counters,
|
|
|
|
);
|
2018-12-24 16:11:20 -08:00
|
|
|
res
|
2019-01-08 09:20:25 -08:00
|
|
|
}
|
|
|
|
|
2019-03-29 15:11:21 -07:00
|
|
|
// Convenience wrapper: same as load_accounts_with_fee but with the
// default (zero-fee) calculator.
fn load_accounts(
    tx: Transaction,
    ka: &Vec<(Pubkey, Account)>,
    error_counters: &mut ErrorCounters,
) -> Vec<Result<(InstructionAccounts, InstructionLoaders)>> {
    let fee_calculator = FeeCalculator::default();
    load_accounts_with_fee(tx, ka, &fee_calculator, error_counters)
}
|
|
|
|
|
2019-01-08 09:20:25 -08:00
|
|
|
// A transaction with no signers and no keys cannot resolve account 0.
#[test]
fn test_load_accounts_no_key() {
    let accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions::<Keypair>(
        &[],
        &[],
        Hash::default(),
        vec![native_loader::id()],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(loaded_accounts[0], Err(TransactionError::AccountNotFound));
}
|
|
|
|
|
|
|
|
// The signer's account was never stored, so loading account 0 fails.
#[test]
fn test_load_accounts_no_account_0_exists() {
    let accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();

    let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![native_loader::id()],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(loaded_accounts[0], Err(TransactionError::AccountNotFound));
}
|
|
|
|
|
|
|
|
// Referencing Pubkey::default() as the program id, which was never
// stored, yields AccountNotFound.
#[test]
fn test_load_accounts_unknown_program_id() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();
    let key1 = Pubkey::new(&[5u8; 32]);

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let account = Account::new(2, 1, &Pubkey::default());
    accounts.push((key1, account));

    let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![Pubkey::default()],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(loaded_accounts[0], Err(TransactionError::AccountNotFound));
}
|
|
|
|
|
|
|
|
// The fee (10) exceeds the payer's balance (1 lamport), so loading
// fails with InsufficientFundsForFee.
#[test]
fn test_load_accounts_insufficient_funds() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![native_loader::id()],
        instructions,
    );

    let fee_calculator = FeeCalculator::new(10);
    assert_eq!(fee_calculator.calculate_fee(tx.message()), 10);

    let loaded_accounts =
        load_accounts_with_fee(tx, &accounts, &fee_calculator, &mut error_counters);

    assert_eq!(error_counters.insufficient_funds, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(
        loaded_accounts[0],
        Err(TransactionError::InsufficientFundsForFee)
    );
}
|
|
|
|
|
|
|
|
// The native loader is the sole program, so the instruction's loader
// chain is empty while both referenced accounts load successfully.
#[test]
fn test_load_accounts_no_loaders() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();
    let key1 = Pubkey::new(&[5u8; 32]);

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let account = Account::new(2, 1, &Pubkey::default());
    accounts.push((key1, account));

    let instructions = vec![CompiledInstruction::new(0, &(), vec![0, 1])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[key1],
        Hash::default(),
        vec![native_loader::id()],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 0);
    assert_eq!(loaded_accounts.len(), 1);
    match &loaded_accounts[0] {
        Ok((a, l)) => {
            assert_eq!(a.len(), 2);
            assert_eq!(a[0], accounts[0].1);
            assert_eq!(l.len(), 1);
            assert_eq!(l[0].len(), 0);
        }
        Err(e) => Err(e).unwrap(),
    }
}
|
|
|
|
|
|
|
|
// Builds a loader ownership chain key6 -> key5 -> ... -> key1 ->
// native_loader that is one level too deep, expecting CallChainTooDeep.
#[test]
fn test_load_accounts_max_call_depth() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();
    let key1 = Pubkey::new(&[5u8; 32]);
    let key2 = Pubkey::new(&[6u8; 32]);
    let key3 = Pubkey::new(&[7u8; 32]);
    let key4 = Pubkey::new(&[8u8; 32]);
    let key5 = Pubkey::new(&[9u8; 32]);
    let key6 = Pubkey::new(&[10u8; 32]);

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let mut account = Account::new(40, 1, &Pubkey::default());
    account.executable = true;
    account.owner = native_loader::id();
    accounts.push((key1, account));

    let mut account = Account::new(41, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key1;
    accounts.push((key2, account));

    let mut account = Account::new(42, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key2;
    accounts.push((key3, account));

    let mut account = Account::new(43, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key3;
    accounts.push((key4, account));

    let mut account = Account::new(44, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key4;
    accounts.push((key5, account));

    let mut account = Account::new(45, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key5;
    accounts.push((key6, account));

    let instructions = vec![CompiledInstruction::new(0, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![key6],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.call_chain_too_deep, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(loaded_accounts[0], Err(TransactionError::CallChainTooDeep));
}
|
|
|
|
|
|
|
|
// The program account is owned by Pubkey::default() rather than a real
// loader, so loader resolution fails with AccountNotFound.
#[test]
fn test_load_accounts_bad_program_id() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();
    let key1 = Pubkey::new(&[5u8; 32]);

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let mut account = Account::new(40, 1, &Pubkey::default());
    account.executable = true;
    account.owner = Pubkey::default();
    accounts.push((key1, account));

    let instructions = vec![CompiledInstruction::new(0, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![key1],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(loaded_accounts[0], Err(TransactionError::AccountNotFound));
}
|
|
|
|
|
|
|
|
// The program account is never marked executable, so it cannot be used
// as a program id and loading fails.
#[test]
fn test_load_accounts_not_executable() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();
    let key1 = Pubkey::new(&[5u8; 32]);

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let mut account = Account::new(40, 1, &Pubkey::default());
    account.owner = native_loader::id();
    accounts.push((key1, account));

    let instructions = vec![CompiledInstruction::new(0, &(), vec![0])];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![key1],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 1);
    assert_eq!(loaded_accounts.len(), 1);
    assert_eq!(loaded_accounts[0], Err(TransactionError::AccountNotFound));
}
|
|
|
|
|
|
|
|
// Two programs with ownership chains of different depth: key1 is owned
// by the native loader (chain length 1) and key2 by key1 (chain length 2).
#[test]
fn test_load_accounts_multiple_loaders() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let key0 = keypair.pubkey();
    let key1 = Pubkey::new(&[5u8; 32]);
    let key2 = Pubkey::new(&[6u8; 32]);
    let key3 = Pubkey::new(&[7u8; 32]);

    let account = Account::new(1, 1, &Pubkey::default());
    accounts.push((key0, account));

    let mut account = Account::new(40, 1, &Pubkey::default());
    account.executable = true;
    account.owner = native_loader::id();
    accounts.push((key1, account));

    let mut account = Account::new(41, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key1;
    accounts.push((key2, account));

    let mut account = Account::new(42, 1, &Pubkey::default());
    account.executable = true;
    account.owner = key2;
    accounts.push((key3, account));

    let instructions = vec![
        CompiledInstruction::new(0, &(), vec![0]),
        CompiledInstruction::new(1, &(), vec![0]),
    ];
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[],
        Hash::default(),
        vec![key1, key2],
        instructions,
    );

    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_not_found, 0);
    assert_eq!(loaded_accounts.len(), 1);
    match &loaded_accounts[0] {
        Ok((a, l)) => {
            assert_eq!(a.len(), 1);
            assert_eq!(a[0], accounts[0].1);
            assert_eq!(l.len(), 2);
            assert_eq!(l[0].len(), 1);
            assert_eq!(l[1].len(), 2);
            for instruction_loaders in l.iter() {
                for (i, a) in instruction_loaders.iter().enumerate() {
                    // +1 to skip first not loader account
                    assert_eq![a.1, accounts[i + 1].1];
                }
            }
        }
        Err(e) => Err(e).unwrap(),
    }
}
|
2019-02-07 11:14:10 -08:00
|
|
|
|
|
|
|
// A transaction that names the same pubkey twice (payer and recipient)
// must be rejected with AccountLoadedTwice.
#[test]
fn test_load_account_pay_to_self() {
    let mut accounts: Vec<(Pubkey, Account)> = Vec::new();
    let mut error_counters = ErrorCounters::default();

    let keypair = Keypair::new();
    let pubkey = keypair.pubkey();

    let account = Account::new(10, 1, &Pubkey::default());
    accounts.push((pubkey, account));

    let instructions = vec![CompiledInstruction::new(0, &(), vec![0, 1])];
    // Simulate pay-to-self transaction, which loads the same account twice
    let tx = Transaction::new_with_compiled_instructions(
        &[&keypair],
        &[pubkey],
        Hash::default(),
        vec![native_loader::id()],
        instructions,
    );
    let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

    assert_eq!(error_counters.account_loaded_twice, 1);
    assert_eq!(loaded_accounts.len(), 1);
    // The exact-Err assertion below subsumes the old
    // `loaded_accounts[0].clone().unwrap_err()` check, which was redundant.
    assert_eq!(
        loaded_accounts[0],
        Err(TransactionError::AccountLoadedTwice)
    );
}
|
2019-02-21 12:08:50 -08:00
|
|
|
|
2019-03-03 10:46:22 -08:00
|
|
|
// Expands to a per-call-site unique name (file + line) for temp account
// directories, so concurrent tests don't collide.
#[macro_export]
macro_rules! tmp_accounts_name {
    () => {
        &format!("{}-{}", file!(), line!())
    };
}
|
|
|
|
|
|
|
|
// Shorthand: builds a TempPaths rooted at a call-site-unique name.
#[macro_export]
macro_rules! get_tmp_accounts_path {
    () => {
        get_tmp_accounts_path(tmp_accounts_name!())
    };
}
|
|
|
|
|
|
|
|
// Holds a comma-separated list of scratch directories; cleaned up on Drop.
struct TempPaths {
    pub paths: String,
}
|
|
|
|
|
|
|
|
// RAII cleanup: remove the scratch directories when the guard goes away.
impl Drop for TempPaths {
    fn drop(&mut self) {
        cleanup_paths(&self.paths);
    }
}
|
|
|
|
|
|
|
|
fn get_tmp_accounts_path(paths: &str) -> TempPaths {
|
|
|
|
let vpaths = get_paths_vec(paths);
|
|
|
|
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
|
|
|
|
let vpaths: Vec<_> = vpaths
|
|
|
|
.iter()
|
|
|
|
.map(|path| format!("{}/{}", out_dir, path))
|
|
|
|
.collect();
|
|
|
|
TempPaths {
|
|
|
|
paths: vpaths.join(","),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-21 12:08:50 -08:00
|
|
|
// After add_root(0), the account stored in fork 0 is visible even though
// the ancestor set only names fork 1 — roots are always consulted.
#[test]
fn test_accountsdb_add_root() {
    solana_logger::setup();
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let key = Pubkey::default();
    let account0 = Account::new(1, 0, &key);

    db.store(0, &[(&key, &account0)]);
    db.add_root(0);
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(db.load_slow(&ancestors, &key), Some(account0));
}
|
2019-02-23 23:43:10 -08:00
|
|
|
|
2019-04-15 17:15:50 -07:00
|
|
|
// When a key is stored in both fork 0 and fork 1, lookups resolve to the
// version in the most recent ancestor fork.
#[test]
fn test_accountsdb_latest_ancestor() {
    solana_logger::setup();
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let key = Pubkey::default();
    let account0 = Account::new(1, 0, &key);

    db.store(0, &[(&key, &account0)]);

    let account1 = Account::new(0, 0, &key);
    db.store(1, &[(&key, &account1)]);

    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(&db.load_slow(&ancestors, &key).unwrap(), &account1);

    let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
    assert_eq!(&db.load_slow(&ancestors, &key).unwrap(), &account1);
}
|
|
|
|
|
|
|
|
// Same as test_accountsdb_latest_ancestor, but rooting fork 0 must not
// shadow the newer value stored in fork 1.
#[test]
fn test_accountsdb_latest_ancestor_with_root() {
    solana_logger::setup();
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let key = Pubkey::default();
    let account0 = Account::new(1, 0, &key);

    db.store(0, &[(&key, &account0)]);

    let account1 = Account::new(0, 0, &key);
    db.store(1, &[(&key, &account1)]);
    db.add_root(0);

    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(&db.load_slow(&ancestors, &key).unwrap(), &account1);

    let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
    assert_eq!(&db.load_slow(&ancestors, &key).unwrap(), &account1);
}
|
|
|
|
|
|
|
|
// Two children of a rooted fork: fork 1 overrides the key, fork 2 still
// sees the root's original value.
#[test]
fn test_accountsdb_root_one_fork() {
    solana_logger::setup();
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let key = Pubkey::default();
    let account0 = Account::new(1, 0, &key);

    // store value 1 in the "root", i.e. db zero
    db.store(0, &[(&key, &account0)]);

    // now we have:
    //
    //                 root0 -> key.lamports==1
    //                  / \
    //                 /   \
    //  key.lamports==0 <- fork1    \
    //                             fork2 -> key.lamports==1
    //                                      (via root0)

    // store value 0 in one child
    let account1 = Account::new(0, 0, &key);
    db.store(1, &[(&key, &account1)]);

    // masking accounts is done at the Accounts level, at accountsDB we see
    // original account (but could also accept "None", which is implemented
    // at the Accounts level)
    let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
    assert_eq!(&db.load_slow(&ancestors, &key).unwrap(), &account1);

    // we should see 1 token in fork 2
    let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
    assert_eq!(&db.load_slow(&ancestors, &key).unwrap(), &account0);

    db.add_root(0);

    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(db.load_slow(&ancestors, &key), Some(account1));
    let ancestors = vec![(2, 2)].into_iter().collect();
    assert_eq!(db.load_slow(&ancestors, &key), Some(account0)); // original value
}
|
|
|
|
|
|
|
|
// 100 accounts stored in fork 0 remain loadable (with the expected
// lamport values) both before and after rooting fork 0.
#[test]
fn test_accountsdb_add_root_many() {
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);

    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(&db, &mut pubkeys, 0, 100, 0, 0);
    for _ in 1..100 {
        let idx = thread_rng().gen_range(0, 99);
        let ancestors = vec![(0, 0)].into_iter().collect();
        let account = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
        // create_account gives account i a balance of i + 1.
        let mut default_account = Account::default();
        default_account.lamports = (idx + 1) as u64;
        assert_eq!(default_account, account);
    }

    db.add_root(0);

    // check that all the accounts appear with a new root
    for _ in 1..100 {
        let idx = thread_rng().gen_range(0, 99);
        let ancestors = vec![(0, 0)].into_iter().collect();
        let account0 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
        let ancestors = vec![(1, 1)].into_iter().collect();
        let account1 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
        let mut default_account = Account::default();
        default_account.lamports = (idx + 1) as u64;
        assert_eq!(&default_account, &account0);
        assert_eq!(&default_account, &account1);
    }
}
|
|
|
|
|
2019-04-09 13:48:13 -07:00
|
|
|
// Verifies per-storage-entry reference counts after filling two stores
// and rooting fork 1. Ignored — presumably flaky or slow; TODO confirm.
#[test]
#[ignore]
fn test_accountsdb_count_stores() {
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);

    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(
        &db,
        &mut pubkeys,
        0,
        2,
        ACCOUNT_DATA_FILE_SIZE as usize / 3,
        0,
    );
    assert!(check_storage(&db, 2));

    let pubkey = Pubkey::new_rand();
    let account = Account::new(1, ACCOUNT_DATA_FILE_SIZE as usize / 3, &pubkey);
    db.store(1, &[(&pubkey, &account)]);
    db.store(1, &[(&pubkeys[0], &account)]);
    {
        let stores = db.storage.read().unwrap();
        assert_eq!(stores.len(), 2);
        assert_eq!(stores[0].count.load(Ordering::Relaxed), 2);
        assert_eq!(stores[1].count.load(Ordering::Relaxed), 2);
    }
    db.add_root(1);
    {
        let stores = db.storage.read().unwrap();
        assert_eq!(stores.len(), 2);
        assert_eq!(stores[0].count.load(Ordering::Relaxed), 2);
        assert_eq!(stores[1].count.load(Ordering::Relaxed), 2);
    }
}
|
|
|
|
|
2019-02-26 13:51:39 -08:00
|
|
|
#[test]
fn test_accounts_unsquashed() {
    // The same key is written on a parent fork and its child; AccountsDB
    // itself never masks zero-lamport children — that happens one layer up,
    // in `Accounts`.
    let key = Pubkey::default();

    // Fork 0 ("root"): one lamport.
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let root_account = Account::new(1, 0, &key);
    db.store(0, &[(&key, &root_account)]);

    // Fork 1 (child): zero lamports for the same key.
    let child_account = Account::new(0, 0, &key);
    db.store(1, &[(&key, &child_account)]);

    // Querying the raw db with both forks as ancestors yields the child's
    // (unmasked) zero-lamport account.
    let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
    assert_eq!(db.load_slow(&ancestors, &key), Some(child_account));

    // The `Accounts` wrapper hides zero-lamport entries...
    let mut accounts = Accounts::new(None);
    accounts.accounts_db = Arc::new(db);
    assert_eq!(accounts.load_slow(&ancestors, &key), None);
    // ...but still serves the live account when only fork 0 is visible.
    let root_ancestors = vec![(0, 0)].into_iter().collect();
    assert_eq!(accounts.load_slow(&root_ancestors, &key), Some(root_account));
}
|
|
|
|
|
|
|
|
fn create_account(
|
|
|
|
accounts: &AccountsDB,
|
|
|
|
pubkeys: &mut Vec<Pubkey>,
|
2019-03-21 17:36:10 -07:00
|
|
|
fork: Fork,
|
2018-12-24 16:11:20 -08:00
|
|
|
num: usize,
|
2019-04-09 13:48:13 -07:00
|
|
|
space: usize,
|
2018-12-24 16:11:20 -08:00
|
|
|
num_vote: usize,
|
|
|
|
) {
|
|
|
|
for t in 0..num {
|
2019-03-30 20:37:33 -07:00
|
|
|
let pubkey = Pubkey::new_rand();
|
2019-04-09 13:48:13 -07:00
|
|
|
let account = Account::new((t + 1) as u64, space, &Account::default().owner);
|
2018-12-24 16:11:20 -08:00
|
|
|
pubkeys.push(pubkey.clone());
|
2019-04-15 17:15:50 -07:00
|
|
|
let ancestors = vec![(fork, 0)].into_iter().collect();
|
|
|
|
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
|
|
|
|
accounts.store(fork, &[(&pubkey, &account)]);
|
2019-02-28 17:34:37 -08:00
|
|
|
}
|
|
|
|
for t in 0..num_vote {
|
2019-03-30 20:37:33 -07:00
|
|
|
let pubkey = Pubkey::new_rand();
|
2019-04-09 13:48:13 -07:00
|
|
|
let account = Account::new((num + t + 1) as u64, space, &solana_vote_api::id());
|
2019-02-28 17:34:37 -08:00
|
|
|
pubkeys.push(pubkey.clone());
|
2019-04-15 17:15:50 -07:00
|
|
|
let ancestors = vec![(fork, 0)].into_iter().collect();
|
|
|
|
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
|
|
|
|
accounts.store(fork, &[(&pubkey, &account)]);
|
2018-12-24 16:11:20 -08:00
|
|
|
}
|
|
|
|
}
|
2019-02-26 13:51:39 -08:00
|
|
|
|
2019-03-21 17:36:10 -07:00
|
|
|
fn update_accounts(accounts: &AccountsDB, pubkeys: &Vec<Pubkey>, fork: Fork, range: usize) {
|
2018-12-24 16:11:20 -08:00
|
|
|
for _ in 1..1000 {
|
|
|
|
let idx = thread_rng().gen_range(0, range);
|
2019-04-15 17:15:50 -07:00
|
|
|
let ancestors = vec![(fork, 0)].into_iter().collect();
|
|
|
|
if let Some(mut account) = accounts.load_slow(&ancestors, &pubkeys[idx]) {
|
2019-03-05 16:28:14 -08:00
|
|
|
account.lamports = account.lamports + 1;
|
2019-04-15 17:15:50 -07:00
|
|
|
accounts.store(fork, &[(&pubkeys[idx], &account)]);
|
2019-03-05 16:28:14 -08:00
|
|
|
if account.lamports == 0 {
|
2019-04-15 17:15:50 -07:00
|
|
|
let ancestors = vec![(fork, 0)].into_iter().collect();
|
|
|
|
assert!(accounts.load_slow(&ancestors, &pubkeys[idx]).is_none());
|
2018-12-24 16:11:20 -08:00
|
|
|
} else {
|
|
|
|
let mut default_account = Account::default();
|
2019-03-05 16:28:14 -08:00
|
|
|
default_account.lamports = account.lamports;
|
2019-03-31 21:31:19 -07:00
|
|
|
assert_eq!(default_account, account);
|
2018-12-24 16:11:20 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-02-26 13:51:39 -08:00
|
|
|
}
|
|
|
|
|
2019-03-21 17:36:10 -07:00
|
|
|
fn check_storage(accounts: &AccountsDB, count: usize) -> bool {
|
|
|
|
let stores = accounts.storage.read().unwrap();
|
|
|
|
assert_eq!(stores.len(), 1);
|
|
|
|
assert_eq!(
|
|
|
|
stores[0].get_status(),
|
|
|
|
AccountStorageStatus::StorageAvailable
|
|
|
|
);
|
|
|
|
stores[0].count.load(Ordering::Relaxed) == count
|
|
|
|
}
|
|
|
|
|
|
|
|
fn check_accounts(accounts: &AccountsDB, pubkeys: &Vec<Pubkey>, fork: Fork) {
|
|
|
|
for _ in 1..100 {
|
|
|
|
let idx = thread_rng().gen_range(0, 99);
|
2019-04-15 17:15:50 -07:00
|
|
|
let ancestors = vec![(fork, 0)].into_iter().collect();
|
|
|
|
let account = accounts.load_slow(&ancestors, &pubkeys[idx]).unwrap();
|
2019-03-21 17:36:10 -07:00
|
|
|
let mut default_account = Account::default();
|
|
|
|
default_account.lamports = (idx + 1) as u64;
|
2019-03-31 21:31:19 -07:00
|
|
|
assert_eq!(default_account, account);
|
2019-03-21 17:36:10 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
#[test]
fn test_account_one() {
    // Smoke test: one generated account stores and loads back intact.
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(&db, &mut pubkeys, 0, 1, 0, 0);

    let ancestors = vec![(0, 0)].into_iter().collect();
    let loaded = db.load_slow(&ancestors, &pubkeys[0]).unwrap();
    // `create_account` gives the first account 1 lamport and default fields.
    let mut expected = Account::default();
    expected.lamports = 1;
    assert_eq!(expected, loaded);
}
|
|
|
|
|
|
|
|
#[test]
fn test_account_many() {
    // 100 accounts spread over two backing paths all load back correctly.
    let paths = get_tmp_accounts_path("many0,many1");
    let db = AccountsDB::new(&paths.paths);
    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(&db, &mut pubkeys, 0, 100, 0, 0);
    check_accounts(&db, &pubkeys, 0);
}
|
|
|
|
|
|
|
|
#[test]
fn test_account_update() {
    // Updating existing accounts in place must keep all 100 live entries
    // in the single storage file.
    let paths = get_tmp_accounts_path!();
    let accounts = AccountsDB::new(&paths.paths);
    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
    update_accounts(&accounts, &pubkeys, 0, 99);
    assert!(check_storage(&accounts, 100));
}
|
|
|
|
|
2019-03-07 12:41:00 -08:00
|
|
|
#[test]
fn test_account_grow_many() {
    // With tiny (4 KiB) files, nine quarter-file accounts force multiple
    // AppendVecs to be allocated for the same fork.
    let paths = get_tmp_accounts_path("many2,many3");
    let size = 4096;
    let accounts = AccountsDB::new_with_file_size(&paths.paths, size);
    let mut keys = vec![];
    for i in 0..9 {
        let key = Pubkey::new_rand();
        let account = Account::new(i + 1, size as usize / 4, &key);
        accounts.store(0, &[(&key, &account)]);
        keys.push(key);
    }

    // All nine accounts remain loadable with their distinct balances.
    // The ancestor set is loop-invariant — build it once.
    let ancestors = vec![(0, 0)].into_iter().collect();
    for (i, key) in keys.iter().enumerate() {
        assert_eq!(
            accounts.load_slow(&ancestors, key).unwrap().lamports,
            (i as u64) + 1
        );
    }

    // Fork 0 must have grown at least two AppendVecs.
    let mut append_vec_histogram = HashMap::new();
    for storage in accounts.storage.read().unwrap().iter() {
        *append_vec_histogram.entry(storage.fork_id).or_insert(0) += 1;
    }
    for count in append_vec_histogram.values() {
        assert!(*count >= 2);
    }
}
|
|
|
|
|
2018-12-24 16:11:20 -08:00
|
|
|
#[test]
#[ignore]
fn test_account_grow() {
    // Exercises AppendVec growth: two half-file accounts fill the first
    // store and force a second; repeated updates then ping-pong the live
    // copy of account 1 between the non-full stores.
    let paths = get_tmp_accounts_path!();
    let accounts = AccountsDB::new(&paths.paths);
    // Expected count/status pairs toggled by `index` below.
    let count = [0, 1];
    let status = [
        AccountStorageStatus::StorageAvailable,
        AccountStorageStatus::StorageFull,
    ];
    // First half-file account fills most of store 0.
    let pubkey1 = Pubkey::new_rand();
    let account1 = Account::new(1, ACCOUNT_DATA_FILE_SIZE as usize / 2, &pubkey1);
    accounts.store(0, &[(&pubkey1, &account1)]);
    {
        let stores = accounts.storage.read().unwrap();
        assert_eq!(stores.len(), 1);
        assert_eq!(stores[0].count.load(Ordering::Relaxed), 1);
        assert_eq!(stores[0].get_status(), status[0]);
    }

    // Second half-file account cannot fit: store 0 becomes full and a
    // second store is created.
    let pubkey2 = Pubkey::new_rand();
    let account2 = Account::new(1, ACCOUNT_DATA_FILE_SIZE as usize / 2, &pubkey2);
    accounts.store(0, &[(&pubkey2, &account2)]);
    {
        let stores = accounts.storage.read().unwrap();
        assert_eq!(stores.len(), 2);
        assert_eq!(stores[0].count.load(Ordering::Relaxed), 1);
        assert_eq!(stores[0].get_status(), status[1]);
        assert_eq!(stores[1].count.load(Ordering::Relaxed), 1);
        assert_eq!(stores[1].get_status(), status[0]);
    }
    let ancestors = vec![(0, 0)].into_iter().collect();
    assert_eq!(accounts.load_slow(&ancestors, &pubkey1).unwrap(), account1);
    assert_eq!(accounts.load_slow(&ancestors, &pubkey2).unwrap(), account2);

    // Each re-store of account1 moves its live copy between store 0 and a
    // third store; store 1 (full, holding account2's data) stays fixed.
    for i in 0..25 {
        let index = i % 2;
        accounts.store(0, &[(&pubkey1, &account1)]);
        {
            let stores = accounts.storage.read().unwrap();
            assert_eq!(stores.len(), 3);
            // Live count alternates 0/1 between stores 0 and 2 (`index ^ 1`
            // selects the opposite element of `count`).
            assert_eq!(stores[0].count.load(Ordering::Relaxed), count[index]);
            assert_eq!(stores[0].get_status(), status[0]);
            assert_eq!(stores[1].count.load(Ordering::Relaxed), 1);
            assert_eq!(stores[1].get_status(), status[1]);
            assert_eq!(stores[2].count.load(Ordering::Relaxed), count[index ^ 1]);
            assert_eq!(stores[2].get_status(), status[0]);
        }
        // Both accounts remain loadable throughout.
        let ancestors = vec![(0, 0)].into_iter().collect();
        assert_eq!(accounts.load_slow(&ancestors, &pubkey1).unwrap(), account1);
        assert_eq!(accounts.load_slow(&ancestors, &pubkey2).unwrap(), account2);
    }
}
|
|
|
|
|
2019-03-21 17:36:10 -07:00
|
|
|
#[test]
fn test_purge_fork_not_root() {
    // Purging a fork that was never rooted must drop its accounts.
    let paths = get_tmp_accounts_path!();
    let accounts = AccountsDB::new(&paths.paths);
    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(&accounts, &mut pubkeys, 0, 1, 0, 0);
    let ancestors = vec![(0, 0)].into_iter().collect();
    // (stray double semicolons removed from the two asserts below)
    assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_some());
    accounts.purge_fork(0);
    assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_none());
}
|
|
|
|
|
2019-02-27 21:42:14 -08:00
|
|
|
#[test]
fn test_purge_fork_after_root() {
    // Once a fork has been rooted, purge_fork must be a no-op for it:
    // rooted data stays loadable.
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let mut pubkeys: Vec<Pubkey> = vec![];
    create_account(&db, &mut pubkeys, 0, 1, 0, 0);

    let ancestors = vec![(0, 0)].into_iter().collect();
    db.add_root(0);
    db.purge_fork(0);
    assert!(db.load_slow(&ancestors, &pubkeys[0]).is_some());
}
|
|
|
|
|
2019-03-01 11:53:39 -08:00
|
|
|
#[test]
fn test_accounts_empty_hash_internal_state() {
    // A freshly created db has nothing on fork 0, so no state hash exists.
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    assert_eq!(db.hash_internal_state(0), None);
}
|
|
|
|
|
|
|
|
#[test]
fn test_accountsdb_account_not_found() {
    // Looking up an executable account for a pubkey that was never stored
    // must fail with AccountNotFound and bump the matching error counter.
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);
    let mut counters = ErrorCounters::default();
    let ancestors = vec![(0, 0)].into_iter().collect();

    let index = db.accounts_index.read().unwrap();
    let stores = db.storage.read().unwrap();
    let result = AccountsDB::load_executable_accounts(
        &stores,
        &ancestors,
        &index,
        &Pubkey::new_rand(),
        &mut counters,
    );
    assert_eq!(result, Err(TransactionError::AccountNotFound));
    assert_eq!(counters.account_not_found, 1);
}
|
2019-03-05 12:34:21 -08:00
|
|
|
|
2019-03-07 08:51:56 -08:00
|
|
|
#[test]
fn test_load_by_program() {
    // `load_by_program` filters accounts by owner program id.
    let paths = get_tmp_accounts_path!();
    let db = AccountsDB::new(&paths.paths);

    // Two accounts owned by program [2; 32], one owned by [3; 32].
    let program2 = Pubkey::new(&[2; 32]);
    let program3 = Pubkey::new(&[3; 32]);

    let pubkey0 = Pubkey::new_rand();
    let account0 = Account::new(1, 0, &program2);
    db.store(0, &[(&pubkey0, &account0)]);

    let pubkey1 = Pubkey::new_rand();
    let account1 = Account::new(1, 0, &program2);
    db.store(0, &[(&pubkey1, &account1)]);

    let pubkey2 = Pubkey::new_rand();
    let account2 = Account::new(1, 0, &program3);
    db.store(0, &[(&pubkey2, &account2)]);

    let mut accounts = Accounts::new(None);
    accounts.accounts_db = Arc::new(db);

    // Both [2; 32]-owned accounts come back.
    let loaded = accounts.load_by_program(0, &program2);
    assert_eq!(loaded.len(), 2);
    // Exactly the single [3; 32]-owned account.
    let loaded = accounts.load_by_program(0, &program3);
    assert_eq!(loaded, vec![(pubkey2, account2)]);
    // An owner nobody uses yields nothing.
    let loaded = accounts.load_by_program(0, &Pubkey::new(&[4; 32]));
    assert_eq!(loaded, vec![]);
}
|
2018-12-17 12:41:23 -08:00
|
|
|
}
|