- Fix lock/unlock of accounts
- Fix format check warnings
commit c46b2541fe (parent 2158ba5863)
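In rough terms, the first change makes account locking per fork: Accounts now keeps a HashMap<Fork, HashSet<Pubkey>> of in-flight pubkeys, and lock_accounts/unlock_accounts take the fork id; the second change is an import/format cleanup. A minimal caller-side sketch (illustrative only — `accounts`, `fork`, and `txs` are placeholders; the real call sites are in the `impl Bank` hunks near the end of this diff):

    // Sketch, not part of the diff: per-fork locking as introduced by this commit.
    // `accounts` is an Accounts instance from this crate; `fork` is the bank's id.
    let results = accounts.lock_accounts(fork, &txs);
    // ... execute the batch against `fork` ...
    accounts.unlock_accounts(fork, &txs, &results);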
@@ -1,5 +1,4 @@
 use crate::appendvec::AppendVec;
-use solana_sdk::signature::{Keypair, KeypairUtil};
 use crate::bank::{BankError, Result};
 use crate::runtime::has_duplicates;
 use bincode::serialize;
@@ -10,10 +9,11 @@ use solana_sdk::account::Account;
 use solana_sdk::hash::{hash, Hash};
 use solana_sdk::native_loader;
 use solana_sdk::pubkey::Pubkey;
+use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::transaction::Transaction;
 use solana_sdk::vote_program;
 use std::collections::BTreeMap;
-use std::fs::{create_dir_all, remove_dir_all};
+use std::fs::{create_dir_all, read_dir, remove_dir_all};
 use std::path::Path;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex, RwLock};
@@ -67,14 +67,15 @@ pub struct ErrorCounters {
 
 const ACCOUNT_DATA_FILE_SIZE: u64 = 64 * 1024 * 1024;
 const ACCOUNT_DATA_FILE: &str = "data";
-const NUM_ACCOUNT_DIRS: usize = 1;
+const ACCOUNTSDB_DIR: &str = "accountsdb";
+const NUM_ACCOUNT_DIRS: usize = 4;
 
 /// An offset into the AccountsDB::storage vector
 type AppendVecId = usize;
 
 type Fork = u64;
 
-struct AccountMap(Vec<(Fork, (AppendVecId, u64))>);
+struct AccountMap(HashMap<Fork, (AppendVecId, u64)>);
 
 #[derive(Debug, PartialEq)]
 enum AccountStorageStatus {
@@ -130,6 +131,16 @@ impl AccountStorage {
     }
 }
 
+#[derive(Default)]
+struct AccountsForkInfo {
+    /// The number of transactions the bank has processed without error since the
+    /// start of the ledger.
+    transaction_count: u64,
+
+    /// List of all parents corresponding to this fork
+    parents: Vec<Fork>,
+}
+
 // This structure handles the load/store of the accounts
 pub struct AccountsDB {
     /// Keeps tracks of index into AppendVec on a per fork basis
@@ -141,24 +152,8 @@ pub struct AccountsDB {
     /// distribute the accounts across storage lists
     next_id: AtomicUsize,
 
-    /// The number of transactions the bank has processed without error since the
-    /// start of the ledger.
-    transaction_count: RwLock<HashMap<u64, u64>>,
-}
-
-impl Default for AccountsDB {
-    fn default() -> Self {
-        let index_info = AccountIndexInfo {
-            index: RwLock::new(HashMap::new()),
-            vote_index: RwLock::new(HashSet::new()),
-        };
-        AccountsDB {
-            index_info,
-            storage: RwLock::new(vec![]),
-            next_id: AtomicUsize::new(0),
-            transaction_count: RwLock::new(HashMap::new()),
-        }
-    }
+    /// Information related to the fork
+    fork_info: RwLock<HashMap<Fork, AccountsForkInfo>>,
 }
 
 /// This structure handles synchronization for db
@@ -166,7 +161,7 @@ pub struct Accounts {
     pub accounts_db: AccountsDB,
 
     /// set of accounts which are currently in the pipeline
-    account_locks: Mutex<HashSet<Pubkey>>,
+    account_locks: Mutex<HashMap<Fork, HashSet<Pubkey>>>,
 
     /// List of persistent stores
     paths: String,
@@ -177,12 +172,44 @@ impl Drop for Accounts {
         let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
         paths.iter().for_each(|p| {
             let _ignored = remove_dir_all(p);
-        })
+        });
+        let entry = read_dir(ACCOUNTSDB_DIR);
+        if entry.is_ok() && entry.unwrap().count() == 0 {
+            let _ignored = remove_dir_all(ACCOUNTSDB_DIR);
+        }
     }
 }
 
 impl AccountsDB {
-    pub fn add_storage(&self, paths: &str) {
+    pub fn new(fork: Fork, paths: &str) -> Self {
+        let index_info = AccountIndexInfo {
+            index: RwLock::new(HashMap::new()),
+            vote_index: RwLock::new(HashSet::new()),
+        };
+        let accounts_db = AccountsDB {
+            index_info,
+            storage: RwLock::new(vec![]),
+            next_id: AtomicUsize::new(0),
+            fork_info: RwLock::new(HashMap::new()),
+        };
+        accounts_db.add_storage(paths);
+        accounts_db.add_fork(fork, None);
+        accounts_db
+    }
+
+    pub fn add_fork(&self, fork: Fork, parent: Option<Fork>) {
+        let mut info = self.fork_info.write().unwrap();
+        let mut fork_info = AccountsForkInfo::default();
+        if parent.is_some() {
+            fork_info.parents.push(parent.unwrap());
+            if let Some(list) = info.get(&parent.unwrap()) {
+                fork_info.parents.extend_from_slice(&list.parents);
+            }
+        }
+        info.insert(fork, fork_info);
+    }
+
+    fn add_storage(&self, paths: &str) {
         let paths: Vec<String> = paths.split(',').map(|s| s.to_string()).collect();
         let mut stores: Vec<AccountStorage> = vec![];
         paths.iter().for_each(|p| {
@@ -196,7 +223,6 @@ impl AccountsDB {
             };
             stores.push(storage);
         });
-        let _ignored = stores[0].appendvec.write().unwrap().grow_file();
        let mut storage = self.storage.write().unwrap();
        storage.append(&mut stores);
    }
@@ -224,8 +250,7 @@ impl AccountsDB {
             .iter()
             .for_each(|p| {
                 if let Some(forks) = self.index_info.index.read().unwrap().get(p) {
-                    for (v_fork, (id, index)) in forks.0.iter() {
-                        if fork == *v_fork {
+                    if let Some((id, index)) = forks.0.get(&fork) {
                         accounts.push(
                             self.storage.read().unwrap()[*id]
                                 .appendvec
@@ -235,10 +260,6 @@ impl AccountsDB {
                                 .unwrap(),
                         );
                     }
-                        if fork > *v_fork {
-                            break;
-                        }
-                    }
                 }
             });
         accounts
@@ -248,12 +269,10 @@ impl AccountsDB {
         let index = self.index_info.index.read().unwrap();
 
         for account_map in index.values() {
-            for (v_fork, _) in account_map.0.iter() {
-                if fork == *v_fork {
+            if account_map.0.contains_key(&fork) {
                 return true;
             }
-            }
         }
         false
     }
 
@@ -261,8 +280,7 @@ impl AccountsDB {
         let mut ordered_accounts = BTreeMap::new();
         let rindex = self.index_info.index.read().unwrap();
         rindex.iter().for_each(|(p, forks)| {
-            for (v_fork, (id, index)) in forks.0.iter() {
-                if fork == *v_fork {
+            if let Some((id, index)) = forks.0.get(&fork) {
                 let account = self.storage.read().unwrap()[*id]
                     .appendvec
                     .read()
@@ -271,10 +289,6 @@ impl AccountsDB {
                     .unwrap();
                 ordered_accounts.insert(*p, account);
             }
-                if fork > *v_fork {
-                    break;
-                }
-            }
         });
 
         if ordered_accounts.is_empty() {
@@ -283,17 +297,27 @@ impl AccountsDB {
         Some(hash(&serialize(&ordered_accounts).unwrap()))
     }
 
-    fn load(&self, fork: Fork, pubkey: &Pubkey) -> Option<Account> {
+    fn get_account(&self, id: AppendVecId, offset: u64) -> Option<Account> {
+        let appendvec = &self.storage.read().unwrap()[id].appendvec;
+        let av = appendvec.read().unwrap();
+        Some(av.get_account(offset).unwrap())
+    }
+
+    fn load(&self, fork: Fork, pubkey: &Pubkey, walk_back: bool) -> Option<Account> {
         let index = self.index_info.index.read().unwrap();
         if let Some(forks) = index.get(pubkey) {
-            // find most recent fork that is an ancestor of current_fork
-            for (v_fork, (id, offset)) in forks.0.iter() {
-                if *v_fork > fork {
-                    continue;
+            if let Some((id, offset)) = forks.0.get(&fork) {
+                return self.get_account(*id, *offset);
+            } else {
-                let appendvec = &self.storage.read().unwrap()[*id].appendvec;
-                let av = appendvec.read().unwrap();
-                return Some(av.get_account(*offset).unwrap());
+                if !walk_back {
+                    return None;
                 }
+                let fork_info = self.fork_info.read().unwrap();
+                for parent_fork in fork_info.get(&fork).unwrap().parents.iter() {
+                    if let Some((id, offset)) = forks.0.get(&parent_fork) {
+                        return self.get_account(*id, *offset);
+                    }
+                }
             }
         }
@@ -366,16 +390,26 @@ impl AccountsDB {
         (id, offset)
     }
 
+    fn remove_account_entry(&self, fork: Fork, forks: &mut AccountMap) {
+        if let Some((id, _)) = forks.0.remove(&fork) {
+            let stores = self.storage.read().unwrap();
+            if stores[id].count.fetch_sub(1, Ordering::Relaxed) == 1 {
+                stores[id].appendvec.write().unwrap().reset();
+                stores[id].set_status(AccountStorageStatus::StorageAvailable);
+            }
+        }
+    }
+
     /// Store the account update. If the update is to delete the account because the token balance
     /// is 0, purge needs to be set to true for the delete to occur in place.
     pub fn store(&self, fork: Fork, purge: bool, pubkey: &Pubkey, account: &Account) {
         if account.tokens == 0 && purge {
             // purge if balance is 0 and no checkpoints
             let mut index = self.index_info.index.write().unwrap();
-            if let Some(forks) = index.remove(pubkey) {
-                let stores = self.storage.read().unwrap();
-                for (_, (id, _)) in forks.0.iter() {
-                    stores[*id].count.fetch_sub(1, Ordering::Relaxed);
+            if let Some(mut forks) = index.get_mut(&pubkey) {
+                self.remove_account_entry(fork, &mut forks);
+                if forks.0.is_empty() {
+                    index.remove(pubkey);
                 }
             }
             if vote_program::check_id(&account.owner) {
@@ -393,34 +427,15 @@ impl AccountsDB {
             }
         }
 
-        let mut result: Option<usize> = None;
+        let result: Option<(AppendVecId, u64)>;
         {
-            let mut insert: Option<usize> = None;
             let mut windex = self.index_info.index.write().unwrap();
-            let forks = windex.entry(*pubkey).or_insert(AccountMap(vec![]));
-            for (i, (v_fork, (v_id, _))) in forks.0.iter().enumerate() {
-                if *v_fork > fork {
-                    continue;
-                }
-                if *v_fork == fork {
-                    result = Some(*v_id);
-                    forks.0[i] = (fork, (id, offset));
-                    break;
-                }
-                insert = Some(i);
-                break;
-            }
-            if result.is_none() {
-                if let Some(index) = insert {
-                    forks.0.insert(index, (fork, (id, offset)));
-                } else {
-                    forks.0.push((fork, (id, offset)));
-                }
-            }
+            let forks = windex.entry(*pubkey).or_insert(AccountMap(HashMap::new()));
+            result = forks.0.insert(fork, (id, offset));
         }
         let stores = self.storage.read().unwrap();
         stores[id].count.fetch_add(1, Ordering::Relaxed);
-        if let Some(old_id) = result {
+        if let Some((old_id, _)) = result {
             if stores[old_id].count.fetch_sub(1, Ordering::Relaxed) == 1 {
                 stores[old_id].appendvec.write().unwrap().reset();
                 stores[old_id].set_status(AccountStorageStatus::StorageAvailable);
@@ -429,7 +444,6 @@ impl AccountsDB {
         }
     }
 
-
     pub fn store_accounts(
         &self,
         fork: Fork,
@@ -471,7 +485,7 @@ impl AccountsDB {
         // If a fee can pay for execution then the program will be scheduled
         let mut called_accounts: Vec<Account> = vec![];
         for key in &tx.account_keys {
-            called_accounts.push(self.load(fork, key).unwrap_or_default());
+            called_accounts.push(self.load(fork, key, true).unwrap_or_default());
         }
         if called_accounts.is_empty() || called_accounts[0].tokens == 0 {
             error_counters.account_not_found += 1;
@@ -506,7 +520,7 @@ impl AccountsDB {
             }
             depth += 1;
 
-            let program = match self.load(fork, &program_id) {
+            let program = match self.load(fork, &program_id, true) {
                 Some(program) => program,
                 None => {
                     error_counters.account_not_found += 1;
@@ -567,43 +581,70 @@ impl AccountsDB {
     }
 
     pub fn increment_transaction_count(&self, fork: Fork, tx_count: usize) {
-        let mut tx = self.transaction_count.write().unwrap();
-        let entry = tx.entry(fork).or_insert(0);
-        *entry += tx_count as u64;
+        let mut info = self.fork_info.write().unwrap();
+        let entry = info.entry(fork).or_insert(AccountsForkInfo::default());
+        entry.transaction_count += tx_count as u64;
     }
 
     pub fn transaction_count(&self, fork: Fork) -> u64 {
-        let tx = self.transaction_count.read().unwrap();
-        if let Some(entry) = tx.get(&fork) {
-            *entry
+        let info = self.fork_info.read().unwrap();
+        if let Some(entry) = info.get(&fork) {
+            entry.transaction_count
         } else {
             0
         }
     }
 
     /// become the root accountsDB
-    fn squash<U>(&mut self, parents: &[U])
-    where
-        U: std::ops::Deref<Target = Self>,
+    fn squash(&self, fork: Fork) {
+        let parents: Vec<Fork>;
         {
-            /*
-            self.transaction_count += parents
+            let info = self.fork_info.read().unwrap();
+            let fork_info = info.get(&fork).unwrap();
+            parents = fork_info.parents.clone();
+        }
+        let tx_count = parents
             .iter()
-            .fold(0, |sum, parent| sum + parent.transaction_count);
+            .fold(0, |sum, parent| sum + self.transaction_count(*parent));
+        self.increment_transaction_count(fork, tx_count as usize);
 
         // for every account in all the parents, load latest and update self if
         // absent
-        for pubkey in parents.iter().flat_map(|parent| parent.accounts.keys()) {
-            // update self with data from parents unless in self
-            if self.accounts.get(pubkey).is_none() {
-                self.accounts
-                    .insert(pubkey.clone(), Self::load(parents, pubkey).unwrap().clone());
+        let mut index = self.index_info.index.write().unwrap();
+        index.iter_mut().for_each(|(_, mut forks)| {
+            if forks.0.get(&fork).is_none() {
+                for parent_fork in parents.iter() {
+                    if let Some((id, offset)) = forks.0.get(parent_fork) {
+                        forks.0.insert(fork, (*id, *offset));
+                        forks.0.remove(parent_fork);
+                        break;
+                    }
                 }
             }
+            for parent_fork in parents.iter() {
+                self.remove_account_entry(*parent_fork, &mut forks);
+            }
+        });
 
         // toss any zero-balance accounts, since self is root now
-        self.accounts.retain(|_, account| account.tokens != 0);
-        */
+        {
+            let mut info = self.fork_info.write().unwrap();
+            let fork_info = info.get_mut(&fork).unwrap();
+            fork_info.parents = vec![];
+            for parent_fork in parents.iter() {
+                info.remove(parent_fork);
+            }
+        }
+        index.iter_mut().for_each(|(pubkey, mut forks)| {
+            if let Some((id, offset)) = forks.0.get(&fork) {
+                let account = self.get_account(*id, *offset).unwrap();
+                if account.tokens == 0 {
+                    self.remove_account_entry(fork, &mut forks);
+                    if vote_program::check_id(&account.owner) {
+                        self.index_info.vote_index.write().unwrap().remove(pubkey);
+                    }
+                }
+            }
+        });
     }
 }
 
@@ -611,36 +652,47 @@ impl Accounts {
     fn make_new_dir() -> String {
         static ACCOUNT_DIR: AtomicUsize = AtomicUsize::new(0);
         let dir = ACCOUNT_DIR.fetch_add(1, Ordering::Relaxed);
-        format!("accountsdb/{}", dir.to_string())
+        format!("{}/{}", ACCOUNTSDB_DIR, dir.to_string())
     }
 
     fn make_default_paths() -> String {
-        let mut paths = Self::make_new_dir();
-        for _ in 1..NUM_ACCOUNT_DIRS {
+        let mut paths = "".to_string();
+        for index in 0..NUM_ACCOUNT_DIRS {
+            if index > 0 {
                 paths.push_str(",");
+            }
             paths.push_str(&Self::make_new_dir());
         }
         paths
     }
 
-    pub fn new(in_paths: &str) -> Self {
+    pub fn new(fork: Fork, in_paths: &str) -> Self {
         let paths = if !in_paths.is_empty() {
             in_paths.to_string()
         } else {
             Self::make_default_paths()
         };
-        let accounts_db = AccountsDB::default();
-        accounts_db.add_storage(&paths);
+        let accounts_db = AccountsDB::new(fork, &paths);
+        accounts_db.add_fork(fork, None);
         Accounts {
             accounts_db,
-            account_locks: Mutex::new(HashSet::new()),
+            account_locks: Mutex::new(HashMap::new()),
             paths,
         }
     }
 
+    pub fn new_from_parent(&self, fork: Fork, parent: Fork) {
+        self.accounts_db.add_fork(fork, Some(parent));
+    }
+
     /// Slow because lock is held for 1 operation insted of many
     pub fn load_slow(&self, fork: Fork, pubkey: &Pubkey) -> Option<Account> {
-        self.accounts_db.load(fork, pubkey).filter(|acc| acc.tokens != 0)
+        self.accounts_db.load(fork, pubkey, true).filter(|acc| acc.tokens != 0)
     }
 
     /// Slow because lock is held for 1 operation insted of many
+    pub fn load_slow_no_parent(&self, fork: Fork, pubkey: &Pubkey) -> Option<Account> {
+        self.accounts_db.load(fork, pubkey, false).filter(|acc| acc.tokens != 0)
+    }
+
+    /// Slow because lock is held for 1 operation insted of many
@@ -651,29 +703,41 @@ impl Accounts {
     }
 
     fn lock_account(
-        account_locks: &mut HashSet<Pubkey>,
+        fork: Fork,
+        account_locks: &mut HashMap<Fork, HashSet<Pubkey>>,
         keys: &[Pubkey],
         error_counters: &mut ErrorCounters,
     ) -> Result<()> {
         // Copy all the accounts
+        let locks = account_locks.entry(fork).or_insert(HashSet::new());
         for k in keys {
-            if account_locks.contains(k) {
+            if locks.contains(k) {
                 error_counters.account_in_use += 1;
                 return Err(BankError::AccountInUse);
             }
         }
         for k in keys {
-            account_locks.insert(*k);
+            locks.insert(*k);
         }
         Ok(())
     }
 
-    fn unlock_account(tx: &Transaction, result: &Result<()>, account_locks: &mut HashSet<Pubkey>) {
+    fn unlock_account(
+        fork: Fork,
+        tx: &Transaction,
+        result: &Result<()>,
+        account_locks: &mut HashMap<Fork, HashSet<Pubkey>>,
+    ) {
         match result {
             Err(BankError::AccountInUse) => (),
             _ => {
+                if let Some(locks) = account_locks.get_mut(&fork) {
                     for k in &tx.account_keys {
-                        account_locks.remove(k);
+                        locks.remove(k);
                     }
+                    if locks.is_empty() {
+                        account_locks.remove(&fork);
+                    }
+                }
             }
         }
     }
@@ -686,12 +750,19 @@ impl Accounts {
     /// This function will prevent multiple threads from modifying the same account state at the
     /// same time
     #[must_use]
-    pub fn lock_accounts(&self, txs: &[Transaction]) -> Vec<Result<()>> {
+    pub fn lock_accounts(&self, fork: Fork, txs: &[Transaction]) -> Vec<Result<()>> {
         let mut account_locks = self.account_locks.lock().unwrap();
         let mut error_counters = ErrorCounters::default();
         let rv = txs
             .iter()
-            .map(|tx| Self::lock_account(&mut account_locks, &tx.account_keys, &mut error_counters))
+            .map(|tx| {
+                Self::lock_account(
+                    fork,
+                    &mut account_locks,
+                    &tx.account_keys,
+                    &mut error_counters,
+                )
+            })
             .collect();
         if error_counters.account_in_use != 0 {
             inc_new_counter_info!(
@@ -703,12 +774,12 @@ impl Accounts {
     }
 
     /// Once accounts are unlocked, new transactions that modify that state can enter the pipeline
-    pub fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) {
+    pub fn unlock_accounts(&self, fork: Fork, txs: &[Transaction], results: &[Result<()>]) {
         let mut account_locks = self.account_locks.lock().unwrap();
         debug!("bank unlock accounts");
         txs.iter()
             .zip(results.iter())
-            .for_each(|(tx, result)| Self::unlock_account(tx, result, &mut account_locks));
+            .for_each(|(tx, result)| Self::unlock_account(fork, tx, result, &mut account_locks));
     }
 
     pub fn has_accounts(&self, fork: Fork) -> bool {
@ -751,20 +822,9 @@ impl Accounts {
|
|||
|
||||
/// accounts starts with an empty data structure for every child/fork
|
||||
/// this function squashes all the parents into this instance
|
||||
pub fn squash<U>(&self, parents: &[U])
|
||||
where
|
||||
U: std::ops::Deref<Target = Self>,
|
||||
{
|
||||
assert!(self.account_locks.lock().unwrap().is_empty());
|
||||
|
||||
/*
|
||||
let dbs: Vec<_> = parents
|
||||
.iter()
|
||||
.map(|obj| obj.accounts_db.read().unwrap())
|
||||
.collect();
|
||||
|
||||
self.accounts_db.write().unwrap().squash(&dbs);
|
||||
*/
|
||||
pub fn squash(&self, fork: Fork) {
|
||||
assert!(!self.account_locks.lock().unwrap().contains_key(&fork));
|
||||
self.accounts_db.squash(fork);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -784,16 +844,15 @@ mod tests {
     #[test]
     fn test_purge() {
         let paths = "purge".to_string();
-        let db = AccountsDB::default();
-        db.add_storage(&paths);
+        let db = AccountsDB::new(0, &paths);
         let key = Pubkey::default();
         let account = Account::new(0, 0, Pubkey::default());
         // accounts are deleted when their token value is 0 and purge is true
         db.store(0, false, &key, &account);
-        assert_eq!(db.load(0, &key), Some(account.clone()));
+        assert_eq!(db.load(0, &key, true), Some(account.clone()));
         // purge should be set to true for the root checkpoint
         db.store(0, true, &key, &account);
-        assert_eq!(db.load(0, &key), None);
+        assert_eq!(db.load(0, &key, true), None);
         cleanup_dirs(&paths);
     }
 
@@ -802,7 +861,7 @@ mod tests {
         ka: &Vec<(Pubkey, Account)>,
         error_counters: &mut ErrorCounters,
     ) -> Vec<Result<(InstructionAccounts, InstructionLoaders)>> {
-        let accounts = Accounts::new("");
+        let accounts = Accounts::new(0, "");
         for ka in ka.iter() {
             accounts.store_slow(0, true, &ka.0, &ka.1);
         }
@@ -1183,25 +1242,28 @@ mod tests {
 
     #[test]
     fn test_accountsdb_squash() {
-        let mut db0 = AccountsDB::default();
+        let paths = "squash".to_string();
+        let db0 = AccountsDB::new(0, &paths);
         let key = Pubkey::default();
         let account0 = Account::new(1, 0, key);
 
         // store value 1 in the "root", i.e. db zero
         db0.store(0, true, &key, &account0);
 
+        db0.add_fork(1, Some(0));
         // store value 0 in the child, but don't purge (see purge test above)
-        let mut db1 = AccountsDB::default();
         let account1 = Account::new(0, 0, key);
-        db1.store(1, false, &key, &account1);
+        db0.store(1, false, &key, &account1);
 
         // masking accounts is done at the Accounts level, at accountsDB we see
         // original account
-        assert_eq!(db1.load(1, &key), Some(account1));
+        assert_eq!(db0.load(1, &key, true), Some(account1));
 
-        // squash, which should whack key's account
-        db1.squash(&[&db0]);
-        assert_eq!(db1.load(1, &key), None);
+        // merge, which should whack key's account
+        db0.squash(1);
+
+        assert_eq!(db0.load(1, &key, true), None);
+        cleanup_dirs(&paths);
     }
 
     #[test]
@@ -1209,24 +1271,25 @@ mod tests {
         let key = Pubkey::default();
 
         // 1 token in the "root", i.e. db zero
-        let mut db0 = AccountsDB::default();
+        let paths = "unsquash".to_string();
+        let db0 = AccountsDB::new(0, &paths);
         let account0 = Account::new(1, 0, key);
         db0.store(0, true, &key, &account0);
 
+        db0.add_fork(1, Some(0));
         // 0 tokens in the child
-        let mut db1 = AccountsDB::default();
         let account1 = Account::new(0, 0, key);
-        db1.store(1, false, &key, &account1);
+        db0.store(1, false, &key, &account1);
 
         // masking accounts is done at the Accounts level, at accountsDB we see
         // original account
-        assert_eq!(db1.load(1, &key), Some(account1));
+        assert_eq!(db0.load(1, &key, true), Some(account1));
 
-        let mut accounts0 = Accounts::new("");
-        accounts0.accounts_db = db0;
-        let mut accounts1 = Accounts::new("");
-        accounts1.accounts_db = db1;
+        let mut accounts1 = Accounts::new(3, "");
+        accounts1.accounts_db = db0;
         assert_eq!(accounts1.load_slow(1, &key), None);
         assert_eq!(accounts1.load_slow(0, &key), Some(account0));
+        cleanup_dirs(&paths);
     }
 
     fn create_account(
@@ -1245,7 +1308,7 @@ mod tests {
             default_account.owner = vote_program::id();
             nvote -= 1;
         }
-        assert!(accounts.load(0, &pubkey).is_none());
+        assert!(accounts.load(0, &pubkey, true).is_none());
         accounts.store(0, true, &pubkey, &default_account);
     }
 }
@@ -1253,11 +1316,11 @@ mod tests {
     fn update_accounts(accounts: &AccountsDB, pubkeys: Vec<Pubkey>, range: usize) {
         for _ in 1..1000 {
             let idx = thread_rng().gen_range(0, range);
-            if let Some(mut account) = accounts.load(0, &pubkeys[idx]) {
+            if let Some(mut account) = accounts.load(0, &pubkeys[idx], true) {
                 account.tokens = account.tokens + 1;
                 accounts.store(0, true, &pubkeys[idx], &account);
                 if account.tokens == 0 {
-                    assert!(accounts.load(0, &pubkeys[idx]).is_none());
+                    assert!(accounts.load(0, &pubkeys[idx], true).is_none());
                 } else {
                     let mut default_account = Account::default();
                     default_account.tokens = account.tokens;
@@ -1288,11 +1351,10 @@ mod tests {
     #[test]
     fn test_account_one() {
         let paths = "one".to_string();
-        let accounts = AccountsDB::default();
-        accounts.add_storage(&paths);
+        let accounts = AccountsDB::new(0, &paths);
         let mut pubkeys: Vec<Pubkey> = vec![];
         create_account(&accounts, &mut pubkeys, 1, 0);
-        let account = accounts.load(0, &pubkeys[0]).unwrap();
+        let account = accounts.load(0, &pubkeys[0], true).unwrap();
         let mut default_account = Account::default();
         default_account.tokens = 1;
         assert_eq!(compare_account(&default_account, &account), true);
@@ -1302,13 +1364,12 @@ mod tests {
     #[test]
     fn test_account_many() {
         let paths = "many0,many1".to_string();
-        let accounts = AccountsDB::default();
-        accounts.add_storage(&paths);
+        let accounts = AccountsDB::new(0, &paths);
         let mut pubkeys: Vec<Pubkey> = vec![];
         create_account(&accounts, &mut pubkeys, 100, 0);
         for _ in 1..100 {
             let idx = thread_rng().gen_range(0, 99);
-            let account = accounts.load(0, &pubkeys[idx]).unwrap();
+            let account = accounts.load(0, &pubkeys[idx], true).unwrap();
             let mut default_account = Account::default();
             default_account.tokens = (idx + 1) as u64;
             assert_eq!(compare_account(&default_account, &account), true);
@@ -1319,8 +1380,7 @@ mod tests {
     #[test]
     fn test_account_update() {
         let paths = "update0".to_string();
-        let accounts = AccountsDB::default();
-        accounts.add_storage(&paths);
+        let accounts = AccountsDB::new(0, &paths);
         let mut pubkeys: Vec<Pubkey> = vec![];
         create_account(&accounts, &mut pubkeys, 100, 0);
         update_accounts(&accounts, pubkeys, 99);
@@ -1339,8 +1399,7 @@ mod tests {
     #[test]
     fn test_account_grow() {
         let paths = "grow0".to_string();
-        let accounts = AccountsDB::default();
-        accounts.add_storage(&paths);
+        let accounts = AccountsDB::new(0, &paths);
         let count = [0, 1];
         let status = [
             AccountStorageStatus::StorageAvailable,
@@ -1367,8 +1426,8 @@ mod tests {
             assert_eq!(stores[1].count.load(Ordering::Relaxed), 1);
             assert_eq!(stores[1].get_status(), status[0]);
         }
-        assert_eq!(accounts.load(0, &pubkey1).unwrap(), account1);
-        assert_eq!(accounts.load(0, &pubkey2).unwrap(), account2);
+        assert_eq!(accounts.load(0, &pubkey1, true).unwrap(), account1);
+        assert_eq!(accounts.load(0, &pubkey2, true).unwrap(), account2);
 
         for i in 0..25 {
             let index = i % 2;
@@ -1383,8 +1442,8 @@ mod tests {
             assert_eq!(stores[2].count.load(Ordering::Relaxed), count[index ^ 1]);
             assert_eq!(stores[2].get_status(), status[0]);
         }
-        assert_eq!(accounts.load(0, &pubkey1).unwrap(), account1);
-        assert_eq!(accounts.load(0, &pubkey2).unwrap(), account2);
+        assert_eq!(accounts.load(0, &pubkey1, true).unwrap(), account1);
+        assert_eq!(accounts.load(0, &pubkey2, true).unwrap(), account2);
         }
         cleanup_dirs(&paths);
     }
@@ -1392,8 +1451,7 @@ mod tests {
     #[test]
     fn test_account_vote() {
         let paths = "vote0".to_string();
-        let accounts = AccountsDB::default();
-        accounts.add_storage(&paths);
+        let accounts = AccountsDB::new(0, &paths);
         let mut pubkeys: Vec<Pubkey> = vec![];
         create_account(&accounts, &mut pubkeys, 100, 6);
         let accounts = accounts.get_vote_accounts(0);
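Taken together, the AccountsDB hunks above make the per-pubkey index fork-aware: each entry is a HashMap<Fork, (AppendVecId, u64)>, load() gains a walk_back flag that optionally falls back to the fork's recorded parents, and squash(fork) folds parent entries into the given fork. A small usage sketch mirroring the new test_accountsdb_squash test (the path string, `key`, and `account` are placeholders, not part of the diff):

    // Sketch based on the tests above; assumes this crate's AccountsDB/Account types.
    let paths = "demo".to_string();
    let db = AccountsDB::new(0, &paths);     // root fork 0
    db.add_fork(1, Some(0));                 // child fork 1 records parent 0
    db.store(1, false, &key, &account);      // write lands in fork 1 only
    let _hit = db.load(1, &key, true);       // walk_back = true may fall back to fork 0
    db.squash(1);                            // fold parent entries into fork 1

The remaining hunks (impl Bank and impl BankForks below) update callers to pass the bank's id as the fork.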
@@ -27,8 +27,8 @@ use solana_sdk::timing::{duration_as_us, MAX_ENTRY_IDS, NUM_TICKS_PER_SECOND};
 use solana_sdk::token_program;
 use solana_sdk::transaction::Transaction;
 use solana_sdk::vote_program::{self, VoteState};
 use std::collections::HashSet;
 use std::result;
+use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, RwLock};
 use std::time::Instant;
 
@@ -120,7 +120,7 @@ impl Bank {
 
     pub fn new_with_paths(genesis_block: &GenesisBlock, paths: &str) -> Self {
         let mut bank = Self::default();
-        bank.accounts = Some(Arc::new(Accounts::new(&paths)));
+        bank.accounts = Some(Arc::new(Accounts::new(bank.id, &paths)));
         bank.process_genesis_block(genesis_block);
         bank.add_builtin_programs();
         bank
@@ -140,16 +140,18 @@ impl Bank {
         bank.parent = RwLock::new(Some(parent.clone()));
         bank.parent_hash = parent.hash();
         bank.collector_id = collector_id;
 
         bank.accounts = Some(parent.accounts());
+        bank.accounts().new_from_parent(bank.id, parent.id);
 
         bank
     }
 
     /// Create a new bank that points to an immutable checkpoint of another bank.
+    /// TODO: remove me in favor of _and_id(), id should not be an assumed value
     pub fn new_from_parent(parent: &Arc<Bank>) -> Self {
+        static BANK_ID: AtomicUsize = AtomicUsize::new(1);
         let collector_id = parent.collector_id;
-        Self::new_from_parent_and_id(parent, collector_id, parent.id() + 1)
+        Self::new_from_parent_and_id(parent, collector_id, BANK_ID.fetch_add(1, Ordering::Relaxed) as u64)
     }
 
     pub fn id(&self) -> u64 {
@@ -181,8 +183,7 @@ impl Bank {
         let parents = self.parents();
         *self.parent.write().unwrap() = None;
 
-        let parent_accounts: Vec<_> = parents.iter().map(|b| b.accounts()).collect();
-        self.accounts().squash(&parent_accounts);
+        self.accounts().squash(self.id);
 
         let parent_caches: Vec<_> = parents
             .iter()
@@ -345,11 +346,11 @@ impl Bank {
         }
         // TODO: put this assert back in
         // assert!(!self.is_frozen());
-        self.accounts().lock_accounts(txs)
+        self.accounts().lock_accounts(self.id, txs)
     }
 
     pub fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) {
-        self.accounts().unlock_accounts(txs, results)
+        self.accounts().unlock_accounts(self.id, txs, results)
     }
 
     fn load_accounts(
@@ -466,7 +467,8 @@ impl Bank {
             inc_new_counter_info!("bank-process_transactions-error_count", err_count);
         }
 
-        self.accounts().increment_transaction_count(self.id, tx_count);
+        self.accounts()
+            .increment_transaction_count(self.id, tx_count);
 
         inc_new_counter_info!("bank-process_transactions-txs", tx_count);
         if 0 != error_counters.last_id_not_found {
@@ -637,7 +639,8 @@ impl Bank {
     pub fn deposit(&self, pubkey: &Pubkey, tokens: u64) {
         let mut account = self.get_account(pubkey).unwrap_or_default();
         account.tokens += tokens;
-        self.accounts().store_slow(self.id, self.is_root(), pubkey, &account);
+        self.accounts()
+            .store_slow(self.id, self.is_root(), pubkey, &account);
     }
 
     fn accounts(&self) -> Arc<Accounts> {
@@ -653,7 +656,7 @@ impl Bank {
     }
 
     pub fn get_account_modified_since_parent(&self, pubkey: &Pubkey) -> Option<Account> {
-        self.accounts().load_slow(self.id, pubkey)
+        self.accounts().load_slow_no_parent(self.id, pubkey)
     }
 
     pub fn transaction_count(&self) -> u64 {
@@ -47,9 +47,8 @@ impl BankForks {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use solana_sdk::hash::Hash;
-    use solana_sdk::pubkey::Pubkey;
     use solana_sdk::genesis_block::GenesisBlock;
+    use solana_sdk::hash::Hash;
 
     #[test]
     fn test_bank_forks_root() {