// solana/src/bank.rs

//! The `bank` module tracks client accounts and the progress of smart
//! contracts. It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
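//!
//! A rough usage sketch of both APIs (illustrative only, not compiled as a
//! doc-test; the `Mint` and `Keypair` setup is assumed):
//!
//! ```ignore
//! let mint = Mint::new(10_000);
//! let bank = Bank::new(&mint);
//!
//! // High-level API: the bank builds, signs, and processes the transfer.
//! let to = Keypair::new().pubkey();
//! bank.transfer(1_000, &mint.keypair(), to, mint.last_id())?;
//!
//! // Low-level API: the caller builds and signs the transaction up front.
//! let tx = Transaction::system_new(&mint.keypair(), to, 500, bank.last_id());
//! bank.process_transaction(&tx)?;
//! ```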
use bincode::deserialize;
use bincode::serialize;
use budget_program::BudgetState;
use budget_transaction::BudgetTransaction;
use counter::Counter;
use dynamic_program::DynamicProgram;
use entry::Entry;
use hash::{hash, Hash};
use itertools::Itertools;
use jsonrpc_macros::pubsub::Sink;
use leader_scheduler::LeaderScheduler;
use ledger::Block;
use log::Level;
use mint::Mint;
use payment_plan::Payment;
use poh_recorder::PohRecorder;
use rpc::RpcSignatureStatus;
use signature::Keypair;
use signature::Signature;
use solana_program_interface::account::{Account, KeyedAccount};
use solana_program_interface::pubkey::Pubkey;
use std;
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Mutex, RwLock};
use std::time::Instant;
use storage_program::StorageProgram;
use system_program::SystemProgram;
use system_transaction::SystemTransaction;
use tictactoe_dashboard_program::TicTacToeDashboardProgram;
use tictactoe_program::TicTacToeProgram;
use timing::{duration_as_us, timestamp};
use token_program::TokenProgram;
use tokio::prelude::Future;
use transaction::Transaction;
use window::WINDOW_SIZE;
/// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transaction that uses
/// that `last_id`. Lowering this value reduces memory consumption, but requires
/// clients to refresh their `last_id` more frequently. Raising the value lengthens
/// the time a client must wait to be certain a missing transaction will not be
/// processed by the network.
pub const MAX_ENTRY_IDS: usize = 1024 * 32;
pub const VERIFY_BLOCK_SIZE: usize = 16;
/// Reasons a transaction might be rejected.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BankError {
/// This Pubkey is being processed in another transaction
AccountInUse,
/// Attempt to debit from `Pubkey`, but found no record of a prior credit.
AccountNotFound,
/// The from `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction
InsufficientFundsForFee,
/// The bank has seen `Signature` before. This can occur under normal operation
/// when a UDP packet is duplicated, as a user error from a client not updating
/// its `last_id`, or as a double-spend attack.
DuplicateSignature,
/// The bank has not seen the given `last_id` or the transaction is too old and
/// the `last_id` has been discarded.
LastIdNotFound,
/// The bank has not seen a transaction with the given `Signature` or the transaction is
/// too old and has been discarded.
SignatureNotFound,
/// Proof of History verification failed.
LedgerVerificationFailed,
/// The total tokens across the transaction's accounts before execution does not equal the total after execution
UnbalancedTransaction(u8),
/// Contract's transactions resulted in an account with a negative balance
/// The difference from InsufficientFundsForFee is that the transaction was executed by the
/// contract
ResultWithNegativeTokens(u8),
/// Contract id is unknown
UnknownContractId(u8),
/// Contract modified an account's contract id
ModifiedContractId(u8),
/// Contract spent the tokens of an account that doesn't belong to it
ExternalAccountTokenSpend(u8),
/// The program returned an error
ProgramRuntimeError(u8),
/// Recording into PoH failed
RecordFailure,
}
pub type Result<T> = result::Result<T, BankError>;
type SignatureStatusMap = HashMap<Signature, Result<()>>;
#[derive(Default)]
struct ErrorCounters {
account_not_found_validator: usize,
account_not_found_leader: usize,
account_in_use: usize,
}
/// The state of all accounts and contracts after processing the ledger's entries.
pub struct Bank {
/// A map of account public keys to the balance in that account.
accounts: RwLock<HashMap<Pubkey, Account>>,
/// set of accounts which are currently in the pipeline
account_locks: Mutex<HashSet<Pubkey>>,
/// A FIFO queue of `last_id` items, where each item is a set of signatures
/// that have been processed using that `last_id`. Rejected `last_id`
/// values are so old that the `last_id` has been pulled out of the queue.
last_ids: RwLock<VecDeque<Hash>>,
/// Mapping of hashes to signature sets along with timestamp. The bank uses this data to
/// reject transactions with signatures it has seen before.
last_ids_sigs: RwLock<HashMap<Hash, (SignatureStatusMap, u64)>>,
/// The number of transactions the bank has processed without error since the
/// start of the ledger.
transaction_count: AtomicUsize,
/// This bool allows us to submit metrics that are specific to leaders or validators
/// It is set to `true` by fullnode before creating the bank.
pub is_leader: bool,
// The latest finality time for the network
finality_time: AtomicUsize,
// loaded contracts hashed by program_id
loaded_contracts: RwLock<HashMap<Pubkey, DynamicProgram>>,
// Mapping of account ids to Subscriber ids and sinks to notify on userdata update
account_subscriptions: RwLock<HashMap<Pubkey, HashMap<Pubkey, Sink<Account>>>>,
// Mapping of signatures to Subscriber ids and sinks to notify on confirmation
signature_subscriptions: RwLock<HashMap<Signature, HashMap<Pubkey, Sink<RpcSignatureStatus>>>>,
}
impl Default for Bank {
fn default() -> Self {
Bank {
accounts: RwLock::new(HashMap::new()),
account_locks: Mutex::new(HashSet::new()),
last_ids: RwLock::new(VecDeque::new()),
last_ids_sigs: RwLock::new(HashMap::new()),
transaction_count: AtomicUsize::new(0),
is_leader: true,
finality_time: AtomicUsize::new(std::usize::MAX),
loaded_contracts: RwLock::new(HashMap::new()),
account_subscriptions: RwLock::new(HashMap::new()),
signature_subscriptions: RwLock::new(HashMap::new()),
}
}
}
impl Bank {
/// Create a default Bank
pub fn new_default(is_leader: bool) -> Self {
let mut bank = Bank::default();
bank.is_leader = is_leader;
bank
}
/// Create a Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let bank = Self::default();
{
let mut accounts = bank.accounts.write().unwrap();
let account = accounts.entry(deposit.to).or_insert_with(Account::default);
Self::apply_payment(deposit, account);
}
bank
}
/// Create a Bank with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let bank = Self::new_from_deposit(&deposit);
bank.register_entry_id(&mint.last_id());
bank
}
/// Commit funds to the given account
fn apply_payment(payment: &Payment, account: &mut Account) {
trace!("apply payments {}", payment.tokens);
account.tokens += payment.tokens;
}
/// Return the last entry ID registered.
pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
let last_item = last_ids
.iter()
.last()
.expect("get last item from 'last_ids' list");
*last_item
}
/// Store the given signature. The bank will reject any transaction with the same signature.
fn reserve_signature(signatures: &mut SignatureStatusMap, signature: &Signature) -> Result<()> {
if let Some(_result) = signatures.get(signature) {
return Err(BankError::DuplicateSignature);
}
signatures.insert(*signature, Ok(()));
Ok(())
}
/// Forget all signatures. Useful for benchmarking.
pub fn clear_signatures(&self) {
for (_, sigs) in self.last_ids_sigs.write().unwrap().iter_mut() {
sigs.0.clear();
}
}
fn reserve_signature_with_last_id(
last_ids_sigs: &mut HashMap<Hash, (SignatureStatusMap, u64)>,
last_id: &Hash,
sig: &Signature,
) -> Result<()> {
if let Some(entry) = last_ids_sigs.get_mut(last_id) {
return Self::reserve_signature(&mut entry.0, sig);
}
Err(BankError::LastIdNotFound)
}
#[cfg(test)]
fn reserve_signature_with_last_id_test(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
let mut last_ids_sigs = self.last_ids_sigs.write().unwrap();
Self::reserve_signature_with_last_id(&mut last_ids_sigs, last_id, sig)
}
fn update_signature_status(
signatures: &mut SignatureStatusMap,
signature: &Signature,
result: &Result<()>,
) {
let entry = signatures.entry(*signature).or_insert(Ok(()));
*entry = result.clone();
}
fn update_signature_status_with_last_id(
last_ids_sigs: &mut HashMap<Hash, (SignatureStatusMap, u64)>,
signature: &Signature,
result: &Result<()>,
last_id: &Hash,
) {
if let Some(entry) = last_ids_sigs.get_mut(last_id) {
Self::update_signature_status(&mut entry.0, signature, result);
}
}
fn update_transaction_statuses(&self, txs: &[Transaction], res: &[Result<()>]) {
let mut last_ids = self.last_ids_sigs.write().unwrap();
for (i, tx) in txs.iter().enumerate() {
Self::update_signature_status_with_last_id(
&mut last_ids,
&tx.signature,
&res[i],
&tx.last_id,
);
if res[i] != Err(BankError::SignatureNotFound) {
let status = match res[i] {
Ok(_) => RpcSignatureStatus::Confirmed,
Err(BankError::ProgramRuntimeError(_)) => {
RpcSignatureStatus::ProgramRuntimeError
}
Err(_) => RpcSignatureStatus::GenericFailure,
};
if status != RpcSignatureStatus::SignatureNotFound {
self.check_signature_subscriptions(&tx.signature, status);
}
}
}
}
/// Look through the last_ids and find all the valid ids.
/// This is batched to avoid holding the lock for a significant amount of time.
///
/// Returns a vec of (valid index, timestamp) tuples, where each index refers to
/// the passed `ids` slice to avoid copying hashes.
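///
/// A rough usage sketch (illustrative only, not compiled as a doc-test; the
/// candidate `ids` are assumed to be collected by the caller):
///
/// ```ignore
/// for (index, registered_at) in bank.count_valid_ids(&ids) {
///     // `ids[index]` is still usable as a `last_id`; `registered_at` is the
///     // timestamp recorded when that id was registered.
/// }
/// ```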
pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> {
let last_ids = self.last_ids_sigs.read().unwrap();
let mut ret = Vec::new();
for (i, id) in ids.iter().enumerate() {
if let Some(entry) = last_ids.get(id) {
ret.push((i, entry.1));
}
}
ret
}
/// Tell the bank which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
/// the oldest ones once its internal cache is full. Once booted out, the
/// bank will reject transactions using that `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self
.last_ids
.write()
.expect("'last_ids' write lock in register_entry_id");
let mut last_ids_sigs = self
.last_ids_sigs
.write()
.expect("last_ids_sigs write lock");
if last_ids.len() >= MAX_ENTRY_IDS {
let id = last_ids.pop_front().unwrap();
last_ids_sigs.remove(&id);
}
last_ids_sigs.insert(*last_id, (HashMap::new(), timestamp()));
last_ids.push_back(*last_id);
}
/// Process a Transaction. This is used for unit tests and simply wraps the batched Bank::process_transactions method.
pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
let txs = vec![tx.clone()];
match self.process_transactions(&txs)[0] {
Err(ref e) => {
info!("process_transaction error: {:?}", e);
Err((*e).clone())
}
Ok(_) => Ok(()),
}
}
fn lock_account(
account_locks: &mut HashSet<Pubkey>,
keys: &[Pubkey],
error_counters: &mut ErrorCounters,
) -> Result<()> {
// First make sure none of the requested accounts are already locked
for k in keys {
if account_locks.contains(k) {
error_counters.account_in_use += 1;
return Err(BankError::AccountInUse);
}
}
for k in keys {
account_locks.insert(*k);
}
Ok(())
}
fn unlock_account(tx: &Transaction, result: &Result<()>, account_locks: &mut HashSet<Pubkey>) {
match result {
Err(BankError::AccountInUse) => (),
_ => for k in &tx.account_keys {
account_locks.remove(k);
},
}
}
fn load_account(
&self,
tx: &Transaction,
accounts: &HashMap<Pubkey, Account>,
last_ids_sigs: &mut HashMap<Hash, (SignatureStatusMap, u64)>,
error_counters: &mut ErrorCounters,
) -> Result<Vec<Account>> {
// Copy all the accounts
if accounts.get(&tx.account_keys[0]).is_none() {
if !self.is_leader {
error_counters.account_not_found_validator += 1;
} else {
error_counters.account_not_found_leader += 1;
}
Err(BankError::AccountNotFound)
} else if accounts.get(&tx.account_keys[0]).unwrap().tokens < tx.fee {
Err(BankError::InsufficientFundsForFee)
} else {
let mut called_accounts: Vec<Account> = tx
.account_keys
.iter()
.map(|key| accounts.get(key).cloned().unwrap_or_default())
.collect();
// There is no way to predict whether a contract will execute without an error,
// so if the fee can be paid, the contract is scheduled
let err =
Self::reserve_signature_with_last_id(last_ids_sigs, &tx.last_id, &tx.signature);
err?;
called_accounts[0].tokens -= tx.fee;
Ok(called_accounts)
}
}
/// This function will prevent multiple threads from modifying the same account state at the
/// same time
#[must_use]
fn lock_accounts(&self, txs: &[Transaction]) -> Vec<Result<()>> {
let mut account_locks = self.account_locks.lock().unwrap();
let mut error_counters = ErrorCounters::default();
let rv = txs
.iter()
.map(|tx| Self::lock_account(&mut account_locks, &tx.account_keys, &mut error_counters))
.collect();
inc_new_counter_info!(
"bank-process_transactions-account_in_use",
error_counters.account_in_use
);
rv
}
/// Once accounts are unlocked, new transactions that modify that state can enter the pipeline
fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) {
debug!("bank unlock accounts");
let mut account_locks = self.account_locks.lock().unwrap();
txs.iter()
.zip(results.iter())
.for_each(|(tx, result)| Self::unlock_account(tx, result, &mut account_locks));
}
fn load_accounts(
&self,
txs: &[Transaction],
results: Vec<Result<()>>,
error_counters: &mut ErrorCounters,
) -> Vec<Result<Vec<Account>>> {
let accounts = self.accounts.read().unwrap();
let mut last_sigs = self.last_ids_sigs.write().unwrap();
txs.iter()
.zip(results.into_iter())
.map(|etx| match etx {
(tx, Ok(())) => self.load_account(tx, &accounts, &mut last_sigs, error_counters),
(_, Err(e)) => Err(e),
}).collect()
}
pub fn verify_transaction(
instruction_index: usize,
tx_program_id: &Pubkey,
pre_program_id: &Pubkey,
pre_tokens: i64,
account: &Account,
) -> Result<()> {
// Verify the transaction
// Make sure the program_id is still the same, or that it was just assigned by the system contract
if !((*pre_program_id == account.program_id)
|| (SystemProgram::check_id(&tx_program_id)
&& SystemProgram::check_id(&pre_program_id)))
{
// TODO: this may be redundant; BPF should be able to guarantee this property
return Err(BankError::ModifiedContractId(instruction_index as u8));
}
// For accounts unassigned to the contract, the individual balance of each account cannot decrease.
if *tx_program_id != account.program_id && pre_tokens > account.tokens {
return Err(BankError::ExternalAccountTokenSpend(
instruction_index as u8,
));
}
if account.tokens < 0 {
return Err(BankError::ResultWithNegativeTokens(instruction_index as u8));
}
Ok(())
}
fn loaded_contract(
&self,
tx_program_id: &Pubkey,
tx: &Transaction,
program_index: usize,
accounts: &mut [&mut Account],
) -> bool {
let loaded_contracts = self.loaded_contracts.write().unwrap();
match loaded_contracts.get(&tx_program_id) {
Some(dc) => {
let mut infos: Vec<_> = (&tx.account_keys)
.into_iter()
.zip(accounts)
.map(|(key, account)| KeyedAccount { key, account })
.collect();
dc.call(&mut infos, tx.userdata(program_index));
true
}
None => false,
}
}
/// Execute a function with a subset of accounts as writable references.
/// Since the subset can point to the same underlying accounts, in any order,
/// there is no way for the borrow checker to track them with regard to the original set.
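///
/// For example (a sketch): with `accounts = [a, b, c]` and `ixes = [2, 0]`,
/// `func` is called with `[&mut c, &mut a]`.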
fn with_subset<F, A>(accounts: &mut [Account], ixes: &[u8], func: F) -> A
where
F: Fn(&mut [&mut Account]) -> A,
{
let mut subset: Vec<&mut Account> = ixes
.iter()
.map(|ix| {
let ptr = &mut accounts[*ix as usize] as *mut Account;
// lifetime of this unsafe is only within the scope of the closure
// there is no way to reorder them without breaking borrow checker rules
unsafe { &mut *ptr }
}).collect();
func(&mut subset)
}
/// Execute an instruction
/// This method calls the instruction's program entry point method and verifies that the result of
/// the call does not violate the bank's accounting rules.
/// The accounts are committed back to the bank only if this function returns Ok(_).
fn execute_instruction(
&self,
tx: &Transaction,
instruction_index: usize,
program_accounts: &mut [&mut Account],
) -> Result<()> {
let tx_program_id = tx.program_id(instruction_index);
// TODO: the runtime should be checking read/write access to memory
// we are trusting the hard coded contracts not to clobber or allocate
let pre_total: i64 = program_accounts.iter().map(|a| a.tokens).sum();
let pre_data: Vec<_> = program_accounts
.iter_mut()
.map(|a| (a.program_id, a.tokens))
.collect();
// Check account subscriptions before storing data for notifications
let subscriptions = self.account_subscriptions.read().unwrap();
let pre_userdata: Vec<_> = tx
.account_keys
.iter()
.enumerate()
.zip(program_accounts.iter_mut())
.filter(|((_, pubkey), _)| subscriptions.get(&pubkey).is_some())
.map(|((i, pubkey), a)| ((i, pubkey), a.userdata.clone()))
.collect();
// Call the contract method
// It's up to the contract to implement its own rules on moving funds
if SystemProgram::check_id(&tx_program_id) {
if SystemProgram::process_transaction(
&tx,
instruction_index,
program_accounts,
&self.loaded_contracts,
).is_err()
{
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if BudgetState::check_id(&tx_program_id) {
if BudgetState::process_transaction(&tx, instruction_index, program_accounts).is_err() {
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if StorageProgram::check_id(&tx_program_id) {
if StorageProgram::process_transaction(&tx, instruction_index, program_accounts)
.is_err()
{
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if TicTacToeProgram::check_id(&tx_program_id) {
if TicTacToeProgram::process_transaction(&tx, instruction_index, program_accounts)
.is_err()
{
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if TicTacToeDashboardProgram::check_id(&tx_program_id) {
if TicTacToeDashboardProgram::process_transaction(
&tx,
instruction_index,
program_accounts,
).is_err()
{
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if TokenProgram::check_id(&tx_program_id) {
if TokenProgram::process_transaction(&tx, instruction_index, program_accounts).is_err()
{
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if self.loaded_contract(tx_program_id, tx, instruction_index, program_accounts) {
} else {
return Err(BankError::UnknownContractId(instruction_index as u8));
}
// Verify the transaction
for ((pre_program_id, pre_tokens), post_account) in
pre_data.iter().zip(program_accounts.iter())
{
Self::verify_transaction(
instruction_index,
&tx_program_id,
pre_program_id,
*pre_tokens,
post_account,
)?;
}
// Send notifications
for ((i, pubkey), userdata) in &pre_userdata {
let account = &program_accounts[*i];
if userdata != &account.userdata {
self.check_account_subscriptions(&pubkey, &account);
}
}
// The total sum of all the tokens in all the accounts cannot change.
let post_total: i64 = program_accounts.iter().map(|a| a.tokens).sum();
if pre_total != post_total {
Err(BankError::UnbalancedTransaction(instruction_index as u8))
} else {
Ok(())
}
}
/// Execute a transaction.
/// This method calls each instruction in the transaction over the set of loaded Accounts
/// The accounts are committed back to the bank only if every instruction succeeds
fn execute_transaction(&self, tx: &Transaction, tx_accounts: &mut [Account]) -> Result<()> {
for (instruction_index, prog) in tx.instructions.iter().enumerate() {
Self::with_subset(tx_accounts, &prog.accounts, |program_accounts| {
self.execute_instruction(tx, instruction_index, program_accounts)
})?;
}
Ok(())
}
pub fn store_accounts(
&self,
txs: &[Transaction],
res: &[Result<()>],
loaded: &[Result<Vec<Account>>],
) {
let mut accounts = self.accounts.write().unwrap();
for (i, racc) in loaded.iter().enumerate() {
if res[i].is_err() || racc.is_err() {
continue;
}
let tx = &txs[i];
let acc = racc.as_ref().unwrap();
for (key, account) in tx.account_keys.iter().zip(acc.iter()) {
// purge the account if its balance is 0
if account.tokens == 0 {
accounts.remove(&key);
} else {
*accounts.entry(*key).or_insert_with(Account::default) = account.clone();
assert_eq!(accounts.get(key).unwrap().tokens, account.tokens);
}
}
}
}
pub fn process_and_record_transactions(
&self,
txs: &[Transaction],
poh: &PohRecorder,
) -> Result<()> {
let now = Instant::now();
// Once accounts are locked, other threads cannot encode transactions that will modify the
// same account state
let locked_accounts = self.lock_accounts(txs);
let lock_time = now.elapsed();
let now = Instant::now();
let results = self.execute_and_commit_transactions(txs, locked_accounts);
let process_time = now.elapsed();
let now = Instant::now();
self.record_transactions(txs, &results, poh)?;
let record_time = now.elapsed();
let now = Instant::now();
// Once the accounts are unlocked, new transactions that use them can enter the pipeline
self.unlock_accounts(&txs, &results);
let unlock_time = now.elapsed();
debug!(
"lock: {}us process: {}us record: {}us unlock: {}us txs_len={}",
duration_as_us(&lock_time),
duration_as_us(&process_time),
duration_as_us(&record_time),
duration_as_us(&unlock_time),
txs.len(),
);
Ok(())
}
fn record_transactions(
&self,
txs: &[Transaction],
results: &[Result<()>],
poh: &PohRecorder,
) -> Result<()> {
let processed_transactions: Vec<_> = results
.iter()
.zip(txs.iter())
.filter_map(|(r, x)| match r {
Ok(_) => Some(x.clone()),
Err(ref e) => {
debug!("process transaction failed {:?}", e);
None
}
}).collect();
// unlock all the accounts with errors, which are filtered out by the above `filter_map`
if !processed_transactions.is_empty() {
let hash = Transaction::hash(&processed_transactions);
debug!("processed ok: {} {}", processed_transactions.len(), hash);
// record and unlock will unlock all the successful transactions
poh.record(hash, processed_transactions).map_err(|e| {
warn!("record failure: {:?}", e);
BankError::RecordFailure
})?;
}
Ok(())
}
/// Process a batch of transactions.
#[must_use]
pub fn execute_and_commit_transactions(
&self,
txs: &[Transaction],
locked_accounts: Vec<Result<()>>,
) -> Vec<Result<()>> {
debug!("processing transactions: {}", txs.len());
let mut error_counters = ErrorCounters::default();
let now = Instant::now();
let mut loaded_accounts = self.load_accounts(txs, locked_accounts, &mut error_counters);
let load_elapsed = now.elapsed();
let now = Instant::now();
let executed: Vec<Result<()>> = loaded_accounts
.iter_mut()
.zip(txs.iter())
.map(|(acc, tx)| match acc {
Err(e) => Err(e.clone()),
Ok(ref mut accounts) => self.execute_transaction(tx, accounts),
}).collect();
let execution_elapsed = now.elapsed();
let now = Instant::now();
self.store_accounts(txs, &executed, &loaded_accounts);
// once committed there is no way to roll back
let write_elapsed = now.elapsed();
debug!(
"load: {}us execute: {}us store: {}us txs_len={}",
duration_as_us(&load_elapsed),
duration_as_us(&execution_elapsed),
duration_as_us(&write_elapsed),
txs.len(),
);
self.update_transaction_statuses(txs, &executed);
let mut tx_count = 0;
let mut err_count = 0;
for r in &executed {
if r.is_ok() {
tx_count += 1;
} else {
if err_count == 0 {
debug!("tx error: {:?}", r);
}
err_count += 1;
}
}
if err_count > 0 {
info!("{} errors of {} txs", err_count, err_count + tx_count);
if !self.is_leader {
inc_new_counter_info!("bank-process_transactions_err-validator", err_count);
inc_new_counter_info!(
"bank-appy_debits-account_not_found-validator",
error_counters.account_not_found_validator
);
} else {
inc_new_counter_info!("bank-process_transactions_err-leader", err_count);
inc_new_counter_info!(
"bank-appy_debits-account_not_found-leader",
error_counters.account_not_found_leader
);
}
inc_new_counter_info!("bank-process_transactions-error_count", err_count);
}
self.transaction_count
.fetch_add(tx_count, Ordering::Relaxed);
inc_new_counter_info!("bank-process_transactions-txs", tx_count);
executed
}
#[must_use]
pub fn process_transactions(&self, txs: &[Transaction]) -> Vec<Result<()>> {
let locked_accounts = self.lock_accounts(txs);
let results = self.execute_and_commit_transactions(txs, locked_accounts);
self.unlock_accounts(txs, &results);
results
}
pub fn process_entry_votes(
bank: &Bank,
entry: &Entry,
entry_height: u64,
leader_scheduler: &mut LeaderScheduler,
) {
for tx in &entry.transactions {
if tx.vote().is_some() {
// Update the active set in the leader scheduler
leader_scheduler.push_vote(*tx.from(), entry_height);
}
}
leader_scheduler.update_height(entry_height, bank);
}
pub fn process_entry(&self, entry: &Entry) -> Result<()> {
if !entry.transactions.is_empty() {
for result in self.process_transactions(&entry.transactions) {
result?;
}
}
self.register_entry_id(&entry.id);
Ok(())
}
/// Process an ordered list of entries, populating a circular buffer "tail"
/// as we go.
fn process_entries_tail(
&self,
entries: &[Entry],
tail: &mut Vec<Entry>,
tail_idx: &mut usize,
) -> Result<u64> {
let mut entry_count = 0;
for entry in entries {
if tail.len() > *tail_idx {
tail[*tail_idx] = entry.clone();
} else {
tail.push(entry.clone());
}
*tail_idx = (*tail_idx + 1) % WINDOW_SIZE as usize;
entry_count += 1;
self.process_entry(entry)?;
}
Ok(entry_count)
}
/// Process an ordered list of entries.
pub fn process_entries(&self, entries: &[Entry]) -> Result<()> {
for entry in entries {
self.process_entry(&entry)?;
}
Ok(())
}
/// Append entry blocks to the ledger, verifying them along the way.
fn process_blocks<I>(
&self,
start_hash: Hash,
entries: I,
tail: &mut Vec<Entry>,
tail_idx: &mut usize,
leader_scheduler: &mut LeaderScheduler,
) -> Result<u64>
where
I: IntoIterator<Item = Entry>,
{
// Ledger verification needs to be parallelized, but we can't pull the whole
// thing into memory. We therefore chunk it.
let mut entry_count = *tail_idx as u64;
let mut id = start_hash;
for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) {
let block: Vec<_> = block.collect();
if !block.verify(&id) {
warn!("Ledger proof of history failed at entry: {}", entry_count);
return Err(BankError::LedgerVerificationFailed);
}
id = block.last().unwrap().id;
let tail_count = self.process_entries_tail(&block, tail, tail_idx)?;
if !leader_scheduler.use_only_bootstrap_leader {
for (i, entry) in block.iter().enumerate() {
Self::process_entry_votes(
self,
&entry,
entry_count + i as u64 + 1,
leader_scheduler,
);
}
}
entry_count += tail_count;
}
Ok(entry_count)
}
/// Process a full ledger.
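/// Returns the number of entries processed and the ledger tail.
///
/// A rough sketch of the call shape (illustrative only; `entries` iterates over
/// the on-disk ledger and `leader_scheduler` is owned by the caller):
///
/// ```ignore
/// let (entry_height, tail) = bank.process_ledger(entries, &mut leader_scheduler)?;
/// ```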
pub fn process_ledger<I>(
&self,
entries: I,
leader_scheduler: &mut LeaderScheduler,
) -> Result<(u64, Vec<Entry>)>
where
I: IntoIterator<Item = Entry>,
{
let mut entries = entries.into_iter();
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().expect("invalid ledger: empty");
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries
.next()
.expect("invalid ledger: need at least 2 entries");
{
let tx = &entry1.transactions[0];
assert!(SystemProgram::check_id(tx.program_id(0)), "Invalid ledger");
let instruction: SystemProgram = deserialize(tx.userdata(0)).unwrap();
let deposit = if let SystemProgram::Move { tokens } = instruction {
Some(tokens)
} else {
None
}.expect("invalid ledger, needs to start with a contract");
{
let mut accounts = self.accounts.write().unwrap();
let account = accounts
.entry(tx.account_keys[0])
.or_insert_with(Account::default);
account.tokens += deposit;
trace!("applied genesis payment {:?} => {:?}", deposit, account);
}
}
self.register_entry_id(&entry0.id);
self.register_entry_id(&entry1.id);
let entry1_id = entry1.id;
let mut tail = Vec::with_capacity(WINDOW_SIZE as usize);
tail.push(entry0);
tail.push(entry1);
let mut tail_idx = 2;
let entry_count = self.process_blocks(
entry1_id,
entries,
&mut tail,
&mut tail_idx,
leader_scheduler,
)?;
// check if we need to rotate tail
if tail.len() == WINDOW_SIZE as usize {
tail.rotate_left(tail_idx)
}
Ok((entry_count, tail))
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
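///
/// A rough sketch, mirroring the unit tests below (illustrative only):
///
/// ```ignore
/// let signature = bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())?;
/// ```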
pub fn transfer(
&self,
n: i64,
keypair: &Keypair,
to: Pubkey,
last_id: Hash,
) -> Result<Signature> {
let tx = Transaction::system_new(keypair, to, n, last_id);
let signature = tx.signature;
self.process_transaction(&tx).map(|_| signature)
}
pub fn read_balance(account: &Account) -> i64 {
if SystemProgram::check_id(&account.program_id) {
SystemProgram::get_balance(account)
} else if BudgetState::check_id(&account.program_id) {
BudgetState::get_balance(account)
} else {
account.tokens
}
}
/// Each contract would need to be able to introspect its own state;
/// this is hard-coded to the budget contract language
pub fn get_balance(&self, pubkey: &Pubkey) -> i64 {
self.get_account(pubkey)
.map(|x| Self::read_balance(&x))
.unwrap_or(0)
}
pub fn get_account(&self, pubkey: &Pubkey) -> Option<Account> {
let accounts = self
.accounts
.read()
.expect("'accounts' read lock in get_balance");
accounts.get(pubkey).cloned()
}
pub fn transaction_count(&self) -> usize {
self.transaction_count.load(Ordering::Relaxed)
}
pub fn get_signature_status(&self, signature: &Signature) -> Result<()> {
let last_ids_sigs = self.last_ids_sigs.read().unwrap();
for (_hash, (signatures, _)) in last_ids_sigs.iter() {
if let Some(res) = signatures.get(signature) {
return res.clone();
}
}
Err(BankError::SignatureNotFound)
}
pub fn has_signature(&self, signature: &Signature) -> bool {
self.get_signature_status(signature) != Err(BankError::SignatureNotFound)
}
pub fn get_signature(&self, last_id: &Hash, signature: &Signature) -> Option<Result<()>> {
self.last_ids_sigs
.read()
.unwrap()
.get(last_id)
.and_then(|sigs| sigs.0.get(signature).cloned())
}
/// Hash the `accounts` HashMap. This represents a validator's interpretation
/// of the ledger up to the `last_id`, to be sent back to the leader when voting.
pub fn hash_internal_state(&self) -> Hash {
let mut ordered_accounts = BTreeMap::new();
for (pubkey, account) in self.accounts.read().unwrap().iter() {
ordered_accounts.insert(*pubkey, account.clone());
}
hash(&serialize(&ordered_accounts).unwrap())
}
pub fn finality(&self) -> usize {
self.finality_time.load(Ordering::Relaxed)
}
pub fn set_finality(&self, finality: usize) {
self.finality_time.store(finality, Ordering::Relaxed);
}
pub fn add_account_subscription(
&self,
bank_sub_id: Pubkey,
pubkey: Pubkey,
sink: Sink<Account>,
) {
let mut subscriptions = self.account_subscriptions.write().unwrap();
if let Some(current_hashmap) = subscriptions.get_mut(&pubkey) {
current_hashmap.insert(bank_sub_id, sink);
return;
}
let mut hashmap = HashMap::new();
hashmap.insert(bank_sub_id, sink);
subscriptions.insert(pubkey, hashmap);
}
pub fn remove_account_subscription(&self, bank_sub_id: &Pubkey, pubkey: &Pubkey) -> bool {
let mut subscriptions = self.account_subscriptions.write().unwrap();
match subscriptions.get_mut(pubkey) {
Some(ref current_hashmap) if current_hashmap.len() == 1 => {}
Some(current_hashmap) => {
return current_hashmap.remove(bank_sub_id).is_some();
}
None => {
return false;
}
}
subscriptions.remove(pubkey).is_some()
}
fn check_account_subscriptions(&self, pubkey: &Pubkey, account: &Account) {
let subscriptions = self.account_subscriptions.read().unwrap();
if let Some(hashmap) = subscriptions.get(pubkey) {
for (_bank_sub_id, sink) in hashmap.iter() {
sink.notify(Ok(account.clone())).wait().unwrap();
}
}
}
pub fn add_signature_subscription(
&self,
bank_sub_id: Pubkey,
signature: Signature,
sink: Sink<RpcSignatureStatus>,
) {
let mut subscriptions = self.signature_subscriptions.write().unwrap();
if let Some(current_hashmap) = subscriptions.get_mut(&signature) {
current_hashmap.insert(bank_sub_id, sink);
return;
}
let mut hashmap = HashMap::new();
hashmap.insert(bank_sub_id, sink);
subscriptions.insert(signature, hashmap);
}
pub fn remove_signature_subscription(
&self,
bank_sub_id: &Pubkey,
signature: &Signature,
) -> bool {
let mut subscriptions = self.signature_subscriptions.write().unwrap();
match subscriptions.get_mut(signature) {
Some(ref current_hashmap) if current_hashmap.len() == 1 => {}
Some(current_hashmap) => {
return current_hashmap.remove(bank_sub_id).is_some();
}
None => {
return false;
}
}
subscriptions.remove(signature).is_some()
}
fn check_signature_subscriptions(&self, signature: &Signature, status: RpcSignatureStatus) {
let mut subscriptions = self.signature_subscriptions.write().unwrap();
if let Some(hashmap) = subscriptions.get(signature) {
for (_bank_sub_id, sink) in hashmap.iter() {
sink.notify(Ok(status)).wait().unwrap();
}
}
subscriptions.remove(&signature);
}
#[cfg(test)]
// Used to access accounts for things like controlling stake to control
// the eligible set of nodes for leader selection
pub fn accounts(&self) -> &RwLock<HashMap<Pubkey, Account>> {
&self.accounts
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use budget_program::BudgetState;
use entry::next_entry;
use entry::Entry;
use entry_writer::{self, EntryWriter};
use hash::hash;
use jsonrpc_macros::pubsub::{Subscriber, SubscriptionId};
use leader_scheduler::LeaderScheduler;
use ledger;
use logger;
use signature::Keypair;
use signature::{GenKeys, KeypairUtil};
use std;
use std::io::{BufReader, Cursor, Seek, SeekFrom};
use system_transaction::SystemTransaction;
use tokio::prelude::{Async, Stream};
use transaction::Instruction;
#[test]
fn test_bank_new() {
let mint = Mint::new(10_000);
let bank = Bank::new(&mint);
assert_eq!(bank.get_balance(&mint.pubkey()), 10_000);
}
#[test]
fn test_two_payments_to_one_party() {
let mint = Mint::new(10_000);
let pubkey = Keypair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(bank.last_id(), mint.last_id());
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1_000);
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1_500);
assert_eq!(bank.transaction_count(), 2);
}
#[test]
fn test_one_source_two_tx_one_batch() {
let mint = Mint::new(1);
let key1 = Keypair::new().pubkey();
let key2 = Keypair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(bank.last_id(), mint.last_id());
let t1 = Transaction::system_move(&mint.keypair(), key1, 1, mint.last_id(), 0);
let t2 = Transaction::system_move(&mint.keypair(), key2, 1, mint.last_id(), 0);
let res = bank.process_transactions(&vec![t1.clone(), t2.clone()]);
assert_eq!(res.len(), 2);
assert_eq!(res[0], Ok(()));
assert_eq!(res[1], Err(BankError::AccountInUse));
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
assert_eq!(bank.get_balance(&key1), 1);
assert_eq!(bank.get_balance(&key2), 0);
assert_eq!(bank.get_signature(&t1.last_id, &t1.signature), Some(Ok(())));
// TODO: Transactions that fail to pay a fee could be dropped silently
assert_eq!(
bank.get_signature(&t2.last_id, &t2.signature),
Some(Err(BankError::AccountInUse))
);
}
#[test]
fn test_one_tx_two_out_atomic_fail() {
let mint = Mint::new(1);
let key1 = Keypair::new().pubkey();
let key2 = Keypair::new().pubkey();
let bank = Bank::new(&mint);
let spend = SystemProgram::Move { tokens: 1 };
let instructions = vec![
Instruction {
program_ids_index: 0,
userdata: serialize(&spend).unwrap(),
accounts: vec![0, 1],
},
Instruction {
program_ids_index: 0,
userdata: serialize(&spend).unwrap(),
accounts: vec![0, 2],
},
];
let t1 = Transaction::new_with_instructions(
&mint.keypair(),
&[key1, key2],
mint.last_id(),
0,
vec![SystemProgram::id()],
instructions,
);
let res = bank.process_transactions(&vec![t1.clone()]);
assert_eq!(res.len(), 1);
assert_eq!(res[0], Err(BankError::ResultWithNegativeTokens(1)));
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
assert_eq!(bank.get_balance(&key1), 0);
assert_eq!(bank.get_balance(&key2), 0);
assert_eq!(
bank.get_signature(&t1.last_id, &t1.signature),
Some(Err(BankError::ResultWithNegativeTokens(1)))
);
}
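// The same two-output transaction with enough tokens to cover both moves:
// both instructions are applied atomically.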
#[test]
fn test_one_tx_two_out_atomic_pass() {
let mint = Mint::new(2);
let key1 = Keypair::new().pubkey();
let key2 = Keypair::new().pubkey();
let bank = Bank::new(&mint);
let spend = SystemProgram::Move { tokens: 1 };
let instructions = vec![
Instruction {
program_ids_index: 0,
userdata: serialize(&spend).unwrap(),
accounts: vec![0, 1],
},
Instruction {
program_ids_index: 0,
userdata: serialize(&spend).unwrap(),
accounts: vec![0, 2],
},
];
let t1 = Transaction::new_with_instructions(
&mint.keypair(),
&[key1, key2],
mint.last_id(),
0,
vec![SystemProgram::id()],
instructions,
);
let res = bank.process_transactions(&vec![t1.clone()]);
assert_eq!(res.len(), 1);
assert_eq!(res[0], Ok(()));
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
assert_eq!(bank.get_balance(&key1), 1);
assert_eq!(bank.get_balance(&key2), 1);
assert_eq!(bank.get_signature(&t1.last_id, &t1.signature), Some(Ok(())));
}
#[test]
fn test_negative_tokens() {
logger::setup();
let mint = Mint::new(1);
let pubkey = Keypair::new().pubkey();
let bank = Bank::new(&mint);
let res = bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id());
println!("{:?}", bank.get_account(&pubkey));
assert_matches!(res, Err(BankError::ResultWithNegativeTokens(0)));
assert_eq!(bank.transaction_count(), 0);
}
// TODO: This test demonstrates that fees are not paid when a program fails.
// See github issue 1157 (https://github.com/solana-labs/solana/issues/1157)
#[test]
fn test_detect_failed_duplicate_transactions_issue_1157() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let dest = Keypair::new();
// Source with no contract context; the create requests more tokens than the mint holds, so it must fail.
let tx = Transaction::system_create(
&mint.keypair(),
dest.pubkey(),
mint.last_id(),
2,
0,
Pubkey::default(),
1,
);
let signature = tx.signature;
assert!(!bank.has_signature(&signature));
let res = bank.process_transaction(&tx);
// Result failed, but signature is registered
assert!(res.is_err());
assert!(bank.has_signature(&signature));
assert_matches!(
bank.get_signature_status(&signature),
Err(BankError::ResultWithNegativeTokens(0))
);
// The tokens didn't move. The from address should have paid the transaction fee, but no fee was deducted (see the TODO above).
assert_eq!(bank.get_balance(&dest.pubkey()), 0);
// BUG: This should be the original balance minus the transaction fee.
//assert_eq!(bank.get_balance(&mint.pubkey()), 0);
}
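// A transfer signed by a keypair with no funded account fails with AccountNotFound.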
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = Keypair::new();
assert_eq!(
bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
Err(BankError::AccountNotFound)
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_insufficient_funds() {
let mint = Mint::new(11_000);
let bank = Bank::new(&mint);
let pubkey = Keypair::new().pubkey();
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(bank.get_balance(&pubkey), 1_000);
assert_matches!(
bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::ResultWithNegativeTokens(0))
);
assert_eq!(bank.transaction_count(), 1);
let mint_pubkey = mint.keypair().pubkey();
assert_eq!(bank.get_balance(&mint_pubkey), 10_000);
assert_eq!(bank.get_balance(&pubkey), 1_000);
}
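// A transfer to a previously unseen pubkey leaves that pubkey holding the transferred balance.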
#[test]
fn test_transfer_to_newb() {
let mint = Mint::new(10_000);
let bank = Bank::new(&mint);
let pubkey = Keypair::new().pubkey();
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 500);
}
#[test]
fn test_duplicate_transaction_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let signature = Signature::default();
assert_eq!(
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id()),
Ok(())
);
assert_eq!(
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id()),
Err(BankError::DuplicateSignature)
);
}
#[test]
fn test_clear_signatures() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let signature = Signature::default();
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id())
.unwrap();
bank.clear_signatures();
assert_eq!(
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id()),
Ok(())
);
}
#[test]
fn test_get_signature_status() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let signature = Signature::default();
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id())
.expect("reserve signature");
assert_eq!(bank.get_signature_status(&signature), Ok(()));
}
#[test]
fn test_has_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let signature = Signature::default();
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id())
.expect("reserve signature");
assert!(bank.has_signature(&signature));
}
#[test]
fn test_reject_old_last_id() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let signature = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert_eq!(
bank.reserve_signature_with_last_id_test(&signature, &mint.last_id()),
Err(BankError::LastIdNotFound)
);
}
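// count_valid_ids reports the index of each registered entry ID and ignores IDs the bank does not currently track.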
#[test]
fn test_count_valid_ids() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let ids: Vec<_> = (0..MAX_ENTRY_IDS)
.map(|i| {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
last_id
}).collect();
assert_eq!(bank.count_valid_ids(&[]).len(), 0);
assert_eq!(bank.count_valid_ids(&[mint.last_id()]).len(), 0);
for (i, id) in bank.count_valid_ids(&ids).iter().enumerate() {
assert_eq!(id.0, i);
}
}
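// Within one batch, a transaction that spends tokens credited earlier in the same
// batch is rejected, and failed transactions are not counted.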
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let bank = Bank::new(&mint);
let keypair = Keypair::new();
let tx0 = Transaction::system_new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
let tx1 = Transaction::system_new(&keypair, mint.pubkey(), 1, mint.last_id());
let txs = vec![tx0, tx1];
let results = bank.process_transactions(&txs);
assert!(results[1].is_err());
// Assert bad transactions aren't counted.
assert_eq!(bank.transaction_count(), 1);
}
#[test]
fn test_process_empty_entry_is_registered() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = Keypair::new();
let entry = next_entry(&mint.last_id(), 1, vec![]);
let tx = Transaction::system_new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
// First, ensure the TX is rejected because of the unregistered last ID
assert_eq!(
bank.process_transaction(&tx),
Err(BankError::LastIdNotFound)
);
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
bank.process_entries(&[entry]).unwrap();
assert_eq!(bank.process_transaction(&tx), Ok(()));
}
#[test]
fn test_process_genesis() {
let mint = Mint::new(1);
let genesis = mint.create_entries();
let bank = Bank::default();
bank.process_ledger(genesis, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
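// Build a block that transfers one token from the mint to each keypair, chaining
// every entry off the previous entry's hash.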
fn create_sample_block_with_next_entries_using_keypairs(
mint: &Mint,
keypairs: &[Keypair],
) -> impl Iterator<Item = Entry> {
let mut hash = mint.last_id();
let mut entries: Vec<Entry> = vec![];
for k in keypairs {
let txs = vec![Transaction::system_new(
&mint.keypair(),
k.pubkey(),
1,
hash,
)];
let mut e = ledger::next_entries(&hash, 0, txs);
entries.append(&mut e);
hash = entries.last().unwrap().id;
}
entries.into_iter()
}
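// Build `length` entries, each carrying a single one-token transfer from the mint to a fresh keypair.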
fn create_sample_block(mint: &Mint, length: usize) -> impl Iterator<Item = Entry> {
let mut entries = Vec::with_capacity(length);
let mut hash = mint.last_id();
let mut num_hashes = 0;
for _ in 0..length {
let keypair = Keypair::new();
let tx = Transaction::system_new(&mint.keypair(), keypair.pubkey(), 1, hash);
let entry = Entry::new_mut(&mut hash, &mut num_hashes, vec![tx]);
entries.push(entry);
}
entries.into_iter()
}
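// Build a ledger consisting of the mint's genesis entries followed by a sample block of `length` transfers.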
fn create_sample_ledger(length: usize) -> (impl Iterator<Item = Entry>, Pubkey) {
let mint = Mint::new(1 + length as i64);
let genesis = mint.create_entries();
let block = create_sample_block(&mint, length);
(genesis.into_iter().chain(block), mint.pubkey())
}
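// Build a ledger consisting of the mint's genesis entries followed by one transfer entry per keypair.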
fn create_sample_ledger_with_mint_and_keypairs(
mint: &Mint,
keypairs: &[Keypair],
) -> impl Iterator<Item = Entry> {
let genesis = mint.create_entries();
let block = create_sample_block_with_next_entries_using_keypairs(mint, keypairs);
genesis.into_iter().chain(block)
}
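// Replaying a sample ledger credits the recipient, reports the ledger height, and
// returns a tail whose last entry matches the bank's last_id.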
#[test]
fn test_process_ledger() {
let (ledger, pubkey) = create_sample_ledger(1);
let (ledger, dup) = ledger.tee();
let bank = Bank::default();
let (ledger_height, tail) = bank
.process_ledger(ledger, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
assert_eq!(ledger_height, 3);
assert_eq!(tail.len(), 3);
assert_eq!(tail, dup.collect_vec());
let last_entry = &tail[tail.len() - 1];
assert_eq!(bank.last_id(), last_entry.id);
}
#[test]
fn test_process_ledger_around_window_size() {
// TODO: put me back in when Criterion is up
// for _ in 0..10 {
// let (ledger, _) = create_sample_ledger(WINDOW_SIZE as usize);
// let bank = Bank::default();
// let (_, _) = bank.process_ledger(ledger).unwrap();
// }
let window_size = WINDOW_SIZE as usize;
for entry_count in window_size - 3..window_size + 2 {
let (ledger, pubkey) = create_sample_ledger(entry_count);
let bank = Bank::default();
let (ledger_height, tail) = bank
.process_ledger(ledger, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
assert_eq!(ledger_height, entry_count as u64 + 2);
assert!(tail.len() <= window_size);
let last_entry = &tail[tail.len() - 1];
assert_eq!(bank.last_id(), last_entry.id);
}
}
// Write the given entries to a file and then return a file iterator to them.
fn to_file_iter(entries: impl Iterator<Item = Entry>) -> impl Iterator<Item = Entry> {
let mut file = Cursor::new(vec![]);
EntryWriter::write_entries(&mut file, entries).unwrap();
file.seek(SeekFrom::Start(0)).unwrap();
let reader = BufReader::new(file);
entry_writer::read_entries(reader).map(|x| x.unwrap())
}
#[test]
fn test_process_ledger_from_file() {
let (ledger, pubkey) = create_sample_ledger(1);
let ledger = to_file_iter(ledger);
let bank = Bank::default();
bank.process_ledger(ledger, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
}
#[test]
fn test_process_ledger_from_files() {
let mint = Mint::new(2);
let genesis = to_file_iter(mint.create_entries().into_iter());
let block = to_file_iter(create_sample_block(&mint, 1));
let bank = Bank::default();
bank.process_ledger(genesis.chain(block), &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
#[test]
fn test_new_default() {
let def_bank = Bank::default();
assert!(def_bank.is_leader);
let leader_bank = Bank::new_default(true);
assert!(leader_bank.is_leader);
let validator_bank = Bank::new_default(false);
assert!(!validator_bank.is_leader);
}
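// Banks that replay identical ledgers must report identical internal state hashes,
// and applying the same transfer to both keeps the hashes equal.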
#[test]
fn test_hash_internal_state() {
let mint = Mint::new(2_000);
let seed = [0u8; 32];
let mut rnd = GenKeys::new(seed);
let keypairs = rnd.gen_n_keypairs(5);
let ledger0 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);
let ledger1 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);
let bank0 = Bank::default();
bank0
.process_ledger(ledger0, &mut LeaderScheduler::default())
.unwrap();
let bank1 = Bank::default();
bank1
.process_ledger(ledger1, &mut LeaderScheduler::default())
.unwrap();
let initial_state = bank0.hash_internal_state();
assert_eq!(bank1.hash_internal_state(), initial_state);
let pubkey = keypairs[0].pubkey();
bank0
.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_ne!(bank0.hash_internal_state(), initial_state);
bank1
.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
}
#[test]
fn test_finality() {
let def_bank = Bank::default();
assert_eq!(def_bank.finality(), std::usize::MAX);
def_bank.set_finality(90);
assert_eq!(def_bank.finality(), 90);
}
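// Accounts locked by an in-flight batch reject interleaved transfers with
// AccountInUse until the batch's locks are released.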
#[test]
fn test_interleaving_locks() {
let mint = Mint::new(3);
let bank = Bank::new(&mint);
let alice = Keypair::new();
let bob = Keypair::new();
let tx1 = Transaction::system_new(&mint.keypair(), alice.pubkey(), 1, mint.last_id());
let pay_alice = vec![tx1];
let locked_alice = bank.lock_accounts(&pay_alice);
let results_alice = bank.execute_and_commit_transactions(&pay_alice, locked_alice);
assert_eq!(results_alice[0], Ok(()));
// try executing an interleaved transfer twice
assert_eq!(
bank.transfer(1, &mint.keypair(), bob.pubkey(), mint.last_id()),
Err(BankError::AccountInUse)
);
// the second time should fail as well
// this verifies that `unlock_accounts` doesn't unlock `AccountInUse` accounts
assert_eq!(
bank.transfer(1, &mint.keypair(), bob.pubkey(), mint.last_id()),
Err(BankError::AccountInUse)
);
bank.unlock_accounts(&pay_alice, &results_alice);
assert_matches!(
bank.transfer(2, &mint.keypair(), bob.pubkey(), mint.last_id()),
Ok(_)
);
}
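// An account subscription delivers an accountNotification over the sink when the
// subscribed account is checked, and is gone once removed.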
#[test]
fn test_bank_account_subscribe() {
let mint = Mint::new(100);
let bank = Bank::new(&mint);
let alice = Keypair::new();
let bank_sub_id = Keypair::new().pubkey();
let last_id = bank.last_id();
let tx = Transaction::system_create(
&mint.keypair(),
alice.pubkey(),
last_id,
1,
16,
BudgetState::id(),
0,
);
bank.process_transaction(&tx).unwrap();
let (subscriber, _id_receiver, mut transport_receiver) =
Subscriber::new_test("accountNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
bank.add_account_subscription(bank_sub_id, alice.pubkey(), sink);
assert!(
bank.account_subscriptions
.write()
.unwrap()
.contains_key(&alice.pubkey())
);
let account = bank.get_account(&alice.pubkey()).unwrap();
bank.check_account_subscriptions(&alice.pubkey(), &account);
let string = transport_receiver.poll();
assert!(string.is_ok());
if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
assert_eq!(expected, response);
}
bank.remove_account_subscription(&bank_sub_id, &alice.pubkey());
assert!(
!bank
.account_subscriptions
.write()
.unwrap()
.contains_key(&alice.pubkey())
);
}
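// A signature subscription delivers a signatureNotification with the transaction's
// status when the signature is checked, and is gone once removed.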
#[test]
fn test_bank_signature_subscribe() {
let mint = Mint::new(100);
let bank = Bank::new(&mint);
let alice = Keypair::new();
let bank_sub_id = Keypair::new().pubkey();
let last_id = bank.last_id();
let tx = Transaction::system_move(&mint.keypair(), alice.pubkey(), 20, last_id, 0);
let signature = tx.signature;
bank.process_transaction(&tx).unwrap();
let (subscriber, _id_receiver, mut transport_receiver) =
Subscriber::new_test("signatureNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
bank.add_signature_subscription(bank_sub_id, signature, sink);
assert!(
bank.signature_subscriptions
.write()
.unwrap()
.contains_key(&signature)
);
bank.check_signature_subscriptions(&signature, RpcSignatureStatus::Confirmed);
let string = transport_receiver.poll();
assert!(string.is_ok());
if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(r#"{{"jsonrpc":"2.0","method":"signatureNotification","params":{{"result":"Confirmed","subscription":0}}}}"#);
assert_eq!(expected, response);
}
bank.remove_signature_subscription(&bank_sub_id, &signature);
assert!(
!bank
.signature_subscriptions
.write()
.unwrap()
.contains_key(&signature)
);
}
}