// solana/core/src/blocktree_processor.rs

use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::entry::{Entry, EntrySlice};
use crate::leader_schedule_utils;
use rayon::prelude::*;
use solana_metrics::counter::Counter;
use solana_runtime::bank::{Bank, Result};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
use std::result;
use std::sync::Arc;
use std::time::Instant;
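
/// Returns the first error in `results`, or `Ok(())` if every result succeeded.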
fn first_err(results: &[Result<()>]) -> Result<()> {
    for r in results {
        r.clone()?;
    }
    Ok(())
}
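
/// Executes the given entry/lock-result pairs in parallel with rayon, committing each
/// entry's transactions to the bank and releasing the account locks afterwards.
/// Returns the first error encountered, if any.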
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, Vec<Result<()>>)]) -> Result<()> {
    inc_new_counter_info!("bank-par_execute_entries-count", entries.len());
    let results: Vec<Result<()>> = entries
        .into_par_iter()
        .map(|(e, lock_results)| {
            let results = bank.load_execute_and_commit_transactions(
                &e.transactions,
                lock_results.to_vec(),
                MAX_RECENT_BLOCKHASHES,
            );
            bank.unlock_accounts(&e.transactions, &results);
            first_err(&results)
        })
        .collect();

    first_err(&results)
}

/// Processes an ordered list of entries in parallel:
/// 1. In order, lock the accounts for each entry while the locks succeed, up to a tick entry
/// 2. Process the locked group in parallel
/// 3. Register the tick if one is available
/// 4. Update the leader scheduler, go to 1
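///
/// A minimal usage sketch, mirroring `test_process_entries_tick` below (setup is
/// illustrative, not a fixed API):
/// ```ignore
/// let bank = Bank::new(&genesis_block);
/// let tick = next_entry(&genesis_block.hash(), 1, vec![]);
/// assert_eq!(process_entries(&bank, &[tick]), Ok(()));
/// ```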
pub fn process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
    // accumulator for entries that can be processed in parallel
    let mut mt_group = vec![];
    for entry in entries {
        if entry.is_tick() {
            // if it's a tick, execute the group and register the tick
            par_execute_entries(bank, &mt_group)?;
            bank.register_tick(&entry.hash);
            mt_group = vec![];
            continue;
        }
        // try to lock the accounts
        let lock_results = bank.lock_accounts(&entry.transactions);
        // if any of the locks error out, execute the current group
        if first_err(&lock_results).is_err() {
            par_execute_entries(bank, &mt_group)?;
            mt_group = vec![];
            // reset the locks and push the entry
            bank.unlock_accounts(&entry.transactions, &lock_results);
            let lock_results = bank.lock_accounts(&entry.transactions);
            mt_group.push((entry, lock_results));
        } else {
            // push the entry to the mt_group
            mt_group.push((entry, lock_results));
        }
    }
    par_execute_entries(bank, &mt_group)?;
    Ok(())
}
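
/// Summary of a fork tip produced by `process_blocktree`: the slot of the fork's bank
/// and the entry height reached along that fork.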
#[derive(Debug, PartialEq)]
pub struct BankForksInfo {
    pub bank_slot: u64,
    pub entry_height: u64,
}

#[derive(Debug)]
pub enum BlocktreeProcessorError {
    LedgerVerificationFailed,
}
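
/// Replays the full slots recorded in `blocktree` on top of a bank built from
/// `genesis_block` (with accounts loaded from `account_paths`, if given), returning the
/// resulting `BankForks` plus one `BankForksInfo` per fork tip, or
/// `LedgerVerificationFailed` if the ledger cannot be verified. Usage mirrors the tests
/// below:
/// ```ignore
/// let (bank_forks, bank_forks_info) =
///     process_blocktree(&genesis_block, &blocktree, None).unwrap();
/// ```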
pub fn process_blocktree(
    genesis_block: &GenesisBlock,
    blocktree: &Blocktree,
    account_paths: Option<String>,
) -> result::Result<(BankForks, Vec<BankForksInfo>), BlocktreeProcessorError> {
    let now = Instant::now();
    info!("processing ledger...");

    // Setup bank for slot 0
    let mut pending_slots = {
        let slot = 0;
        let bank = Arc::new(Bank::new_with_paths(&genesis_block, account_paths));
        let entry_height = 0;
        let last_entry_hash = bank.last_blockhash();

        // Load the metadata for this slot
        let meta = blocktree
            .meta(slot)
            .map_err(|err| {
                warn!("Failed to load meta for slot {}: {:?}", slot, err);
                BlocktreeProcessorError::LedgerVerificationFailed
            })?
            .unwrap();

        vec![(slot, meta, bank, entry_height, last_entry_hash)]
    };

    let mut fork_info = vec![];
    while !pending_slots.is_empty() {
        let (slot, meta, bank, mut entry_height, mut last_entry_hash) =
            pending_slots.pop().unwrap();

        // Fetch all entries for this slot
        let mut entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
            warn!("Failed to load entries for slot {}: {:?}", slot, err);
            BlocktreeProcessorError::LedgerVerificationFailed
        })?;

        if slot == 0 {
            // The first entry in the ledger is a pseudo-tick used only to ensure the number of ticks
            // in slot 0 is the same as the number of ticks in all subsequent slots. It is not
            // processed by the bank, so skip over it.
            if entries.is_empty() {
                warn!("entry0 not present");
                return Err(BlocktreeProcessorError::LedgerVerificationFailed);
            }
            let entry0 = &entries[0];
            if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) {
                warn!("Ledger proof of history failed at entry0");
                return Err(BlocktreeProcessorError::LedgerVerificationFailed);
            }
            last_entry_hash = entry0.hash;
            entry_height += 1;
            entries = entries.drain(1..).collect();
        }

        if !entries.is_empty() {
            if !entries.verify(&last_entry_hash) {
                warn!(
                    "Ledger proof of history failed at slot: {}, entry: {}",
                    slot, entry_height
                );
                return Err(BlocktreeProcessorError::LedgerVerificationFailed);
            }

            process_entries(&bank, &entries).map_err(|err| {
                warn!("Failed to process entries for slot {}: {:?}", slot, err);
                BlocktreeProcessorError::LedgerVerificationFailed
            })?;

            last_entry_hash = entries.last().unwrap().hash;
            entry_height += entries.len() as u64;
        }

        // TODO merge with locktower, voting, bank.vote_accounts()...
        bank.squash();

        if meta.next_slots.is_empty() {
            // Reached the end of this fork. Record the final entry height and last entry.hash
            let bfi = BankForksInfo {
                bank_slot: slot,
                entry_height,
            };
            fork_info.push((bank, bfi));
            continue;
        }

        // This is a fork point, create a new child bank for each fork
        for next_slot in meta.next_slots {
            let next_meta = blocktree
                .meta(next_slot)
                .map_err(|err| {
                    warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
                    BlocktreeProcessorError::LedgerVerificationFailed
                })?
                .unwrap();

            // only process full slots in blocktree_processor; replay_stage
            // handles any partials
            if next_meta.is_full() {
                let next_bank = Arc::new(Bank::new_from_parent(
                    &bank,
                    &leader_schedule_utils::slot_leader_at(next_slot, &bank).unwrap(),
                    next_slot,
                ));
                trace!("Add child bank for slot={}", next_slot);
                // bank_forks.insert(*next_slot, child_bank);
                pending_slots.push((
                    next_slot,
                    next_meta,
                    next_bank,
                    entry_height,
                    last_entry_hash,
                ));
            } else {
                let bfi = BankForksInfo {
                    bank_slot: slot,
                    entry_height,
                };
                fork_info.push((bank.clone(), bfi));
            }
        }

        // reverse sort by slot, so the next slot to be processed can be pop()ed
        // TODO: remove me once leader_scheduler can hang with out-of-order slots?
        pending_slots.sort_by(|a, b| b.0.cmp(&a.0));
    }

    let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
    let bank_forks = BankForks::new_from_banks(&banks);
    info!(
        "processed ledger in {}ms, forks={}...",
        duration_as_ms(&now.elapsed()),
        bank_forks_info.len(),
    );

    Ok((bank_forks, bank_forks_info))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::blocktree::create_new_tmp_ledger;
    use crate::blocktree::tests::entries_to_blobs;
    use crate::entry::{create_ticks, next_entry, Entry};
    use solana_runtime::bank::BankError;
    use solana_sdk::genesis_block::GenesisBlock;
    use solana_sdk::hash::Hash;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::system_transaction::SystemTransaction;
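
    /// Writes `ticks_per_slot` tick entries into `slot`, chained from `last_entry_hash`
    /// and marked as a child of `parent_slot`, then returns the hash of the last tick.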
    fn fill_blocktree_slot_with_ticks(
        blocktree: &Blocktree,
        ticks_per_slot: u64,
        slot: u64,
        parent_slot: u64,
        last_entry_hash: Hash,
    ) -> Hash {
        let entries = create_ticks(ticks_per_slot, last_entry_hash);
        let last_entry_hash = entries.last().unwrap().hash;

        let blobs = entries_to_blobs(&entries, slot, parent_slot, true);
        blocktree.insert_data_blobs(blobs.iter()).unwrap();

        last_entry_hash
    }

    #[test]
    fn test_process_blocktree_with_incomplete_slot() {
        solana_logger::setup();

        let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
        let ticks_per_slot = genesis_block.ticks_per_slot;

        /*
          Build a blocktree in the ledger with the following fork structure:

              slot 0 (all ticks)
                |
              slot 1 (all ticks but one)
                |
              slot 2 (all ticks)

          where slot 1 is incomplete (missing 1 tick at the end)
        */

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, mut blockhash) = create_new_tmp_ledger!(&genesis_block);
        debug!("ledger_path: {:?}", ledger_path);

        let blocktree = Blocktree::open_config(&ledger_path, ticks_per_slot)
            .expect("Expected to successfully open database ledger");

        // Write slot 1
        // slot 1, points at slot 0. Missing one tick
        {
            let parent_slot = 0;
            let slot = 1;
            let mut entries = create_ticks(ticks_per_slot, blockhash);
            blockhash = entries.last().unwrap().hash;

            // throw away last one
            entries.pop();

            let blobs = entries_to_blobs(&entries, slot, parent_slot, false);
            blocktree.insert_data_blobs(blobs.iter()).unwrap();
        }

        // slot 2, points at slot 1
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);

        let (mut _bank_forks, bank_forks_info) =
            process_blocktree(&genesis_block, &blocktree, None).unwrap();

        assert_eq!(bank_forks_info.len(), 1);
        assert_eq!(
            bank_forks_info[0],
            BankForksInfo {
                bank_slot: 0, // slot 1 isn't "full", we stop at slot zero
                entry_height: ticks_per_slot,
            }
        );
    }

    #[test]
    fn test_process_blocktree_with_two_forks() {
        solana_logger::setup();

        let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
        let ticks_per_slot = genesis_block.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
        debug!("ledger_path: {:?}", ledger_path);
        let mut last_entry_hash = blockhash;

        /*
          Build a blocktree in the ledger with the following fork structure:

                 slot 0
                   |
                 slot 1
                 /   \
            slot 2    |
              /       |
           slot 3     |
                      |
                    slot 4
        */
        let blocktree = Blocktree::open_config(&ledger_path, ticks_per_slot)
            .expect("Expected to successfully open database ledger");

        // Fork 1, ending at slot 3
        let last_slot1_entry_hash =
            fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash);
        last_entry_hash =
            fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash);
        let last_fork1_entry_hash =
            fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash);

        // Fork 2, ending at slot 4
        let last_fork2_entry_hash =
            fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash);

        info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
        info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

        let (bank_forks, bank_forks_info) =
            process_blocktree(&genesis_block, &blocktree, None).unwrap();

        assert_eq!(bank_forks_info.len(), 2); // There are two forks
        assert_eq!(
            bank_forks_info[0],
            BankForksInfo {
                bank_slot: 3, // Fork 1's head is slot 3
                entry_height: ticks_per_slot * 4,
            }
        );
        assert_eq!(
            bank_forks_info[1],
            BankForksInfo {
                bank_slot: 4, // Fork 2's head is slot 4
                entry_height: ticks_per_slot * 3,
            }
        );

        // Ensure bank_forks holds the right banks, and that everything's
        // frozen
        for info in bank_forks_info {
            assert_eq!(bank_forks[info.bank_slot].slot(), info.bank_slot);
            assert!(bank_forks[info.bank_slot].is_frozen());
        }
    }

    #[test]
    fn test_first_err() {
        assert_eq!(first_err(&[Ok(())]), Ok(()));
        assert_eq!(
            first_err(&[Ok(()), Err(BankError::DuplicateSignature)]),
            Err(BankError::DuplicateSignature)
        );
        assert_eq!(
            first_err(&[
                Ok(()),
                Err(BankError::DuplicateSignature),
                Err(BankError::AccountInUse)
            ]),
            Err(BankError::DuplicateSignature)
        );
        assert_eq!(
            first_err(&[
                Ok(()),
                Err(BankError::AccountInUse),
                Err(BankError::DuplicateSignature)
            ]),
            Err(BankError::AccountInUse)
        );
        assert_eq!(
            first_err(&[
                Err(BankError::AccountInUse),
                Ok(()),
                Err(BankError::DuplicateSignature)
            ]),
            Err(BankError::AccountInUse)
        );
    }

    #[test]
    fn test_process_empty_entry_is_registered() {
        solana_logger::setup();

        let (genesis_block, mint_keypair) = GenesisBlock::new(2);
        let bank = Bank::new(&genesis_block);
        let keypair = Keypair::new();
        let slot_entries = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash());
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair.pubkey(),
            1,
            slot_entries.last().unwrap().hash,
            0,
        );

        // First, ensure the TX is rejected because of the unregistered recent blockhash
        assert_eq!(
            bank.process_transaction(&tx),
            Err(BankError::BlockhashNotFound)
        );

        // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
        process_entries(&bank, &slot_entries).unwrap();
        assert_eq!(bank.process_transaction(&tx), Ok(()));
    }

    #[test]
    fn test_process_ledger_simple() {
        solana_logger::setup();
        let leader_pubkey = Keypair::new().pubkey();
        let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(100, &leader_pubkey, 50);
        let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger!(&genesis_block);
        debug!("ledger_path: {:?}", ledger_path);

        let mut entries = vec![];
        let blockhash = genesis_block.hash();
        for _ in 0..3 {
            // Transfer one token from the mint to a random account
            let keypair = Keypair::new();
            let tx =
                SystemTransaction::new_account(&mint_keypair, &keypair.pubkey(), 1, blockhash, 0);
            let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
            last_entry_hash = entry.hash;
            entries.push(entry);

            // Add a second Transaction that will produce a
            // ProgramError<0, ResultWithNegativeLamports> error when processed
            let keypair2 = Keypair::new();
            let tx = SystemTransaction::new_account(&keypair, &keypair2.pubkey(), 42, blockhash, 0);
            let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
            last_entry_hash = entry.hash;
            entries.push(entry);
        }

        // Fill up the rest of slot 1 with ticks
        entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash));

        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
        blocktree.write_entries(1, 0, 0, &entries).unwrap();
        let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
        let (bank_forks, bank_forks_info) =
            process_blocktree(&genesis_block, &blocktree, None).unwrap();

        assert_eq!(bank_forks_info.len(), 1);
        assert_eq!(
            bank_forks_info[0],
            BankForksInfo {
                bank_slot: 1,
                entry_height,
            }
        );

        let bank = bank_forks[1].clone();
        assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 50 - 3);
        assert_eq!(bank.tick_height(), 2 * genesis_block.ticks_per_slot - 1);
        assert_eq!(bank.last_blockhash(), entries.last().unwrap().hash);
    }

    #[test]
    fn test_process_ledger_with_one_tick_per_slot() {
        let (mut genesis_block, _mint_keypair) = GenesisBlock::new(123);
        genesis_block.ticks_per_slot = 1;
        let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let (bank_forks, bank_forks_info) =
            process_blocktree(&genesis_block, &blocktree, None).unwrap();

        assert_eq!(bank_forks_info.len(), 1);
        assert_eq!(
            bank_forks_info[0],
            BankForksInfo {
                bank_slot: 0,
                entry_height: 1,
            }
        );
        let bank = bank_forks[0].clone();
        assert_eq!(bank.tick_height(), 0);
    }

    #[test]
    fn test_process_entries_tick() {
        let (genesis_block, _mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);

        // ensure bank can process a tick
        assert_eq!(bank.tick_height(), 0);
        let tick = next_entry(&genesis_block.hash(), 1, vec![]);
        assert_eq!(process_entries(&bank, &[tick.clone()]), Ok(()));
        assert_eq!(bank.tick_height(), 1);
    }

    #[test]
    fn test_process_entries_2_entries_collision() {
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();

        let blockhash = bank.last_blockhash();

        // ensure bank can process 2 entries that have a common account and no tick is registered
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair1.pubkey(),
            2,
            bank.last_blockhash(),
            0,
        );
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair2.pubkey(),
            2,
            bank.last_blockhash(),
            0,
        );
        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
        assert_eq!(process_entries(&bank, &[entry_1, entry_2]), Ok(()));
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.last_blockhash(), blockhash);
    }

    #[test]
    fn test_process_entries_2_txes_collision() {
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(
            bank.transfer(4, &mint_keypair, &keypair1.pubkey(), bank.last_blockhash()),
            Ok(_)
        );
        assert_matches!(
            bank.transfer(4, &mint_keypair, &keypair2.pubkey(), bank.last_blockhash()),
            Ok(_)
        );

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![SystemTransaction::new_account(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
                0,
            )],
        );

        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                SystemTransaction::new_account(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                    0,
                ), // should be fine
                SystemTransaction::new_account(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                    0,
                ), // will collide
            ],
        );

        assert_eq!(
            process_entries(&bank, &[entry_1_to_mint, entry_2_to_3_mint_to_1]),
            Ok(())
        );

        assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
    }

    #[test]
    fn test_process_entries_2_entries_par() {
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        // load accounts
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair2.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));

        // ensure bank can process 2 entries that do not have a common account and no tick is registered
        let blockhash = bank.last_blockhash();
        let tx = SystemTransaction::new_account(
            &keypair1,
            &keypair3.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tx = SystemTransaction::new_account(
            &keypair2,
            &keypair4.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
        assert_eq!(process_entries(&bank, &[entry_1, entry_2]), Ok(()));
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
        assert_eq!(bank.last_blockhash(), blockhash);
    }

    #[test]
    fn test_process_entries_2_entries_tick() {
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        // load accounts
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));
        let tx = SystemTransaction::new_account(
            &mint_keypair,
            &keypair2.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));

        let blockhash = bank.last_blockhash();
        while blockhash == bank.last_blockhash() {
            bank.register_tick(&Hash::default());
        }

        // ensure bank can process 2 entries that do not have a common account and tick is registered
        let tx = SystemTransaction::new_account(&keypair2, &keypair3.pubkey(), 1, blockhash, 0);
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tick = next_entry(&entry_1.hash, 1, vec![]);
        let tx = SystemTransaction::new_account(
            &keypair1,
            &keypair4.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
        assert_eq!(
            process_entries(&bank, &[entry_1.clone(), tick.clone(), entry_2.clone()]),
            Ok(())
        );
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);

        // ensure that an error is returned for an empty account (keypair2)
        let tx = SystemTransaction::new_account(
            &keypair2,
            &keypair3.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
        assert_eq!(
            process_entries(&bank, &[entry_3]),
            Err(BankError::AccountNotFound)
        );
    }
}