2018-06-06 08:58:49 -07:00
|
|
|
//! The `banking_stage` processes Transaction messages. It is intended to be used
|
|
|
|
//! to construct a software pipeline. The stage uses all available CPU cores and
|
|
|
|
//! can do its processing in parallel with signature verification on the GPU.
|
2018-05-14 16:31:13 -07:00
|
|
|
|
|
|
|
use bank::Bank;
|
|
|
|
use bincode::deserialize;
|
2018-09-26 09:33:52 -07:00
|
|
|
use budget_transaction::BudgetTransaction;
|
2018-05-30 21:24:21 -07:00
|
|
|
use counter::Counter;
|
2018-09-21 21:01:13 -07:00
|
|
|
use entry::Entry;
|
2018-08-06 11:35:45 -07:00
|
|
|
use log::Level;
|
2018-09-18 08:02:57 -07:00
|
|
|
use packet::{Packets, SharedPackets};
|
2018-09-26 05:52:13 -07:00
|
|
|
use poh_recorder::PohRecorder;
|
2018-05-14 16:31:13 -07:00
|
|
|
use rayon::prelude::*;
|
2018-07-05 14:41:53 -07:00
|
|
|
use result::{Error, Result};
|
2018-07-03 21:14:08 -07:00
|
|
|
use service::Service;
|
2018-05-14 16:31:13 -07:00
|
|
|
use std::net::SocketAddr;
|
2018-09-26 05:52:13 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
|
|
|
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
|
|
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
use std::thread::sleep;
|
2018-07-03 21:14:08 -07:00
|
|
|
use std::thread::{self, Builder, JoinHandle};
|
2018-05-14 16:31:13 -07:00
|
|
|
use std::time::Duration;
|
|
|
|
use std::time::Instant;
|
|
|
|
use timing;
|
2018-05-23 23:29:01 -07:00
|
|
|
use transaction::Transaction;
|
2018-05-14 16:31:13 -07:00
|
|
|
|
2018-09-26 05:52:13 -07:00
|
|
|
// Number of transaction-processing worker threads; kept at 1 until the
// multi-threaded ("mt") bank is ready.
pub const NUM_THREADS: usize = 1;
|
|
|
|
|
2018-06-06 08:58:49 -07:00
|
|
|
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
    /// Handles to the stage's threads: the transaction-processing workers
    /// plus the tick producer (pushed last in `new`).
    thread_hdls: Vec<JoinHandle<()>>,
}
|
|
|
|
|
|
|
|
/// Controls how the tick producer advances PoH between ticks.
pub enum Config {
    /// * `Tick` - Run full PoH thread. Tick is a rough estimate of how many hashes to roll before transmitting a new entry.
    Tick(usize),
    /// * `Sleep`- Low power mode. Sleep is a rough estimate of how long to sleep before rolling 1 poh once and producing 1
    /// tick.
    Sleep(Duration),
}
|
|
|
|
|
2018-09-26 05:52:13 -07:00
|
|
|
impl Default for Config {
|
|
|
|
fn default() -> Config {
|
|
|
|
// TODO: Change this to Tick to enable PoH
|
|
|
|
Config::Sleep(Duration::from_millis(500))
|
|
|
|
}
|
|
|
|
}
|
2018-05-14 16:31:13 -07:00
|
|
|
impl BankingStage {
|
2018-07-05 14:41:53 -07:00
|
|
|
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
|
2018-05-14 16:31:13 -07:00
|
|
|
pub fn new(
|
|
|
|
bank: Arc<Bank>,
|
|
|
|
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
2018-09-26 05:52:13 -07:00
|
|
|
config: Config,
|
2018-09-21 21:01:13 -07:00
|
|
|
) -> (Self, Receiver<Vec<Entry>>) {
|
|
|
|
let (entry_sender, entry_receiver) = channel();
|
2018-09-26 05:52:13 -07:00
|
|
|
let shared_verified_receiver = Arc::new(Mutex::new(verified_receiver));
|
|
|
|
let poh = PohRecorder::new(bank.clone(), entry_sender);
|
|
|
|
let tick_poh = poh.clone();
|
|
|
|
// Tick producer is a headless producer, so when it exits it should notify the banking stage.
|
|
|
|
// Since channel are not used to talk between these threads an AtomicBool is used as a
|
|
|
|
// signal.
|
|
|
|
let poh_exit = Arc::new(AtomicBool::new(false));
|
|
|
|
let banking_exit = poh_exit.clone();
|
|
|
|
// Single thread to generate entries from many banks.
|
|
|
|
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
|
|
|
|
// Once an entry has been recorded, its last_id is registered with the bank.
|
|
|
|
let tick_producer = Builder::new()
|
|
|
|
.name("solana-banking-stage-tick_producer".to_string())
|
2018-09-21 21:01:13 -07:00
|
|
|
.spawn(move || {
|
2018-09-26 05:52:13 -07:00
|
|
|
if let Err(e) = Self::tick_producer(tick_poh, config, &poh_exit) {
|
|
|
|
match e {
|
|
|
|
Error::SendError => (),
|
|
|
|
_ => error!(
|
|
|
|
"solana-banking-stage-tick_producer unexpected error {:?}",
|
|
|
|
e
|
|
|
|
),
|
2018-09-25 15:01:51 -07:00
|
|
|
}
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
2018-09-26 05:52:13 -07:00
|
|
|
debug!("tick producer exiting");
|
|
|
|
poh_exit.store(true, Ordering::Relaxed);
|
2018-09-14 16:25:14 -07:00
|
|
|
}).unwrap();
|
2018-09-26 05:52:13 -07:00
|
|
|
|
|
|
|
// Many banks that process transactions in parallel.
|
|
|
|
let mut thread_hdls: Vec<JoinHandle<()>> = (0..NUM_THREADS)
|
|
|
|
.into_iter()
|
|
|
|
.map(|_| {
|
|
|
|
let thread_bank = bank.clone();
|
|
|
|
let thread_verified_receiver = shared_verified_receiver.clone();
|
|
|
|
let thread_poh = poh.clone();
|
|
|
|
let thread_banking_exit = banking_exit.clone();
|
|
|
|
Builder::new()
|
|
|
|
.name("solana-banking-stage-tx".to_string())
|
|
|
|
.spawn(move || {
|
|
|
|
loop {
|
|
|
|
if let Err(e) = Self::process_packets(
|
|
|
|
&thread_bank,
|
|
|
|
&thread_verified_receiver,
|
|
|
|
&thread_poh,
|
|
|
|
) {
|
|
|
|
debug!("got error {:?}", e);
|
|
|
|
match e {
|
|
|
|
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
|
|
|
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
Error::RecvError(_) => break,
|
|
|
|
Error::SendError => break,
|
|
|
|
_ => error!("solana-banking-stage-tx {:?}", e),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if thread_banking_exit.load(Ordering::Relaxed) {
|
|
|
|
debug!("tick service exited");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
thread_banking_exit.store(true, Ordering::Relaxed);
|
|
|
|
}).unwrap()
|
|
|
|
}).collect();
|
|
|
|
thread_hdls.push(tick_producer);
|
|
|
|
(BankingStage { thread_hdls }, entry_receiver)
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
|
|
|
|
2018-06-06 08:58:49 -07:00
|
|
|
/// Convert the transactions from a blob of binary data to a vector of transactions and
|
|
|
|
/// an unused `SocketAddr` that could be used to send a response.
|
2018-06-27 11:33:56 -07:00
|
|
|
fn deserialize_transactions(p: &Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
|
2018-05-14 16:31:13 -07:00
|
|
|
p.packets
|
|
|
|
.par_iter()
|
|
|
|
.map(|x| {
|
|
|
|
deserialize(&x.data[0..x.meta.size])
|
|
|
|
.map(|req| (req, x.meta.addr()))
|
|
|
|
.ok()
|
2018-09-14 16:25:14 -07:00
|
|
|
}).collect()
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
|
|
|
|
2018-09-26 05:52:13 -07:00
|
|
|
fn tick_producer(poh: PohRecorder, config: Config, poh_exit: &AtomicBool) -> Result<()> {
|
|
|
|
loop {
|
|
|
|
match config {
|
|
|
|
Config::Tick(num) => {
|
|
|
|
for _ in 0..num {
|
|
|
|
poh.hash();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Config::Sleep(duration) => {
|
|
|
|
sleep(duration);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
poh.tick()?;
|
|
|
|
if poh_exit.load(Ordering::Relaxed) {
|
|
|
|
debug!("tick service exited");
|
|
|
|
return Ok(());
|
|
|
|
}
|
|
|
|
}
|
2018-09-25 15:01:51 -07:00
|
|
|
}
|
|
|
|
|
2018-09-21 21:01:13 -07:00
|
|
|
fn process_transactions(
|
|
|
|
bank: &Arc<Bank>,
|
2018-09-26 05:52:13 -07:00
|
|
|
transactions: Vec<Transaction>,
|
|
|
|
poh: &PohRecorder,
|
|
|
|
) -> Result<()> {
|
|
|
|
debug!("transactions: {}", transactions.len());
|
2018-09-21 21:01:13 -07:00
|
|
|
let mut chunk_start = 0;
|
|
|
|
while chunk_start != transactions.len() {
|
2018-09-24 12:26:47 -07:00
|
|
|
let chunk_end = chunk_start + Entry::num_will_fit(&transactions[chunk_start..]);
|
2018-09-21 21:01:13 -07:00
|
|
|
|
2018-09-21 14:48:10 -07:00
|
|
|
let results = bank.process_transactions(&transactions[chunk_start..chunk_end]);
|
2018-09-21 21:01:13 -07:00
|
|
|
|
2018-09-21 14:48:10 -07:00
|
|
|
let processed_transactions: Vec<_> = transactions[chunk_start..chunk_end]
|
2018-09-21 21:01:13 -07:00
|
|
|
.into_iter()
|
2018-09-21 14:48:10 -07:00
|
|
|
.enumerate()
|
|
|
|
.filter_map(|(i, x)| match results[i] {
|
2018-09-26 18:37:24 -07:00
|
|
|
Ok(_) => Some(x.clone()),
|
2018-09-21 14:48:10 -07:00
|
|
|
Err(ref e) => {
|
2018-09-21 21:01:13 -07:00
|
|
|
debug!("process transaction failed {:?}", e);
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}).collect();
|
|
|
|
|
2018-09-24 12:26:47 -07:00
|
|
|
if !processed_transactions.is_empty() {
|
2018-09-26 18:37:24 -07:00
|
|
|
let hash = Transaction::hash(&processed_transactions);
|
2018-09-26 05:52:13 -07:00
|
|
|
debug!("processed ok: {} {}", processed_transactions.len(), hash);
|
|
|
|
poh.record(hash, processed_transactions)?;
|
2018-09-21 21:01:13 -07:00
|
|
|
}
|
2018-09-26 05:52:13 -07:00
|
|
|
chunk_start = chunk_end;
|
2018-09-21 21:01:13 -07:00
|
|
|
}
|
2018-09-26 05:52:13 -07:00
|
|
|
debug!("done process_transactions");
|
|
|
|
Ok(())
|
2018-09-21 21:01:13 -07:00
|
|
|
}
|
|
|
|
|
2018-06-06 08:58:49 -07:00
|
|
|
    /// Receive one batch of sigverify-checked packets from `verified_receiver`,
    /// deserialize and filter the transactions, and hand the survivors to
    /// `process_transactions` for execution and PoH recording.
    ///
    /// Blocks for at most 100ms waiting for input. Each received item pairs a
    /// packet batch with per-packet sigverify results (non-zero = signature ok).
    pub fn process_packets(
        bank: &Arc<Bank>,
        verified_receiver: &Arc<Mutex<Receiver<Vec<(SharedPackets, Vec<u8>)>>>>,
        poh: &PohRecorder,
    ) -> Result<()> {
        let recv_start = Instant::now();
        // Lock is held only long enough to pull one batch off the channel.
        let mms = verified_receiver
            .lock()
            .unwrap()
            .recv_timeout(Duration::from_millis(100))?;
        let mut reqs_len = 0;
        let mms_len = mms.len();
        info!(
            "@{:?} process start stalled for: {:?}ms batches: {}",
            timing::timestamp(),
            timing::duration_as_ms(&recv_start.elapsed()),
            mms.len(),
        );
        inc_new_counter_info!("banking_stage-entries_received", mms_len);
        // Snapshot so we can report how many transactions this call committed.
        let bank_starting_tx_count = bank.transaction_count();
        let count = mms.iter().map(|x| x.1.len()).sum();
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
            reqs_len += transactions.len();

            debug!("transactions received {}", transactions.len());

            // Drop packets that failed to deserialize, failed sigverify
            // (ver == 0), or whose spending plan doesn't verify.
            let transactions: Vec<_> = transactions
                .into_iter()
                .zip(vers)
                .filter_map(|(tx, ver)| match tx {
                    None => None,
                    Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
                        Some(tx)
                    } else {
                        None
                    },
                }).collect();
            debug!("verified transactions {}", transactions.len());
            Self::process_transactions(bank, transactions, poh)?;
        }

        inc_new_counter_info!(
            "banking_stage-time_ms",
            timing::duration_as_ms(&proc_start.elapsed()) as usize
        );
        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
        info!(
            "@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
            timing::timestamp(),
            mms_len,
            total_time_ms,
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        inc_new_counter_info!("banking_stage-process_packets", count);
        inc_new_counter_info!(
            "banking_stage-process_transactions",
            bank.transaction_count() - bank_starting_tx_count
        );
        Ok(())
    }
|
|
|
|
}
|
|
|
|
|
2018-07-03 21:14:08 -07:00
|
|
|
impl Service for BankingStage {
|
2018-09-13 14:00:17 -07:00
|
|
|
type JoinReturnType = ();
|
2018-07-03 21:14:08 -07:00
|
|
|
|
|
|
|
fn join(self) -> thread::Result<()> {
|
2018-09-26 05:52:13 -07:00
|
|
|
for thread_hdl in self.thread_hdls {
|
|
|
|
thread_hdl.join()?;
|
|
|
|
}
|
|
|
|
Ok(())
|
2018-07-03 21:14:08 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use bank::Bank;
    use ledger::Block;
    use mint::Mint;
    use packet::to_packets;
    use signature::{Keypair, KeypairUtil};
    use std::thread::sleep;
    use system_transaction::SystemTransaction;
    use transaction::Transaction;

    // Dropping the sigverify sender should shut the whole stage down cleanly.
    #[test]
    fn test_banking_stage_shutdown1() {
        let bank = Bank::new(&Mint::new(2));
        let (verified_sender, verified_receiver) = channel();
        let (banking_stage, _entry_receiver) =
            BankingStage::new(Arc::new(bank), verified_receiver, Default::default());
        drop(verified_sender);
        assert_eq!(banking_stage.join().unwrap(), ());
    }

    // Dropping the entry receiver should likewise shut the stage down:
    // the tick producer's send fails and the workers observe the exit flag.
    #[test]
    fn test_banking_stage_shutdown2() {
        let bank = Bank::new(&Mint::new(2));
        let (_verified_sender, verified_receiver) = channel();
        let (banking_stage, entry_receiver) =
            BankingStage::new(Arc::new(bank), verified_receiver, Default::default());
        drop(entry_receiver);
        assert_eq!(banking_stage.join().unwrap(), ());
    }

    // With no transactions at all, the tick producer alone should emit a
    // verifiable chain of tick entries.
    #[test]
    fn test_banking_stage_tick() {
        let bank = Arc::new(Bank::new(&Mint::new(2)));
        let start_hash = bank.last_id();
        let (verified_sender, verified_receiver) = channel();
        let (banking_stage, entry_receiver) = BankingStage::new(
            bank.clone(),
            verified_receiver,
            Config::Sleep(Duration::from_millis(1)),
        );
        // Give the 1ms tick producer time to emit some ticks.
        sleep(Duration::from_millis(500));
        drop(verified_sender);

        let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
        assert!(entries.len() != 0);
        assert!(entries.verify(&start_hash));
        // The last tick's id must have been registered with the bank.
        assert_eq!(entries[entries.len() - 1].id, bank.last_id());
        assert_eq!(banking_stage.join().unwrap(), ());
    }

    // Feed one batch of three packets: a valid tx, a tx marked failed by
    // sigverify, and a tx for a nonexistent account. Only the valid one
    // should make it into an entry.
    #[test]
    fn test_banking_stage_entries_only() {
        let mint = Mint::new(2);
        let bank = Arc::new(Bank::new(&mint));
        let start_hash = bank.last_id();
        let (verified_sender, verified_receiver) = channel();
        let (banking_stage, entry_receiver) =
            BankingStage::new(bank, verified_receiver, Default::default());

        // good tx
        let keypair = mint.keypair();
        let tx = Transaction::system_new(&keypair, keypair.pubkey(), 1, start_hash);

        // good tx, but no verify
        let tx_no_ver = Transaction::system_new(&keypair, keypair.pubkey(), 1, start_hash);

        // bad tx, AccountNotFound
        let keypair = Keypair::new();
        let tx_anf = Transaction::system_new(&keypair, keypair.pubkey(), 1, start_hash);

        // send 'em over
        let packets = to_packets(&[tx, tx_no_ver, tx_anf]);

        // glad they all fit
        assert_eq!(packets.len(), 1);
        verified_sender // tx, no_ver, anf
            .send(vec![(packets[0].clone(), vec![1u8, 0u8, 1u8])])
            .unwrap();

        drop(verified_sender);

        //receive entries + ticks
        let entries: Vec<_> = entry_receiver.iter().map(|x| x).collect();
        assert!(entries.len() >= 1);

        // Each batch of entries should chain off the previous one's id.
        let mut last_id = start_hash;
        entries.iter().for_each(|entries| {
            assert_eq!(entries.len(), 1);
            assert!(entries.verify(&last_id));
            last_id = entries.last().unwrap().id;
        });
        drop(entry_receiver);
        assert_eq!(banking_stage.join().unwrap(), ());
    }

    #[test]
    fn test_banking_stage_entryfication() {
        // In this attack we'll demonstrate that a verifier can interpret the ledger
        // differently if either the server doesn't signal the ledger to add an
        // Entry OR if the verifier tries to parallelize across multiple Entries.
        let mint = Mint::new(2);
        let bank = Arc::new(Bank::new(&mint));
        let (verified_sender, verified_receiver) = channel();
        let (banking_stage, entry_receiver) =
            BankingStage::new(bank.clone(), verified_receiver, Default::default());

        // Process a batch that includes a transaction that receives two tokens.
        let alice = Keypair::new();
        let tx = Transaction::system_new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());

        let packets = to_packets(&[tx]);
        verified_sender
            .send(vec![(packets[0].clone(), vec![1u8])])
            .unwrap();

        // Process a second batch that spends one of those tokens.
        let tx = Transaction::system_new(&alice, mint.pubkey(), 1, mint.last_id());
        let packets = to_packets(&[tx]);
        verified_sender
            .send(vec![(packets[0].clone(), vec![1u8])])
            .unwrap();
        drop(verified_sender);
        assert_eq!(banking_stage.join().unwrap(), ());

        // Collect the ledger and feed it to a new bank.
        let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
        // same assertion as running through the bank, really...
        assert!(entries.len() >= 2);

        // Assert the user holds one token, not two. If the stage only outputs one
        // entry, then the second transaction will be rejected, because it drives
        // the account balance below zero before the credit is added.
        let bank = Bank::new(&mint);
        for entry in entries {
            assert!(
                bank.process_transactions(&entry.transactions)
                    .into_iter()
                    .all(|x| x.is_ok())
            );
        }
        assert_eq!(bank.get_balance(&alice.pubkey()), 1);
    }
}
|