2018-06-06 08:58:49 -07:00
|
|
|
//! The `banking_stage` processes Transaction messages. It is intended to be used
|
|
|
|
//! to construct a software pipeline. The stage uses all available CPU cores and
|
|
|
|
//! can do its processing in parallel with signature verification on the GPU.
|
2019-03-29 20:00:36 -07:00
|
|
|
use crate::blocktree::Blocktree;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::cluster_info::ClusterInfo;
|
2019-04-13 23:19:54 -07:00
|
|
|
use crate::contact_info::ContactInfo;
|
2019-03-15 12:48:32 -07:00
|
|
|
use crate::entry;
|
2019-03-28 12:37:41 -07:00
|
|
|
use crate::entry::{hash_transactions, Entry};
|
2019-04-19 02:39:44 -07:00
|
|
|
use crate::leader_schedule_cache::LeaderScheduleCache;
|
2019-03-11 03:06:22 -07:00
|
|
|
use crate::packet;
|
|
|
|
use crate::packet::{Packet, Packets};
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntries};
|
|
|
|
use crate::poh_service::{PohService, PohServiceConfig};
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::result::{Error, Result};
|
|
|
|
use crate::service::Service;
|
|
|
|
use crate::sigverify_stage::VerifiedPackets;
|
2018-05-14 16:31:13 -07:00
|
|
|
use bincode::deserialize;
|
2019-02-18 22:26:22 -08:00
|
|
|
use solana_metrics::counter::Counter;
|
2019-04-05 09:42:54 -07:00
|
|
|
use solana_runtime::bank::Bank;
|
2019-04-02 03:55:42 -07:00
|
|
|
use solana_runtime::locked_accounts_results::LockedAccountsResults;
|
2019-03-20 13:49:46 -07:00
|
|
|
use solana_sdk::pubkey::Pubkey;
|
2019-04-23 11:56:30 -07:00
|
|
|
use solana_sdk::timing::{self, duration_as_us, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES};
|
2019-04-05 09:42:54 -07:00
|
|
|
use solana_sdk::transaction::{self, Transaction, TransactionError};
|
2019-04-17 21:07:45 -07:00
|
|
|
use std::cmp;
|
2019-03-03 16:44:06 -08:00
|
|
|
use std::net::UdpSocket;
|
2019-02-24 08:59:49 -08:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2019-03-03 16:44:06 -08:00
|
|
|
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
|
|
|
use std::sync::{Arc, Mutex, RwLock};
|
2018-07-03 21:14:08 -07:00
|
|
|
use std::thread::{self, Builder, JoinHandle};
|
2018-05-14 16:31:13 -07:00
|
|
|
use std::time::Duration;
|
|
|
|
use std::time::Instant;
|
2018-12-21 13:55:45 -08:00
|
|
|
use sys_info;
|
2018-05-14 16:31:13 -07:00
|
|
|
|
2019-04-17 18:15:50 -07:00
|
|
|
/// Batches that still contain unprocessed transactions: each entry pairs the
/// received packets with the index of the first unprocessed packet and the
/// per-packet verification bytes.
pub type UnprocessedPackets = Vec<(Packets, usize, Vec<u8>)>; // `usize` is the index of the first unprocessed packet in `SharedPackets`
|
2018-10-18 22:57:48 -07:00
|
|
|
|
2018-09-26 05:52:13 -07:00
|
|
|
// number of threads is 1 until mt bank is ready
/// Fallback worker-thread count used by `num_threads()` when the CPU count
/// cannot be queried from the system.
pub const NUM_THREADS: u32 = 10;
|
2018-09-26 05:52:13 -07:00
|
|
|
|
2018-06-06 08:58:49 -07:00
|
|
|
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
    // Join handles for the transaction-processing worker threads spawned in
    // `new_num_threads`; joined in `Service::join`.
    bank_thread_hdls: Vec<JoinHandle<()>>,
}
|
|
|
|
|
2019-04-13 23:19:54 -07:00
|
|
|
/// What `process_or_forward_packets` decides to do with buffered packets.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BufferedPacketsDecision {
    // Process the buffered packets here (a bank is available, so this node leads).
    Consume,
    // Send the buffered packets on to the known leader.
    Forward,
    // Keep holding the packets (leader unknown, or this node is/will be leader).
    Hold,
}
|
|
|
|
|
2018-05-14 16:31:13 -07:00
|
|
|
impl BankingStage {
|
2018-07-05 14:41:53 -07:00
|
|
|
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
///
/// Convenience wrapper around `new_num_threads` that caps the worker count at
/// `min(2, num_threads())`.
#[allow(clippy::new_ret_no_self)]
pub fn new(
    cluster_info: &Arc<RwLock<ClusterInfo>>,
    poh_recorder: &Arc<Mutex<PohRecorder>>,
    verified_receiver: Receiver<VerifiedPackets>,
    verified_vote_receiver: Receiver<VerifiedPackets>,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> Self {
    Self::new_num_threads(
        cluster_info,
        poh_recorder,
        verified_receiver,
        verified_vote_receiver,
        cmp::min(2, Self::num_threads()),
        leader_schedule_cache,
    )
}
|
|
|
|
|
|
|
|
/// Spawn `num_threads` banking worker threads.
///
/// The first `num_threads - 1` threads consume from `verified_receiver` with
/// forwarding enabled; the last thread consumes vote transactions from
/// `verified_vote_receiver` with forwarding disabled (votes are gossiped).
pub fn new_num_threads(
    cluster_info: &Arc<RwLock<ClusterInfo>>,
    poh_recorder: &Arc<Mutex<PohRecorder>>,
    verified_receiver: Receiver<VerifiedPackets>,
    verified_vote_receiver: Receiver<VerifiedPackets>,
    num_threads: u32,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> Self {
    // Receivers are shared between worker threads behind a mutex.
    let verified_receiver = Arc::new(Mutex::new(verified_receiver));
    let verified_vote_receiver = Arc::new(Mutex::new(verified_vote_receiver));
    // Single thread to generate entries from many banks.
    // This thread talks to poh_service and broadcasts the entries once they have been recorded.
    // Once an entry has been recorded, its blockhash is registered with the bank.
    let exit = Arc::new(AtomicBool::new(false));
    // Many banks that process transactions in parallel.
    let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
        .map(|i| {
            let (verified_receiver, enable_forwarding) = if i < num_threads - 1 {
                (verified_receiver.clone(), true)
            } else {
                // Disable forwarding of vote transactions, as votes are gossiped
                (verified_vote_receiver.clone(), false)
            };
            let poh_recorder = poh_recorder.clone();
            let cluster_info = cluster_info.clone();
            let exit = exit.clone();
            let mut recv_start = Instant::now();
            let leader_schedule_cache = leader_schedule_cache.clone();
            Builder::new()
                .name("solana-banking-stage-tx".to_string())
                .spawn(move || {
                    Self::process_loop(
                        &verified_receiver,
                        &poh_recorder,
                        &cluster_info,
                        &mut recv_start,
                        enable_forwarding,
                        leader_schedule_cache,
                    );
                    // Signal shutdown once the processing loop returns.
                    exit.store(true, Ordering::Relaxed);
                })
                .unwrap()
        })
        .collect();
    Self { bank_thread_hdls }
}
|
|
|
|
|
|
|
|
/// Forward the still-unprocessed tail of each packet batch to the leader's
/// TPU-via-blobs address over `socket`.
///
/// Only packets at or after each batch's start index are sent; returns the
/// first send error, if any.
fn forward_unprocessed_packets(
    socket: &std::net::UdpSocket,
    tpu_via_blobs: &std::net::SocketAddr,
    unprocessed_packets: &[(Packets, usize, Vec<u8>)],
) -> std::io::Result<()> {
    // Drop the verification bytes; only the packets and start offsets matter here.
    let locked_packets: Vec<_> = unprocessed_packets
        .iter()
        .map(|(p, start_index, _)| (p, start_index))
        .collect();
    // Flatten to individual unprocessed packets across all batches.
    let packets: Vec<&Packet> = locked_packets
        .iter()
        .flat_map(|(p, start_index)| &p.packets[**start_index..])
        .collect();
    inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
    let blobs = packet::packets_to_blobs(&packets);

    for blob in blobs {
        socket.send_to(&blob.data[..blob.meta.size], tpu_via_blobs)?;
    }

    Ok(())
}
|
|
|
|
|
2019-04-11 17:23:45 -07:00
|
|
|
/// Attempt to process previously buffered packet batches against the current
/// working bank.
///
/// Batches that cannot be processed (no bank available, or the bank halted
/// mid-batch) are re-buffered and returned for a later retry/forward.
fn process_buffered_packets(
    poh_recorder: &Arc<Mutex<PohRecorder>>,
    buffered_packets: &[(Packets, usize, Vec<u8>)],
) -> Result<UnprocessedPackets> {
    let mut unprocessed_packets = vec![];
    // Set once a batch could not be fully processed; all later batches are
    // re-buffered untouched to preserve ordering.
    let mut bank_shutdown = false;
    for (msgs, offset, vers) in buffered_packets {
        if bank_shutdown {
            inc_new_counter_info!("banking_stage-rebuffered_packets", vers.len() - *offset);
            unprocessed_packets.push((msgs.to_owned(), *offset, vers.to_owned()));
            continue;
        }

        // Re-check the working bank per batch; it can disappear between batches.
        let bank = poh_recorder.lock().unwrap().bank();
        if bank.is_none() {
            inc_new_counter_info!("banking_stage-rebuffered_packets", vers.len() - *offset);
            unprocessed_packets.push((msgs.to_owned(), *offset, vers.to_owned()));
            continue;
        }
        let bank = bank.unwrap();

        let (processed, verified_txs, verified_indexes) =
            Self::process_received_packets(&bank, &poh_recorder, &msgs, &vers, *offset)?;
        inc_new_counter_info!("banking_stage-consumed_buffered_packets", processed);
        inc_new_counter_info!("banking_stage-process_transactions", processed);
        inc_new_counter_info!(
            "banking_stage-rebuffered_packets",
            verified_txs.len() - processed
        );
        if processed < verified_txs.len() {
            bank_shutdown = true;
            // Collect any unprocessed transactions in this batch for forwarding
            unprocessed_packets.push((
                msgs.to_owned(),
                verified_indexes[processed],
                vers.to_owned(),
            ));
        }
    }
    Ok(unprocessed_packets)
}
|
2019-03-29 11:20:36 -07:00
|
|
|
|
2019-04-13 23:19:54 -07:00
|
|
|
fn process_or_forward_packets(
|
|
|
|
leader_data: Option<&ContactInfo>,
|
|
|
|
bank_is_available: bool,
|
2019-04-23 11:56:30 -07:00
|
|
|
would_be_leader: bool,
|
2019-04-13 23:19:54 -07:00
|
|
|
my_id: &Pubkey,
|
|
|
|
) -> BufferedPacketsDecision {
|
|
|
|
leader_data.map_or(
|
|
|
|
// If leader is not known, return the buffered packets as is
|
|
|
|
BufferedPacketsDecision::Hold,
|
|
|
|
// else process the packets
|
|
|
|
|x| {
|
|
|
|
if bank_is_available {
|
|
|
|
// If the bank is available, this node is the leader
|
|
|
|
BufferedPacketsDecision::Consume
|
2019-04-23 11:56:30 -07:00
|
|
|
} else if would_be_leader {
|
|
|
|
// If the node will be the leader soon, hold the packets for now
|
|
|
|
BufferedPacketsDecision::Hold
|
2019-04-13 23:19:54 -07:00
|
|
|
} else if x.id != *my_id {
|
|
|
|
// If the current node is not the leader, forward the buffered packets
|
|
|
|
BufferedPacketsDecision::Forward
|
|
|
|
} else {
|
|
|
|
// We don't know the leader. Hold the packets for now
|
|
|
|
BufferedPacketsDecision::Hold
|
|
|
|
}
|
|
|
|
},
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2019-04-11 17:23:45 -07:00
|
|
|
/// Dispatch buffered packets according to `process_or_forward_packets`:
/// consume them locally, forward them to the leader, or keep holding them.
///
/// Returns the batches that remain unprocessed after this pass.
fn handle_buffered_packets(
    socket: &std::net::UdpSocket,
    poh_recorder: &Arc<Mutex<PohRecorder>>,
    cluster_info: &Arc<RwLock<ClusterInfo>>,
    buffered_packets: &[(Packets, usize, Vec<u8>)],
    enable_forwarding: bool,
) -> Result<UnprocessedPackets> {
    let rcluster_info = cluster_info.read().unwrap();

    // Take the poh lock only long enough to snapshot the decision inputs.
    let decision = {
        let poh = poh_recorder.lock().unwrap();
        Self::process_or_forward_packets(
            rcluster_info.leader_data(),
            poh.bank().is_some(),
            poh.would_be_leader(DEFAULT_TICKS_PER_SLOT),
            &rcluster_info.id(),
        )
    };

    match decision {
        BufferedPacketsDecision::Consume => {
            Self::process_buffered_packets(poh_recorder, buffered_packets)
        }
        BufferedPacketsDecision::Forward => {
            if enable_forwarding {
                // Best effort: forwarding failures are ignored.
                // NOTE(review): unwrap on leader_data() assumes the leader is still
                // known here; the read lock held since the decision makes that hold.
                let _ = Self::forward_unprocessed_packets(
                    &socket,
                    &rcluster_info.leader_data().unwrap().tpu_via_blobs,
                    &buffered_packets,
                );
            }
            // Forwarded (or dropped) packets are no longer buffered.
            Ok(vec![])
        }
        // Hold: keep the buffer unchanged.
        _ => Ok(buffered_packets.to_vec()),
    }
}
|
|
|
|
|
|
|
|
/// Return true when unprocessed packets should be buffered locally, i.e. when
/// this node is (or is computed/assumed to be) the upcoming leader.
fn should_buffer_packets(
    poh_recorder: &Arc<Mutex<PohRecorder>>,
    cluster_info: &Arc<RwLock<ClusterInfo>>,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> bool {
    let rcluster_info = cluster_info.read().unwrap();

    // Buffer the packets if I am the next leader
    // or, if it was getting sent to me
    // or, the next leader is unknown
    let poh = poh_recorder.lock().unwrap();
    let leader_id = match poh.bank() {
        // With a working bank, look up the leader of the next slot.
        Some(bank) => leader_schedule_cache
            .slot_leader_at_else_compute(bank.slot() + 1, &bank)
            .unwrap_or_default(),
        None => {
            if poh.would_be_leader(DEFAULT_TICKS_PER_SLOT) {
                // About to become leader: treat self as leader.
                rcluster_info.id()
            } else {
                // Fall back to gossip's view; default to self when unknown.
                rcluster_info
                    .leader_data()
                    .map_or(rcluster_info.id(), |x| x.id)
            }
        }
    };

    leader_id == rcluster_info.id()
}
|
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
/// Worker-thread main loop: drain buffered packets, receive new verified
/// packets, and either process, buffer, or forward them.
///
/// Exits when the receive channel errors (other than a timeout), which
/// happens when the sender side is dropped.
pub fn process_loop(
    verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
    poh_recorder: &Arc<Mutex<PohRecorder>>,
    cluster_info: &Arc<RwLock<ClusterInfo>>,
    recv_start: &mut Instant,
    enable_forwarding: bool,
    leader_schedule_cache: Arc<LeaderScheduleCache>,
) {
    // Ephemeral socket used only for forwarding packets to the leader.
    let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let mut buffered_packets = vec![];
    loop {
        if !buffered_packets.is_empty() {
            // On error the buffer is dropped rather than retried forever.
            Self::handle_buffered_packets(
                &socket,
                poh_recorder,
                cluster_info,
                &buffered_packets,
                enable_forwarding,
            )
            .map(|packets| buffered_packets = packets)
            .unwrap_or_else(|_| buffered_packets.clear());
        }

        let recv_timeout = if !buffered_packets.is_empty() {
            // If packets are buffered, let's wait for less time on recv from the channel.
            // This helps detect the next leader faster, and processing the buffered
            // packets quickly
            Duration::from_millis(10)
        } else {
            // Default wait time
            Duration::from_millis(100)
        };

        match Self::process_packets(&verified_receiver, &poh_recorder, recv_start, recv_timeout)
        {
            // Timeouts are expected; loop around and retry.
            Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
            Ok(unprocessed_packets) => {
                if Self::should_buffer_packets(
                    poh_recorder,
                    cluster_info,
                    &leader_schedule_cache,
                ) {
                    let num = unprocessed_packets
                        .iter()
                        .map(|(x, start, _)| x.packets.len().saturating_sub(*start))
                        .sum();
                    inc_new_counter_info!("banking_stage-buffered_packets", num);
                    buffered_packets.extend_from_slice(&unprocessed_packets);
                    continue;
                }

                if enable_forwarding {
                    // Not (going to be) leader: hand leftovers to the known leader.
                    if let Some(leader) = cluster_info.read().unwrap().leader_data() {
                        let _ = Self::forward_unprocessed_packets(
                            &socket,
                            &leader.tpu_via_blobs,
                            &unprocessed_packets,
                        );
                    }
                }
            }
            Err(err) => {
                debug!("solana-banking-stage-tx: exit due to {:?}", err);
                break;
            }
        }
    }
}
|
|
|
|
|
2018-12-21 13:55:45 -08:00
|
|
|
/// Number of banking threads to use: one per CPU core, falling back to
/// `NUM_THREADS` when the core count cannot be determined.
pub fn num_threads() -> u32 {
    sys_info::cpu_num().unwrap_or(NUM_THREADS)
}
|
|
|
|
|
2019-02-08 10:12:43 -08:00
|
|
|
/// Convert the transactions from a blob of binary data to a vector of transactions
|
|
|
|
fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> {
|
2018-05-14 16:31:13 -07:00
|
|
|
p.packets
|
2018-10-31 22:12:15 -07:00
|
|
|
.iter()
|
2019-02-08 10:12:43 -08:00
|
|
|
.map(|x| deserialize(&x.data[0..x.meta.size]).ok())
|
2018-12-07 19:01:28 -08:00
|
|
|
.collect()
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
|
|
|
|
2019-02-16 14:02:21 -08:00
|
|
|
/// Record the transactions that executed (or failed only at the instruction
/// level) into PoH for `bank_slot`.
///
/// Transactions whose result is a non-instruction error are dropped here;
/// instruction errors are still recorded because their fees are charged.
fn record_transactions(
    bank_slot: u64,
    txs: &[Transaction],
    results: &[transaction::Result<()>],
    poh: &Arc<Mutex<PohRecorder>>,
) -> Result<()> {
    let processed_transactions: Vec<_> = results
        .iter()
        .zip(txs.iter())
        .filter_map(|(r, x)| match r {
            Ok(_) => Some(x.clone()),
            // Instruction errors are still recorded in PoH.
            Err(TransactionError::InstructionError(index, err)) => {
                debug!("instruction error {:?}, {:?}", index, err);
                Some(x.clone())
            }
            // Any other failure drops the transaction from the recorded set.
            Err(ref e) => {
                debug!("process transaction failed {:?}", e);
                None
            }
        })
        .collect();
    debug!("processed: {} ", processed_transactions.len());
    // unlock all the accounts with errors which are filtered by the above `filter_map`
    if !processed_transactions.is_empty() {
        let hash = hash_transactions(&processed_transactions);
        // record and unlock will unlock all the successful transactions
        poh.lock()
            .unwrap()
            .record(bank_slot, hash, processed_transactions)?;
    }
    Ok(())
}
|
|
|
|
|
2019-03-23 13:30:56 -07:00
|
|
|
/// Execute, record, and commit a batch of transactions whose accounts have
/// already been locked (`lock_results`).
///
/// Emits per-phase timing (load/execute, record, commit) at debug level.
fn process_and_record_transactions_locked(
    bank: &Bank,
    txs: &[Transaction],
    poh: &Arc<Mutex<PohRecorder>>,
    lock_results: &LockedAccountsResults,
) -> Result<()> {
    let now = Instant::now();
    // Use a shorter maximum age when adding transactions into the pipeline. This will reduce
    // the likelihood of any single thread getting starved and processing old ids.
    // TODO: Banking stage threads should be prioritized to complete faster then this queue
    // expires.
    let (loaded_accounts, results) =
        bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
    let load_execute_time = now.elapsed();

    let record_time = {
        let now = Instant::now();
        // Must record to PoH before committing; `?` aborts before commit on failure.
        Self::record_transactions(bank.slot(), txs, &results, poh)?;
        now.elapsed()
    };

    let commit_time = {
        let now = Instant::now();
        bank.commit_transactions(txs, &loaded_accounts, &results);
        now.elapsed()
    };

    debug!(
        "bank: {} load_execute: {}us record: {}us commit: {}us txs_len: {}",
        bank.slot(),
        duration_as_us(&load_execute_time),
        duration_as_us(&record_time),
        duration_as_us(&commit_time),
        txs.len(),
    );

    Ok(())
}
|
|
|
|
|
|
|
|
/// Lock the accounts for `txs`, process and record the batch, then release
/// the locks; lock/unlock timings are logged at debug level.
pub fn process_and_record_transactions(
    bank: &Bank,
    txs: &[Transaction],
    poh: &Arc<Mutex<PohRecorder>>,
) -> Result<()> {
    let now = Instant::now();
    // Once accounts are locked, other threads cannot encode transactions that will modify the
    // same account state
    let lock_results = bank.lock_accounts(txs);
    let lock_time = now.elapsed();

    let results = Self::process_and_record_transactions_locked(bank, txs, poh, &lock_results);

    let now = Instant::now();
    // Once the accounts are unlocked, new transactions can enter the pipeline to process them
    drop(lock_results);
    let unlock_time = now.elapsed();

    debug!(
        "bank: {} lock: {}us unlock: {}us txs_len: {}",
        bank.slot(),
        duration_as_us(&lock_time),
        duration_as_us(&unlock_time),
        txs.len(),
    );

    results
}
|
|
|
|
|
2019-02-13 19:12:14 -08:00
|
|
|
/// Sends transactions to the bank.
|
|
|
|
///
|
|
|
|
/// Returns the number of transactions successfully processed by the bank, which may be less
|
|
|
|
/// than the total number if max PoH height was reached and the bank halted
|
2018-09-21 21:01:13 -07:00
|
|
|
fn process_transactions(
|
2019-02-26 10:48:18 -08:00
|
|
|
bank: &Bank,
|
2018-09-27 13:49:50 -07:00
|
|
|
transactions: &[Transaction],
|
2019-02-24 08:59:49 -08:00
|
|
|
poh: &Arc<Mutex<PohRecorder>>,
|
2019-02-13 19:12:14 -08:00
|
|
|
) -> Result<(usize)> {
|
2018-09-21 21:01:13 -07:00
|
|
|
let mut chunk_start = 0;
|
|
|
|
while chunk_start != transactions.len() {
|
2019-03-15 12:48:32 -07:00
|
|
|
let chunk_end = chunk_start
|
|
|
|
+ entry::num_will_fit(
|
|
|
|
&transactions[chunk_start..],
|
|
|
|
packet::BLOB_DATA_SIZE as u64,
|
2019-04-18 14:45:41 -07:00
|
|
|
&Entry::serialized_to_blob_size,
|
2019-03-15 12:48:32 -07:00
|
|
|
);
|
2018-09-21 21:01:13 -07:00
|
|
|
|
2019-02-16 14:02:21 -08:00
|
|
|
let result = Self::process_and_record_transactions(
|
|
|
|
bank,
|
|
|
|
&transactions[chunk_start..chunk_end],
|
|
|
|
poh,
|
|
|
|
);
|
2019-02-26 10:48:18 -08:00
|
|
|
trace!("process_transcations: {:?}", result);
|
2019-04-17 15:37:01 -07:00
|
|
|
chunk_start = chunk_end;
|
2019-02-17 14:12:06 -08:00
|
|
|
if let Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)) = result {
|
2019-03-03 16:44:06 -08:00
|
|
|
info!(
|
|
|
|
"process transactions: max height reached slot: {} height: {}",
|
|
|
|
bank.slot(),
|
|
|
|
bank.tick_height()
|
|
|
|
);
|
2019-02-13 19:12:14 -08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
result?;
|
2018-09-21 21:01:13 -07:00
|
|
|
}
|
2019-02-13 19:12:14 -08:00
|
|
|
Ok(chunk_start)
|
2018-09-21 21:01:13 -07:00
|
|
|
}
|
|
|
|
|
2019-04-11 17:23:45 -07:00
|
|
|
/// Deserialize and filter one packet batch (starting at `offset`), then run
/// the verified transactions through `process_transactions`.
///
/// Returns the count of transactions processed, the verified transactions,
/// and their original packet indexes (relative to `offset`).
fn process_received_packets(
    bank: &Arc<Bank>,
    poh: &Arc<Mutex<PohRecorder>>,
    msgs: &Packets,
    vers: &[u8],
    offset: usize,
) -> Result<(usize, Vec<Transaction>, Vec<usize>)> {
    debug!("banking-stage-tx bank {}", bank.slot());
    let transactions =
        Self::deserialize_transactions(&Packets::new(msgs.packets[offset..].to_owned()));

    // Keep the verification bytes aligned with the sliced packets.
    let vers = vers[offset..].to_owned();

    debug!(
        "bank: {} transactions received {}",
        bank.slot(),
        transactions.len()
    );
    // Keep only transactions that deserialized and whose sigverify byte is
    // nonzero, remembering each one's index within the sliced batch.
    let (verified_transactions, verified_indexes): (Vec<_>, Vec<_>) = transactions
        .into_iter()
        .zip(vers)
        .zip(0..)
        .filter_map(|((tx, ver), index)| match tx {
            None => None,
            Some(tx) => {
                if ver != 0 {
                    Some((tx, index))
                } else {
                    None
                }
            }
        })
        .unzip();

    debug!(
        "bank: {} verified transactions {}",
        bank.slot(),
        verified_transactions.len()
    );

    let processed = Self::process_transactions(&bank, &verified_transactions, poh)?;

    Ok((processed, verified_transactions, verified_indexes))
}
|
|
|
|
|
2019-02-07 20:20:40 -08:00
|
|
|
/// Process the incoming packets
///
/// Receives one round of verified packet batches (bounded by `recv_timeout`),
/// processes them against the current working bank, and returns the batches
/// that could not be (fully) processed. Resets `recv_start` on completion.
pub fn process_packets(
    verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
    poh: &Arc<Mutex<PohRecorder>>,
    recv_start: &mut Instant,
    recv_timeout: Duration,
) -> Result<UnprocessedPackets> {
    let mms = verified_receiver
        .lock()
        .unwrap()
        .recv_timeout(recv_timeout)?;

    let mms_len = mms.len();
    let count = mms.iter().map(|x| x.1.len()).sum();
    debug!(
        "@{:?} process start stalled for: {:?}ms txs: {}",
        timing::timestamp(),
        timing::duration_as_ms(&recv_start.elapsed()),
        count,
    );
    inc_new_counter_info!("banking_stage-transactions_received", count);
    let proc_start = Instant::now();
    let mut new_tx_count = 0;

    let mut unprocessed_packets = vec![];
    // Set once a batch can't be fully processed; later batches are kept whole.
    let mut bank_shutdown = false;
    for (msgs, vers) in mms {
        if bank_shutdown {
            unprocessed_packets.push((msgs, 0, vers));
            continue;
        }

        // Re-check the working bank per batch; it may go away mid-round.
        let bank = poh.lock().unwrap().bank();
        if bank.is_none() {
            unprocessed_packets.push((msgs, 0, vers));
            continue;
        }
        let bank = bank.unwrap();

        let (processed, verified_txs, verified_indexes) =
            Self::process_received_packets(&bank, &poh, &msgs, &vers, 0)?;

        if processed < verified_txs.len() {
            bank_shutdown = true;
            // Collect any unprocessed transactions in this batch for forwarding
            unprocessed_packets.push((msgs, verified_indexes[processed], vers));
        }
        new_tx_count += processed;
    }

    inc_new_counter_info!(
        "banking_stage-time_ms",
        timing::duration_as_ms(&proc_start.elapsed()) as usize
    );
    let total_time_s = timing::duration_as_s(&proc_start.elapsed());
    let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
    debug!(
        "@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {}",
        timing::timestamp(),
        mms_len,
        total_time_ms,
        new_tx_count,
        (new_tx_count as f32) / (total_time_s)
    );
    inc_new_counter_info!("banking_stage-process_packets", count);
    inc_new_counter_info!("banking_stage-process_transactions", new_tx_count);

    // Next call measures the stall since the end of this one.
    *recv_start = Instant::now();

    Ok(unprocessed_packets)
}
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
|
|
|
|
2018-07-03 21:14:08 -07:00
|
|
|
impl Service for BankingStage {
|
2019-02-13 19:12:14 -08:00
|
|
|
type JoinReturnType = ();
|
2018-10-18 22:57:48 -07:00
|
|
|
|
2019-02-13 19:12:14 -08:00
|
|
|
fn join(self) -> thread::Result<()> {
|
2018-10-18 22:57:48 -07:00
|
|
|
for bank_thread_hdl in self.bank_thread_hdls {
|
2019-02-13 19:12:14 -08:00
|
|
|
bank_thread_hdl.join()?;
|
2018-09-26 05:52:13 -07:00
|
|
|
}
|
2019-02-13 19:12:14 -08:00
|
|
|
Ok(())
|
2018-07-03 21:14:08 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
/// Build a PoH recorder/service pair wired to `bank` and `blocktree` for use
/// in tests.
///
/// Returns the exit flag, the shared recorder, the running `PohService`, and
/// the channel on which recorded entries arrive.
pub fn create_test_recorder(
    bank: &Arc<Bank>,
    blocktree: &Arc<Blocktree>,
) -> (
    Arc<AtomicBool>,
    Arc<Mutex<PohRecorder>>,
    PohService,
    Receiver<WorkingBankEntries>,
) {
    let exit = Arc::new(AtomicBool::new(false));
    let (mut poh_recorder, entry_receiver) = PohRecorder::new(
        bank.tick_height(),
        bank.last_blockhash(),
        bank.slot(),
        Some(4),
        bank.ticks_per_slot(),
        &Pubkey::default(),
        blocktree,
        &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
    );
    // Attach the bank so the recorder has a working bank from the start.
    poh_recorder.set_bank(&bank);

    let poh_recorder = Arc::new(Mutex::new(poh_recorder));
    let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);

    (exit, poh_recorder, poh_service, entry_receiver)
}
|
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
|
|
|
use super::*;
|
2019-03-29 20:00:36 -07:00
|
|
|
use crate::blocktree::get_tmp_ledger_path;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::cluster_info::Node;
|
2019-01-09 14:33:44 -08:00
|
|
|
use crate::entry::EntrySlice;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::packet::to_packets;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::poh_recorder::WorkingBank;
|
2019-03-29 20:00:36 -07:00
|
|
|
use crate::{get_tmp_ledger_path, tmp_ledger_name};
|
2019-02-18 22:26:22 -08:00
|
|
|
use solana_sdk::genesis_block::GenesisBlock;
|
2019-03-23 20:12:27 -07:00
|
|
|
use solana_sdk::instruction::InstructionError;
|
2018-12-03 10:26:28 -08:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2019-04-03 08:45:57 -07:00
|
|
|
use solana_sdk::system_transaction;
|
2019-03-03 16:44:06 -08:00
|
|
|
use std::sync::mpsc::channel;
|
2018-09-25 15:01:51 -07:00
|
|
|
use std::thread::sleep;
|
|
|
|
|
|
|
|
#[test]
fn test_banking_stage_shutdown1() {
    // Spin up a full banking stage and then shut it down without ever
    // feeding it packets; both the stage and the PoH service must join
    // cleanly once the input channels close and the exit flag is raised.
    let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
    let bank = Arc::new(Bank::new(&genesis_block));
    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
    let (verified_sender, verified_receiver) = channel();
    let (vote_sender, vote_receiver) = channel();
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let blocktree = Arc::new(blocktree);
        let (exit, poh_recorder, poh_service, _entry_receiever) =
            create_test_recorder(&bank, &blocktree);
        let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
            Node::new_localhost().info,
        )));
        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
            verified_receiver,
            vote_receiver,
            &leader_schedule_cache,
        );
        // Dropping the senders closes the receivers; together with the exit
        // flag this is the shutdown signal for both services.
        drop(verified_sender);
        drop(vote_sender);
        exit.store(true, Ordering::Relaxed);
        banking_stage.join().unwrap();
        poh_service.join().unwrap();
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
#[test]
fn test_banking_stage_tick() {
    solana_logger::setup();
    // Shorten the slot to four ticks so the PoH service finishes quickly.
    let (mut genesis_block, _mint_keypair) = GenesisBlock::new(2);
    genesis_block.ticks_per_slot = 4;
    let bank = Arc::new(Bank::new(&genesis_block));
    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
    let start_hash = bank.last_blockhash();
    let (verified_sender, verified_receiver) = channel();
    let (vote_sender, vote_receiver) = channel();
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let blocktree = Arc::new(blocktree);
        let (exit, poh_recorder, poh_service, entry_receiver) =
            create_test_recorder(&bank, &blocktree);
        let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
            Node::new_localhost().info,
        )));
        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
            verified_receiver,
            vote_receiver,
            &leader_schedule_cache,
        );
        trace!("sending bank");
        // Give the PoH service time to emit the slot's ticks.
        sleep(Duration::from_millis(600));
        drop(verified_sender);
        drop(vote_sender);
        exit.store(true, Ordering::Relaxed);
        poh_service.join().unwrap();
        drop(poh_recorder);

        trace!("getting entries");
        // Flatten every received batch down to its bare entries.
        let entries: Vec<_> = entry_receiver
            .iter()
            .flat_map(|(_, batch)| batch.into_iter().map(|(entry, _)| entry))
            .collect();
        trace!("done");
        // The test asserts one fewer entry than ticks_per_slot, and that the
        // tick chain verifies from the genesis blockhash to the bank's tip.
        assert_eq!(entries.len(), genesis_block.ticks_per_slot as usize - 1);
        assert!(entries.verify(&start_hash));
        assert_eq!(entries.last().unwrap().hash, bank.last_blockhash());
        banking_stage.join().unwrap();
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
|
|
|
|
|
|
|
#[test]
fn test_banking_stage_entries_only() {
    // End-to-end check that the banking stage turns a verified packet batch
    // into entries: one good tx lands, an unverified tx and an unfunded tx
    // are dropped, and the resulting entries replay cleanly on a fresh bank.
    solana_logger::setup();
    let (genesis_block, mint_keypair) = GenesisBlock::new(10);
    let bank = Arc::new(Bank::new(&genesis_block));
    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
    let start_hash = bank.last_blockhash();
    let (verified_sender, verified_receiver) = channel();
    let (vote_sender, vote_receiver) = channel();
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree = Arc::new(
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        let (exit, poh_recorder, poh_service, entry_receiver) =
            create_test_recorder(&bank, &blocktree);
        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
        let cluster_info = Arc::new(RwLock::new(cluster_info));
        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
            verified_receiver,
            vote_receiver,
            &leader_schedule_cache,
        );

        // fund another account so we can send 2 good transactions in a single batch.
        let keypair = Keypair::new();
        let fund_tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair.pubkey(),
            2,
            start_hash,
            0,
        );
        bank.process_transaction(&fund_tx).unwrap();

        // good tx
        let to = Pubkey::new_rand();
        let tx = system_transaction::create_user_account(&mint_keypair, &to, 1, start_hash, 0);

        // good tx, but no verify (sigverify flag below is 0 for this one)
        let to2 = Pubkey::new_rand();
        let tx_no_ver =
            system_transaction::create_user_account(&keypair, &to2, 2, start_hash, 0);

        // bad tx, AccountNotFound: signed by a brand-new, unfunded keypair
        let keypair = Keypair::new();
        let to3 = Pubkey::new_rand();
        let tx_anf = system_transaction::create_user_account(&keypair, &to3, 1, start_hash, 0);

        // send 'em over
        let packets = to_packets(&[tx_no_ver, tx_anf, tx]);

        // glad they all fit
        assert_eq!(packets.len(), 1);
        verified_sender // no_ver, anf, tx
            .send(vec![(packets[0].clone(), vec![0u8, 1u8, 1u8])])
            .unwrap();

        // Close the inputs and stop PoH so the entry stream terminates.
        drop(verified_sender);
        drop(vote_sender);
        exit.store(true, Ordering::Relaxed);
        poh_service.join().unwrap();
        drop(poh_recorder);

        // Replay everything the stage produced against a fresh bank.
        let mut blockhash = start_hash;
        let bank = Bank::new(&genesis_block);
        bank.process_transaction(&fund_tx).unwrap();
        //receive entries + ticks
        for _ in 0..10 {
            let ventries: Vec<Vec<Entry>> = entry_receiver
                .iter()
                .map(|x| x.1.into_iter().map(|e| e.0).collect())
                .collect();

            for entries in &ventries {
                for entry in entries {
                    // Every recorded transaction must replay successfully.
                    bank.process_transactions(&entry.transactions)
                        .iter()
                        .for_each(|x| assert_eq!(*x, Ok(())));
                }
                assert!(entries.verify(&blockhash));
                blockhash = entries.last().unwrap().hash;
            }

            if bank.get_balance(&to) == 1 {
                break;
            }

            // Entries may still be in flight; poll again shortly.
            sleep(Duration::from_millis(200));
        }

        // Only the verified, funded transfer should have landed.
        assert_eq!(bank.get_balance(&to), 1);
        assert_eq!(bank.get_balance(&to2), 0);

        drop(entry_receiver);
        banking_stage.join().unwrap();
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
2019-03-03 16:44:06 -08:00
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
#[test]
fn test_banking_stage_entryfication() {
    solana_logger::setup();
    // In this attack we'll demonstrate that a verifier can interpret the ledger
    // differently if either the server doesn't signal the ledger to add an
    // Entry OR if the verifier tries to parallelize across multiple Entries.
    let (genesis_block, mint_keypair) = GenesisBlock::new(2);
    let (verified_sender, verified_receiver) = channel();

    // Process a batch that includes a transaction that receives two lamports.
    let alice = Keypair::new();
    let tx = system_transaction::create_user_account(
        &mint_keypair,
        &alice.pubkey(),
        2,
        genesis_block.hash(),
        0,
    );

    let packets = to_packets(&[tx]);
    verified_sender
        .send(vec![(packets[0].clone(), vec![1u8])])
        .unwrap();

    // Process a second batch that spends one of those lamports.
    // Sent as a separate batch: it depends on the first batch having landed.
    let tx = system_transaction::create_user_account(
        &alice,
        &mint_keypair.pubkey(),
        1,
        genesis_block.hash(),
        0,
    );
    let packets = to_packets(&[tx]);
    verified_sender
        .send(vec![(packets[0].clone(), vec![1u8])])
        .unwrap();

    let (vote_sender, vote_receiver) = channel();
    let ledger_path = get_tmp_ledger_path!();
    {
        let entry_receiver = {
            // start a banking_stage to eat verified receiver
            let bank = Arc::new(Bank::new(&genesis_block));
            let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path)
                    .expect("Expected to be able to open database ledger"),
            );
            let (exit, poh_recorder, poh_service, entry_receiver) =
                create_test_recorder(&bank, &blocktree);
            let cluster_info =
                ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
            let cluster_info = Arc::new(RwLock::new(cluster_info));
            // Two worker threads so the two batches may be processed in parallel.
            let _banking_stage = BankingStage::new_num_threads(
                &cluster_info,
                &poh_recorder,
                verified_receiver,
                vote_receiver,
                2,
                &leader_schedule_cache,
            );

            // wait for banking_stage to eat the packets
            while bank.get_balance(&alice.pubkey()) != 1 {
                sleep(Duration::from_millis(100));
            }
            exit.store(true, Ordering::Relaxed);
            poh_service.join().unwrap();
            // Hand the receiver out of this scope; everything else is dropped
            // here, which shuts the stage down.
            entry_receiver
        };
        drop(verified_sender);
        drop(vote_sender);

        // consume the entire entry_receiver, feed it into a new bank
        // check that the balance is what we expect.
        let entries: Vec<_> = entry_receiver
            .iter()
            .flat_map(|x| x.1.into_iter().map(|e| e.0))
            .collect();

        let bank = Bank::new(&genesis_block);
        for entry in &entries {
            bank.process_transactions(&entry.transactions)
                .iter()
                .for_each(|x| assert_eq!(*x, Ok(())));
        }

        // Assert the user holds one lamport, not two. If the stage only outputs one
        // entry, then the second transaction will be rejected, because it drives
        // the account balance below zero before the credit is added.
        assert_eq!(bank.get_balance(&alice.pubkey()), 1);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
2019-02-16 14:02:21 -08:00
|
|
|
|
|
|
|
#[test]
fn test_bank_record_transactions() {
    // Verify which per-transaction statuses cause a transaction to be
    // recorded into an entry: Ok and InstructionError are recorded, other
    // TransactionErrors are filtered out.
    let (genesis_block, mint_keypair) = GenesisBlock::new(10_000);
    let bank = Arc::new(Bank::new(&genesis_block));
    // An unbounded max tick height so recording never hits the slot end.
    let working_bank = WorkingBank {
        bank: bank.clone(),
        min_tick_height: bank.tick_height(),
        max_tick_height: std::u64::MAX,
    };
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let (poh_recorder, entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.slot(),
            None,
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
        );
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));
        poh_recorder.lock().unwrap().set_working_bank(working_bank);

        let recipient = Pubkey::new_rand();
        let transfers = vec![
            system_transaction::transfer(&mint_keypair, &recipient, 1, genesis_block.hash(), 0),
            system_transaction::transfer(&mint_keypair, &recipient, 1, genesis_block.hash(), 0),
        ];

        // Both statuses Ok: both transactions should be recorded.
        let mut statuses = vec![Ok(()), Ok(())];
        BankingStage::record_transactions(bank.slot(), &transfers, &statuses, &poh_recorder)
            .unwrap();
        let (_, recorded) = entry_receiver.recv().unwrap();
        assert_eq!(recorded[0].0.transactions.len(), transfers.len());

        // InstructionErrors should still be recorded
        statuses[0] = Err(TransactionError::InstructionError(
            1,
            InstructionError::new_result_with_negative_lamports(),
        ));
        BankingStage::record_transactions(bank.slot(), &transfers, &statuses, &poh_recorder)
            .unwrap();
        let (_, recorded) = entry_receiver.recv().unwrap();
        assert_eq!(recorded[0].0.transactions.len(), transfers.len());

        // Other TransactionErrors should not be recorded
        statuses[0] = Err(TransactionError::AccountNotFound);
        BankingStage::record_transactions(bank.slot(), &transfers, &statuses, &poh_recorder)
            .unwrap();
        let (_, recorded) = entry_receiver.recv().unwrap();
        assert_eq!(recorded[0].0.transactions.len(), transfers.len() - 1);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
|
|
|
|
2019-04-13 23:19:54 -07:00
|
|
|
#[test]
fn test_should_process_or_forward_packets() {
    let node_id = Pubkey::new_rand();
    let other_id = Pubkey::new_rand();

    // No known leader: the decision is Hold no matter what the flags say
    // or whose id is asking.
    assert_eq!(
        BankingStage::process_or_forward_packets(None, true, false, &node_id),
        BufferedPacketsDecision::Hold
    );
    assert_eq!(
        BankingStage::process_or_forward_packets(None, false, false, &node_id),
        BufferedPacketsDecision::Hold
    );
    assert_eq!(
        BankingStage::process_or_forward_packets(None, false, false, &other_id),
        BufferedPacketsDecision::Hold
    );

    // Leader is known and is `other_id`.
    let mut leader_info = ContactInfo::default();
    leader_info.id = other_id;
    // A non-leader node forwards...
    assert_eq!(
        BankingStage::process_or_forward_packets(Some(&leader_info), false, false, &node_id),
        BufferedPacketsDecision::Forward
    );
    // ...unless the third flag is set, in which case it holds.
    assert_eq!(
        BankingStage::process_or_forward_packets(Some(&leader_info), false, true, &node_id),
        BufferedPacketsDecision::Hold
    );
    // With the second flag set the packets are consumed.
    assert_eq!(
        BankingStage::process_or_forward_packets(Some(&leader_info), true, false, &node_id),
        BufferedPacketsDecision::Consume
    );
    // The leader itself never forwards to itself: it holds or consumes.
    assert_eq!(
        BankingStage::process_or_forward_packets(Some(&leader_info), false, false, &other_id),
        BufferedPacketsDecision::Hold
    );
    assert_eq!(
        BankingStage::process_or_forward_packets(Some(&leader_info), true, false, &other_id),
        BufferedPacketsDecision::Consume
    );
}
|
|
|
|
|
2019-02-16 14:02:21 -08:00
|
|
|
#[test]
fn test_bank_process_and_record_transactions() {
    // Process a transfer through process_and_record_transactions, confirm
    // it appears in an entry, then confirm that a second attempt past the
    // working bank's max tick height fails with MaxHeightReached.
    solana_logger::setup();
    let (genesis_block, mint_keypair) = GenesisBlock::new(10_000);
    let bank = Arc::new(Bank::new(&genesis_block));
    let pubkey = Pubkey::new_rand();

    let transactions = vec![system_transaction::transfer(
        &mint_keypair,
        &pubkey,
        1,
        genesis_block.hash(),
        0,
    )];

    // max_tick_height is one past the current height, so a single tick
    // exhausts the working bank.
    let working_bank = WorkingBank {
        bank: bank.clone(),
        min_tick_height: bank.tick_height(),
        max_tick_height: bank.tick_height() + 1,
    };
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let (poh_recorder, entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.slot(),
            Some(4),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(blocktree),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
        );
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));

        poh_recorder.lock().unwrap().set_working_bank(working_bank);

        BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder)
            .unwrap();
        poh_recorder.lock().unwrap().tick();

        let mut done = false;
        // read entries until I find mine, might be ticks...
        while let Ok((_, entries)) = entry_receiver.recv() {
            for (entry, _) in entries {
                if !entry.is_tick() {
                    trace!("got entry");
                    // The non-tick entry must carry our transaction, and the
                    // transfer must have been applied to the bank.
                    assert_eq!(entry.transactions.len(), transactions.len());
                    assert_eq!(bank.get_balance(&pubkey), 1);
                    done = true;
                }
            }
            if done {
                break;
            }
        }
        trace!("done ticking");

        assert_eq!(done, true);

        let transactions = vec![system_transaction::transfer(
            &mint_keypair,
            &pubkey,
            2,
            genesis_block.hash(),
            0,
        )];

        // The tick above reached max_tick_height, so recording must now fail.
        assert_matches!(
            BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder),
            Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
        );

        // The failed second transfer must not have changed the balance.
        assert_eq!(bank.get_balance(&pubkey), 1);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
2018-09-25 15:01:51 -07:00
|
|
|
}
|