2018-06-06 08:58:49 -07:00
|
|
|
//! The `banking_stage` processes Transaction messages. It is intended to be used
|
|
|
|
//! to construct a software pipeline. The stage uses all available CPU cores and
|
|
|
|
//! can do its processing in parallel with signature verification on the GPU.
|
2019-09-19 10:06:08 -07:00
|
|
|
use crate::{
|
|
|
|
cluster_info::ClusterInfo,
|
2019-11-06 00:07:57 -08:00
|
|
|
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
|
2019-09-19 10:06:08 -07:00
|
|
|
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
|
|
|
|
poh_service::PohService,
|
|
|
|
result::{Error, Result},
|
|
|
|
service::Service,
|
|
|
|
};
|
2019-06-26 18:42:27 -07:00
|
|
|
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
|
2019-05-07 10:23:02 -07:00
|
|
|
use itertools::Itertools;
|
2019-10-18 09:28:51 -07:00
|
|
|
use solana_ledger::{
|
|
|
|
blocktree::Blocktree, entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache,
|
|
|
|
};
|
2019-06-29 06:34:49 -07:00
|
|
|
use solana_measure::measure::Measure;
|
2019-05-17 07:00:06 -07:00
|
|
|
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
|
2019-11-07 19:48:33 -08:00
|
|
|
use solana_perf::cuda_runtime::PinnedVec;
|
2019-11-04 20:13:43 -08:00
|
|
|
use solana_perf::perf_libs;
|
2019-09-19 10:06:08 -07:00
|
|
|
use solana_runtime::{accounts_db::ErrorCounters, bank::Bank, transaction_batch::TransactionBatch};
|
|
|
|
use solana_sdk::{
|
|
|
|
clock::{
|
2019-11-02 00:38:30 -07:00
|
|
|
Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
|
2019-11-06 00:07:57 -08:00
|
|
|
MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU,
|
2019-09-19 10:06:08 -07:00
|
|
|
},
|
|
|
|
poh_config::PohConfig,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
timing::{duration_as_ms, timestamp},
|
|
|
|
transaction::{self, Transaction, TransactionError},
|
|
|
|
};
|
|
|
|
use std::{
|
|
|
|
cmp, env,
|
|
|
|
net::UdpSocket,
|
|
|
|
sync::atomic::AtomicBool,
|
|
|
|
sync::mpsc::Receiver,
|
|
|
|
sync::{Arc, Mutex, RwLock},
|
|
|
|
thread::{self, Builder, JoinHandle},
|
|
|
|
time::Duration,
|
|
|
|
time::Instant,
|
2019-05-10 14:28:38 -07:00
|
|
|
};
|
2018-05-14 16:31:13 -07:00
|
|
|
|
2019-05-20 17:48:42 -07:00
|
|
|
// A batch of packets paired with the indexes of the packets inside it that
// still await processing.
type PacketsAndOffsets = (Packets, Vec<usize>);

// Packets that could not be processed this pass and are carried over.
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;

/// Transaction forwarding: forward buffered packets to the leader scheduled
/// this many slots ahead of the current slot.
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;

// Fixed thread size seems to be fastest on GCP setup
// (can be overridden at runtime via the SOLANA_BANKING_THREADS env var; see `num_threads`).
pub const NUM_THREADS: u32 = 4;

// Upper bound on packets buffered across all non-vote banking threads;
// divided among threads in `new_num_threads` to derive each thread's batch limit.
const TOTAL_BUFFERED_PACKETS: usize = 500_000;

// Maximum number of transactions handed to the bank in one processing chunk.
const MAX_NUM_TRANSACTIONS_PER_BATCH: usize = 128;
|
2019-09-10 11:04:03 -07:00
|
|
|
|
2018-06-06 08:58:49 -07:00
|
|
|
/// Stores the stage's thread handle and output receiver.
|
2018-05-14 16:31:13 -07:00
|
|
|
pub struct BankingStage {
    // Join handles for the worker threads spawned in `new_num_threads`;
    // joined on shutdown via the `Service` implementation.
    bank_thread_hdls: Vec<JoinHandle<()>>,
}
|
|
|
|
|
2019-04-13 23:19:54 -07:00
|
|
|
/// What to do with packets that were buffered while this node was not able
/// to process them; computed by `consume_or_forward_packets`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BufferedPacketsDecision {
    /// This node is the leader and has an active bank: process the packets now.
    Consume,
    /// Another node is the leader: send the packets to its TPU-forwards port.
    Forward,
    /// Leader unknown, or this node is (about to be) the leader without a bank:
    /// keep the packets buffered.
    Hold,
}
|
|
|
|
|
2018-05-14 16:31:13 -07:00
|
|
|
impl BankingStage {
|
2018-07-05 14:41:53 -07:00
|
|
|
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
|
2018-12-07 19:01:28 -08:00
|
|
|
#[allow(clippy::new_ret_no_self)]
|
2018-05-14 16:31:13 -07:00
|
|
|
pub fn new(
|
2019-03-03 16:44:06 -08:00
|
|
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
2019-02-26 10:48:18 -08:00
|
|
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
2019-11-01 14:23:03 -07:00
|
|
|
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
|
|
|
|
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
|
2019-03-18 22:08:21 -07:00
|
|
|
) -> Self {
|
|
|
|
Self::new_num_threads(
|
|
|
|
cluster_info,
|
|
|
|
poh_recorder,
|
|
|
|
verified_receiver,
|
2019-04-17 21:07:45 -07:00
|
|
|
verified_vote_receiver,
|
2019-06-28 01:55:24 -07:00
|
|
|
Self::num_threads(),
|
2019-03-18 22:08:21 -07:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2019-05-01 15:13:10 -07:00
|
|
|
    /// Spawns `num_threads` banking worker threads and returns the stage.
    ///
    /// The last thread is dedicated to vote packets (with forwarding disabled,
    /// since votes are gossiped); every other thread pulls from the regular
    /// `verified_receiver` with forwarding enabled.
    fn new_num_threads(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        verified_receiver: CrossbeamReceiver<Vec<Packets>>,
        verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
        num_threads: u32,
    ) -> Self {
        // Split the global packet budget across the non-vote threads.
        let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - 1) as usize * PACKETS_PER_BATCH);
        // Single thread to generate entries from many banks.
        // This thread talks to poh_service and broadcasts the entries once they have been recorded.
        // Once an entry has been recorded, its blockhash is registered with the bank.
        let my_pubkey = cluster_info.read().unwrap().id();
        // Many banks that process transactions in parallel.
        let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
            .map(|i| {
                let (verified_receiver, enable_forwarding) = if i < num_threads - 1 {
                    (verified_receiver.clone(), true)
                } else {
                    // Disable forwarding of vote transactions, as votes are gossiped
                    (verified_vote_receiver.clone(), false)
                };

                let poh_recorder = poh_recorder.clone();
                let cluster_info = cluster_info.clone();
                let mut recv_start = Instant::now();
                Builder::new()
                    .name("solana-banking-stage-tx".to_string())
                    .spawn(move || {
                        Self::process_loop(
                            my_pubkey,
                            &verified_receiver,
                            &poh_recorder,
                            &cluster_info,
                            &mut recv_start,
                            enable_forwarding,
                            i,
                            batch_limit,
                        );
                    })
                    .unwrap()
            })
            .collect();
        Self { bank_thread_hdls }
    }
|
|
|
|
|
2019-05-20 09:15:00 -07:00
|
|
|
fn filter_valid_packets_for_forwarding(all_packets: &[PacketsAndOffsets]) -> Vec<&Packet> {
|
2019-05-10 14:28:38 -07:00
|
|
|
all_packets
|
|
|
|
.iter()
|
|
|
|
.flat_map(|(p, valid_indexes)| valid_indexes.iter().map(move |x| &p.packets[*x]))
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2019-05-01 15:13:10 -07:00
|
|
|
fn forward_buffered_packets(
|
2019-03-06 12:01:21 -08:00
|
|
|
socket: &std::net::UdpSocket,
|
2019-07-30 14:50:02 -07:00
|
|
|
tpu_forwards: &std::net::SocketAddr,
|
2019-05-20 09:15:00 -07:00
|
|
|
unprocessed_packets: &[PacketsAndOffsets],
|
2019-03-03 16:44:06 -08:00
|
|
|
) -> std::io::Result<()> {
|
2019-05-10 14:28:38 -07:00
|
|
|
let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets);
|
2019-05-02 17:36:19 -07:00
|
|
|
inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
|
2019-07-30 14:50:02 -07:00
|
|
|
for p in packets {
|
|
|
|
socket.send_to(&p.data[..p.meta.size], &tpu_forwards)?;
|
2019-03-09 02:47:41 -08:00
|
|
|
}
|
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2019-05-20 09:15:00 -07:00
|
|
|
    /// Drains `buffered_packets` and processes as many of them as possible
    /// against the current working bank.
    ///
    /// Returns the batches (with index lists) that could not be processed and
    /// should be re-buffered by the caller. If processing stops early
    /// (`processed < verified_txs_len`, e.g. the bank's slot ended), the
    /// remaining batches are filtered for still-valid packets and re-buffered
    /// rather than executed.
    pub fn consume_buffered_packets(
        my_pubkey: &Pubkey,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        buffered_packets: &mut Vec<PacketsAndOffsets>,
        batch_limit: usize,
    ) -> Result<UnprocessedPackets> {
        let mut unprocessed_packets = vec![];
        let mut rebuffered_packets = 0;
        let mut new_tx_count = 0;
        let buffered_len = buffered_packets.len();
        // Drain so every batch is either processed or explicitly re-buffered.
        let mut buffered_packets_iter = buffered_packets.drain(..);
        let mut dropped_batches_count = 0;

        let mut proc_start = Measure::start("consume_buffered_process");
        while let Some((msgs, unprocessed_indexes)) = buffered_packets_iter.next() {
            let bank = poh_recorder.lock().unwrap().bank();
            if bank.is_none() {
                // No working bank right now: keep the batch for later.
                rebuffered_packets += unprocessed_indexes.len();
                Self::push_unprocessed(
                    &mut unprocessed_packets,
                    msgs,
                    unprocessed_indexes,
                    &mut dropped_batches_count,
                    batch_limit,
                );
                continue;
            }
            let bank = bank.unwrap();

            let (processed, verified_txs_len, new_unprocessed_indexes) =
                Self::process_received_packets(
                    &bank,
                    &poh_recorder,
                    &msgs,
                    unprocessed_indexes.to_owned(),
                );

            new_tx_count += processed;

            // Collect any unprocessed transactions in this batch for forwarding
            rebuffered_packets += new_unprocessed_indexes.len();
            Self::push_unprocessed(
                &mut unprocessed_packets,
                msgs,
                new_unprocessed_indexes,
                &mut dropped_batches_count,
                batch_limit,
            );

            if processed < verified_txs_len {
                let next_leader = poh_recorder.lock().unwrap().next_slot_leader();
                // Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
                #[allow(clippy::while_let_on_iterator)]
                while let Some((msgs, unprocessed_indexes)) = buffered_packets_iter.next() {
                    let unprocessed_indexes = Self::filter_unprocessed_packets(
                        &bank,
                        &msgs,
                        &unprocessed_indexes,
                        my_pubkey,
                        next_leader,
                    );
                    Self::push_unprocessed(
                        &mut unprocessed_packets,
                        msgs,
                        unprocessed_indexes,
                        &mut dropped_batches_count,
                        batch_limit,
                    );
                }
            }
        }

        proc_start.stop();

        debug!(
            "@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}",
            timestamp(),
            buffered_len,
            proc_start.as_ms(),
            new_tx_count,
            (new_tx_count as f32) / (proc_start.as_s())
        );

        inc_new_counter_info!("banking_stage-rebuffered_packets", rebuffered_packets);
        inc_new_counter_info!("banking_stage-consumed_buffered_packets", new_tx_count);
        inc_new_counter_debug!("banking_stage-process_transactions", new_tx_count);
        inc_new_counter_debug!("banking_stage-dropped_batches_count", dropped_batches_count);

        Ok(unprocessed_packets)
    }
|
2019-03-29 11:20:36 -07:00
|
|
|
|
2019-05-01 15:13:10 -07:00
|
|
|
    /// Decides what to do with buffered packets based on who the upcoming
    /// leader is and whether this node currently has a bank.
    fn consume_or_forward_packets(
        my_pubkey: &Pubkey,
        leader_pubkey: Option<Pubkey>,
        bank_is_available: bool,
        would_be_leader: bool,
    ) -> BufferedPacketsDecision {
        leader_pubkey.map_or(
            // If leader is not known, return the buffered packets as is
            BufferedPacketsDecision::Hold,
            // else process the packets
            |x| {
                if bank_is_available {
                    // If the bank is available, this node is the leader
                    BufferedPacketsDecision::Consume
                } else if would_be_leader {
                    // If the node will be the leader soon, hold the packets for now
                    BufferedPacketsDecision::Hold
                } else if x != *my_pubkey {
                    // If the current node is not the leader, forward the buffered packets
                    BufferedPacketsDecision::Forward
                } else {
                    // This node is the scheduled leader but has no bank yet;
                    // hold the packets instead of forwarding to ourselves.
                    BufferedPacketsDecision::Hold
                }
            },
        )
    }
|
|
|
|
|
2019-05-01 15:13:10 -07:00
|
|
|
    /// Acts on the decision from `consume_or_forward_packets` for the current
    /// `buffered_packets`: consume them against the working bank, forward them
    /// to the upcoming leader's TPU-forwards port, or hold them.
    fn process_buffered_packets(
        my_pubkey: &Pubkey,
        socket: &std::net::UdpSocket,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        buffered_packets: &mut Vec<PacketsAndOffsets>,
        enable_forwarding: bool,
        batch_limit: usize,
    ) -> Result<()> {
        // Snapshot leader/bank state under a single PohRecorder lock.
        let (leader_at_slot_offset, poh_has_bank, would_be_leader) = {
            let poh = poh_recorder.lock().unwrap();
            (
                poh.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET),
                poh.has_bank(),
                poh.would_be_leader(
                    (FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET - 1) * DEFAULT_TICKS_PER_SLOT,
                ),
            )
        };

        let decision = Self::consume_or_forward_packets(
            my_pubkey,
            leader_at_slot_offset,
            poh_has_bank,
            would_be_leader,
        );

        match decision {
            BufferedPacketsDecision::Consume => {
                // Re-buffer whatever consume could not process.
                let mut unprocessed = Self::consume_buffered_packets(
                    my_pubkey,
                    poh_recorder,
                    buffered_packets,
                    batch_limit,
                )?;
                buffered_packets.append(&mut unprocessed);
                Ok(())
            }
            BufferedPacketsDecision::Forward => {
                if enable_forwarding {
                    let next_leader = poh_recorder
                        .lock()
                        .unwrap()
                        .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET);
                    next_leader.map_or(Ok(()), |leader_pubkey| {
                        // Look up the leader's forwarding address in gossip.
                        let leader_addr = {
                            cluster_info
                                .read()
                                .unwrap()
                                .lookup(&leader_pubkey)
                                .map(|leader| leader.tpu_forwards)
                        };

                        leader_addr.map_or(Ok(()), |leader_addr| {
                            // Forwarding is best-effort: send errors are ignored
                            // and the buffer is cleared regardless.
                            let _ = Self::forward_buffered_packets(
                                &socket,
                                &leader_addr,
                                &buffered_packets,
                            );
                            buffered_packets.clear();
                            Ok(())
                        })
                    })
                } else {
                    // Forwarding disabled (vote thread): just drop the buffer.
                    buffered_packets.clear();
                    Ok(())
                }
            }
            _ => Ok(()),
        }
    }
|
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
    /// Main loop of one banking worker thread: alternately works down the
    /// buffered backlog and receives new verified packet batches, exiting when
    /// the verified-packet channel disconnects.
    pub fn process_loop(
        my_pubkey: Pubkey,
        verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        recv_start: &mut Instant,
        enable_forwarding: bool,
        id: u32,
        batch_limit: usize,
    ) {
        // Ephemeral socket used only for forwarding packets to the next leader.
        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut buffered_packets = vec![];
        loop {
            if !buffered_packets.is_empty() {
                Self::process_buffered_packets(
                    &my_pubkey,
                    &socket,
                    poh_recorder,
                    cluster_info,
                    &mut buffered_packets,
                    enable_forwarding,
                    batch_limit,
                )
                // On any error, drop the backlog rather than retry it forever.
                .unwrap_or_else(|_| buffered_packets.clear());
            }

            let recv_timeout = if !buffered_packets.is_empty() {
                // If packets are buffered, let's wait for less time on recv from the channel.
                // This helps detect the next leader faster, and processing the buffered
                // packets quickly
                Duration::from_millis(10)
            } else {
                // Default wait time
                Duration::from_millis(100)
            };

            match Self::process_packets(
                &my_pubkey,
                &verified_receiver,
                &poh_recorder,
                recv_start,
                recv_timeout,
                id,
                batch_limit,
            ) {
                Err(Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)) => (),
                Err(Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected)) => break,
                Ok(mut unprocessed_packets) => {
                    if unprocessed_packets.is_empty() {
                        continue;
                    }
                    let num = unprocessed_packets
                        .iter()
                        .map(|(_, unprocessed)| unprocessed.len())
                        .sum();
                    inc_new_counter_info!("banking_stage-buffered_packets", num);
                    buffered_packets.append(&mut unprocessed_packets);
                }
                Err(err) => {
                    debug!("solana-banking-stage-tx error: {:?}", err);
                }
            }
        }
    }
|
|
|
|
|
2018-12-21 13:55:45 -08:00
|
|
|
pub fn num_threads() -> u32 {
|
2019-06-28 01:55:24 -07:00
|
|
|
const MIN_THREADS_VOTES: u32 = 1;
|
|
|
|
const MIN_THREADS_BANKING: u32 = 1;
|
|
|
|
cmp::max(
|
|
|
|
env::var("SOLANA_BANKING_THREADS")
|
|
|
|
.map(|x| x.parse().unwrap_or(NUM_THREADS))
|
|
|
|
.unwrap_or(NUM_THREADS),
|
|
|
|
MIN_THREADS_VOTES + MIN_THREADS_BANKING,
|
|
|
|
)
|
2018-12-21 13:55:45 -08:00
|
|
|
}
|
|
|
|
|
2019-02-08 10:12:43 -08:00
|
|
|
/// Convert the transactions from a blob of binary data to a vector of transactions
|
|
|
|
fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> {
|
2018-05-14 16:31:13 -07:00
|
|
|
p.packets
|
2018-10-31 22:12:15 -07:00
|
|
|
.iter()
|
2019-11-06 00:07:57 -08:00
|
|
|
.map(|x| limited_deserialize(&x.data[0..x.meta.size]).ok())
|
2018-12-07 19:01:28 -08:00
|
|
|
.collect()
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
|
|
|
|
2019-06-26 22:39:50 -07:00
|
|
|
#[allow(clippy::match_wild_err_arm)]
|
2019-06-10 22:05:46 -07:00
|
|
|
fn record_transactions(
|
2019-11-02 00:38:30 -07:00
|
|
|
bank_slot: Slot,
|
2019-06-10 22:05:46 -07:00
|
|
|
txs: &[Transaction],
|
2019-04-05 09:42:54 -07:00
|
|
|
results: &[transaction::Result<()>],
|
2019-02-24 08:59:49 -08:00
|
|
|
poh: &Arc<Mutex<PohRecorder>>,
|
2019-08-09 21:14:20 -07:00
|
|
|
) -> (Result<usize>, Vec<usize>) {
|
2019-06-29 06:34:49 -07:00
|
|
|
let mut processed_generation = Measure::start("record::process_generation");
|
2019-07-01 12:14:40 -07:00
|
|
|
let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results
|
2019-02-16 14:02:21 -08:00
|
|
|
.iter()
|
|
|
|
.zip(txs.iter())
|
2019-06-26 22:39:50 -07:00
|
|
|
.enumerate()
|
|
|
|
.filter_map(|(i, (r, x))| {
|
2019-04-23 15:32:19 -07:00
|
|
|
if Bank::can_commit(r) {
|
2019-07-01 12:14:40 -07:00
|
|
|
Some((x.clone(), i))
|
2019-04-23 15:32:19 -07:00
|
|
|
} else {
|
2019-02-16 14:02:21 -08:00
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
2019-07-01 12:14:40 -07:00
|
|
|
.unzip();
|
2019-06-26 22:39:50 -07:00
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
processed_generation.stop();
|
2019-08-09 21:14:20 -07:00
|
|
|
let num_to_commit = processed_transactions.len();
|
|
|
|
debug!("num_to_commit: {} ", num_to_commit);
|
2019-02-16 14:02:21 -08:00
|
|
|
// unlock all the accounts with errors which are filtered by the above `filter_map`
|
|
|
|
if !processed_transactions.is_empty() {
|
2019-08-09 21:14:20 -07:00
|
|
|
inc_new_counter_warn!("banking_stage-record_transactions", num_to_commit);
|
2019-06-29 06:34:49 -07:00
|
|
|
|
|
|
|
let mut hash_time = Measure::start("record::hash");
|
2019-06-26 22:39:50 -07:00
|
|
|
let hash = hash_transactions(&processed_transactions[..]);
|
2019-06-29 06:34:49 -07:00
|
|
|
hash_time.stop();
|
|
|
|
|
|
|
|
let mut poh_record = Measure::start("record::poh_record");
|
2019-05-07 15:51:35 -07:00
|
|
|
// record and unlock will unlock all the successful transactions
|
2019-06-26 22:39:50 -07:00
|
|
|
let res = poh
|
|
|
|
.lock()
|
2019-03-22 14:17:39 -07:00
|
|
|
.unwrap()
|
2019-06-26 22:39:50 -07:00
|
|
|
.record(bank_slot, hash, processed_transactions);
|
|
|
|
|
|
|
|
match res {
|
|
|
|
Ok(()) => (),
|
|
|
|
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)) => {
|
2019-07-01 12:14:40 -07:00
|
|
|
// If record errors, add all the committable transactions (the ones
|
|
|
|
// we just attempted to record) as retryable
|
2019-08-09 21:14:20 -07:00
|
|
|
return (
|
|
|
|
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)),
|
|
|
|
processed_transactions_indexes,
|
|
|
|
);
|
2019-06-26 22:39:50 -07:00
|
|
|
}
|
2019-07-08 18:21:52 -07:00
|
|
|
Err(e) => panic!(format!("Poh recorder returned unexpected error: {:?}", e)),
|
2019-06-26 22:39:50 -07:00
|
|
|
}
|
2019-06-29 06:34:49 -07:00
|
|
|
poh_record.stop();
|
2019-02-16 14:02:21 -08:00
|
|
|
}
|
2019-08-09 21:14:20 -07:00
|
|
|
(Ok(num_to_commit), vec![])
|
2019-02-16 14:02:21 -08:00
|
|
|
}
|
|
|
|
|
2019-03-23 13:30:56 -07:00
|
|
|
    /// Executes the already-locked `batch` on `bank`, records the committable
    /// transactions into PoH, and commits them.
    ///
    /// Returns the record result plus the indexes (relative to the batch) of
    /// transactions that should be retried.
    fn process_and_record_transactions_locked(
        bank: &Bank,
        poh: &Arc<Mutex<PohRecorder>>,
        batch: &TransactionBatch,
    ) -> (Result<usize>, Vec<usize>) {
        let mut load_execute_time = Measure::start("load_execute_time");
        // Use a shorter maximum age when adding transactions into the pipeline. This will reduce
        // the likelihood of any single thread getting starved and processing old ids.
        // TODO: Banking stage threads should be prioritized to complete faster then this queue
        // expires.
        let txs = batch.transactions();
        let (mut loaded_accounts, results, mut retryable_txs, tx_count, signature_count) =
            bank.load_and_execute_transactions(batch, MAX_PROCESSING_AGE);
        load_execute_time.stop();

        // Hold the freeze lock across record+commit so the bank cannot be
        // frozen between recording into PoH and committing the state changes.
        let freeze_lock = bank.freeze_lock();

        let mut record_time = Measure::start("record_time");
        let (num_to_commit, retryable_record_txs) =
            Self::record_transactions(bank.slot(), txs, &results, poh);
        retryable_txs.extend(retryable_record_txs);
        if num_to_commit.is_err() {
            // Recording failed (e.g. max PoH height reached): bail out and let
            // the caller retry the collected transactions.
            return (num_to_commit, retryable_txs);
        }
        record_time.stop();

        let mut commit_time = Measure::start("commit_time");

        let num_to_commit = num_to_commit.unwrap();

        if num_to_commit != 0 {
            bank.commit_transactions(
                txs,
                None,
                &mut loaded_accounts,
                &results,
                tx_count,
                signature_count,
            );
        }
        commit_time.stop();

        drop(freeze_lock);

        debug!(
            "bank: {} process_and_record_locked: {}us record: {}us commit: {}us txs_len: {}",
            bank.slot(),
            load_execute_time.as_us(),
            record_time.as_us(),
            commit_time.as_us(),
            txs.len(),
        );

        (Ok(num_to_commit), retryable_txs)
    }
|
|
|
|
|
|
|
|
    /// Locks the accounts for `txs`, processes and records them, then releases
    /// the locks.
    ///
    /// `chunk_offset` is the position of this chunk inside the caller's full
    /// transaction list; retryable indexes are shifted by it so they refer to
    /// the full list.
    pub fn process_and_record_transactions(
        bank: &Bank,
        txs: &[Transaction],
        poh: &Arc<Mutex<PohRecorder>>,
        chunk_offset: usize,
    ) -> (Result<usize>, Vec<usize>) {
        let mut lock_time = Measure::start("lock_time");
        // Once accounts are locked, other threads cannot encode transactions that will modify the
        // same account state
        let batch = bank.prepare_batch(txs, None);
        lock_time.stop();

        let (result, mut retryable_txs) =
            Self::process_and_record_transactions_locked(bank, poh, &batch);
        // Translate batch-relative retryable indexes into full-list indexes.
        retryable_txs.iter_mut().for_each(|x| *x += chunk_offset);

        let mut unlock_time = Measure::start("unlock_time");
        // Once the batch is dropped, the accounts are unlocked and new
        // transactions can enter the pipeline to process them
        drop(batch);
        unlock_time.stop();

        debug!(
            "bank: {} lock: {}us unlock: {}us txs_len: {}",
            bank.slot(),
            lock_time.as_us(),
            unlock_time.as_us(),
            txs.len(),
        );

        (result, retryable_txs)
    }
|
|
|
|
|
2019-02-13 19:12:14 -08:00
|
|
|
/// Sends transactions to the bank.
|
|
|
|
///
|
|
|
|
/// Returns the number of transactions successfully processed by the bank, which may be less
|
|
|
|
/// than the total number if max PoH height was reached and the bank halted
|
2018-09-21 21:01:13 -07:00
|
|
|
    fn process_transactions(
        bank: &Bank,
        transactions: &[Transaction],
        poh: &Arc<Mutex<PohRecorder>>,
    ) -> (usize, Vec<usize>) {
        let mut chunk_start = 0;
        let mut unprocessed_txs = vec![];
        // Feed the bank in fixed-size chunks until the whole slice is consumed
        // or PoH reports the slot ended.
        while chunk_start != transactions.len() {
            let chunk_end = std::cmp::min(
                transactions.len(),
                chunk_start + MAX_NUM_TRANSACTIONS_PER_BATCH,
            );

            let (result, retryable_txs_in_chunk) = Self::process_and_record_transactions(
                bank,
                &transactions[chunk_start..chunk_end],
                poh,
                chunk_start,
            );
            trace!("process_transactions result: {:?}", result);

            // Add the retryable txs (transactions that errored in a way that warrants a retry)
            // to the list of unprocessed txs.
            unprocessed_txs.extend_from_slice(&retryable_txs_in_chunk);
            if let Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)) = result {
                info!(
                    "process transactions: max height reached slot: {} height: {}",
                    bank.slot(),
                    bank.tick_height()
                );
                // process_and_record_transactions has returned all retryable errors in
                // transactions[chunk_start..chunk_end], so we just need to push the remaining
                // transactions into the unprocessed queue.
                unprocessed_txs.extend(chunk_end..transactions.len());
                break;
            }
            // Don't exit early on any other type of error, continue processing...
            chunk_start = chunk_end;
        }

        // `chunk_start` is how far we got before stopping (== len on full pass).
        (chunk_start, unprocessed_txs)
    }
|
|
|
|
|
2019-05-08 10:32:25 -07:00
|
|
|
// This function returns a vector of transactions that are not None. It also returns a vector
|
|
|
|
// with position of the transaction in the input list
|
|
|
|
fn filter_transaction_indexes(
|
2019-05-02 19:05:53 -07:00
|
|
|
transactions: Vec<Option<Transaction>>,
|
2019-05-07 10:23:02 -07:00
|
|
|
indexes: &[usize],
|
2019-05-08 10:32:25 -07:00
|
|
|
) -> (Vec<Transaction>, Vec<usize>) {
|
|
|
|
transactions
|
2019-04-11 17:23:45 -07:00
|
|
|
.into_iter()
|
2019-05-07 10:23:02 -07:00
|
|
|
.zip(indexes)
|
|
|
|
.filter_map(|(tx, index)| match tx {
|
2019-04-11 17:23:45 -07:00
|
|
|
None => None,
|
2019-05-07 10:23:02 -07:00
|
|
|
Some(tx) => Some((tx, index)),
|
2019-04-11 17:23:45 -07:00
|
|
|
})
|
2019-05-08 10:32:25 -07:00
|
|
|
.unzip()
|
|
|
|
}
|
2019-05-02 19:05:53 -07:00
|
|
|
|
2019-05-08 10:32:25 -07:00
|
|
|
// This function creates a filter of transaction results with Ok() for every pending
|
|
|
|
// transaction. The non-pending transactions are marked with TransactionError
|
|
|
|
fn prepare_filter_for_pending_transactions(
|
|
|
|
transactions: &[Transaction],
|
|
|
|
pending_tx_indexes: &[usize],
|
|
|
|
) -> Vec<transaction::Result<()>> {
|
|
|
|
let mut mask = vec![Err(TransactionError::BlockhashNotFound); transactions.len()];
|
|
|
|
pending_tx_indexes.iter().for_each(|x| mask[*x] = Ok(()));
|
|
|
|
mask
|
|
|
|
}
|
2019-05-07 10:23:02 -07:00
|
|
|
|
2019-05-08 10:32:25 -07:00
|
|
|
// This function returns a vector containing index of all valid transactions. A valid
|
|
|
|
// transaction has result Ok() as the value
|
|
|
|
fn filter_valid_transaction_indexes(
|
|
|
|
valid_txs: &[transaction::Result<()>],
|
|
|
|
transaction_indexes: &[usize],
|
|
|
|
) -> Vec<usize> {
|
|
|
|
let valid_transactions = valid_txs
|
2019-05-07 10:23:02 -07:00
|
|
|
.iter()
|
2019-05-08 10:32:25 -07:00
|
|
|
.enumerate()
|
|
|
|
.filter_map(|(index, x)| if x.is_ok() { Some(index) } else { None })
|
|
|
|
.collect_vec();
|
2019-05-02 19:05:53 -07:00
|
|
|
|
2019-05-08 10:32:25 -07:00
|
|
|
valid_transactions
|
|
|
|
.iter()
|
|
|
|
.map(|x| transaction_indexes[*x])
|
|
|
|
.collect()
|
2019-05-02 19:05:53 -07:00
|
|
|
}
|
|
|
|
|
2019-05-13 14:40:05 -07:00
|
|
|
// This function deserializes packets into transactions and returns non-None transactions
|
|
|
|
fn transactions_from_packets(
|
2019-05-02 19:05:53 -07:00
|
|
|
msgs: &Packets,
|
2019-05-13 14:40:05 -07:00
|
|
|
transaction_indexes: &[usize],
|
|
|
|
) -> (Vec<Transaction>, Vec<usize>) {
|
2019-05-07 10:23:02 -07:00
|
|
|
let packets = Packets::new(
|
2019-05-08 10:32:25 -07:00
|
|
|
transaction_indexes
|
2019-05-07 10:23:02 -07:00
|
|
|
.iter()
|
|
|
|
.map(|x| msgs.packets[*x].to_owned())
|
|
|
|
.collect_vec(),
|
|
|
|
);
|
|
|
|
|
|
|
|
let transactions = Self::deserialize_transactions(&packets);
|
2019-04-11 17:23:45 -07:00
|
|
|
|
2019-05-13 14:40:05 -07:00
|
|
|
Self::filter_transaction_indexes(transactions, &transaction_indexes)
|
|
|
|
}
|
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
/// This function filters pending packets that are still valid
|
|
|
|
/// # Arguments
|
|
|
|
/// * `transactions` - a batch of transactions deserialized from packets
|
|
|
|
/// * `transaction_to_packet_indexes` - maps each transaction to a packet index
|
|
|
|
/// * `pending_indexes` - identifies which indexes in the `transactions` list are still pending
|
|
|
|
fn filter_pending_packets_from_pending_txs(
|
2019-05-13 14:40:05 -07:00
|
|
|
bank: &Arc<Bank>,
|
|
|
|
transactions: &[Transaction],
|
2019-07-01 12:14:40 -07:00
|
|
|
transaction_to_packet_indexes: &[usize],
|
2019-05-13 14:40:05 -07:00
|
|
|
pending_indexes: &[usize],
|
|
|
|
) -> Vec<usize> {
|
|
|
|
let filter = Self::prepare_filter_for_pending_transactions(transactions, pending_indexes);
|
|
|
|
|
|
|
|
let mut error_counters = ErrorCounters::default();
|
2019-06-24 15:56:50 -07:00
|
|
|
// The following code also checks if the blockhash for a transaction is too old
|
|
|
|
// The check accounts for
|
|
|
|
// 1. Transaction forwarding delay
|
|
|
|
// 2. The slot at which the next leader will actually process the transaction
|
|
|
|
// Drop the transaction if it will expire by the time the next node receives and processes it
|
2019-10-09 16:09:36 -07:00
|
|
|
let api = perf_libs::api();
|
|
|
|
let max_tx_fwd_delay = if api.is_none() {
|
|
|
|
MAX_TRANSACTION_FORWARDING_DELAY
|
|
|
|
} else {
|
|
|
|
MAX_TRANSACTION_FORWARDING_DELAY_GPU
|
|
|
|
};
|
2019-05-13 14:40:05 -07:00
|
|
|
let result = bank.check_transactions(
|
|
|
|
transactions,
|
2019-08-28 08:38:32 -07:00
|
|
|
None,
|
2019-05-13 14:40:05 -07:00
|
|
|
&filter,
|
2019-06-24 16:46:34 -07:00
|
|
|
(MAX_PROCESSING_AGE)
|
2019-10-09 16:09:36 -07:00
|
|
|
.saturating_sub(max_tx_fwd_delay)
|
2019-06-24 15:56:50 -07:00
|
|
|
.saturating_sub(
|
|
|
|
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET * bank.ticks_per_slot()
|
2019-08-19 23:22:56 -07:00
|
|
|
/ DEFAULT_TICKS_PER_SECOND) as usize,
|
2019-06-24 15:56:50 -07:00
|
|
|
),
|
2019-05-13 14:40:05 -07:00
|
|
|
&mut error_counters,
|
2019-05-08 10:32:25 -07:00
|
|
|
);
|
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
Self::filter_valid_transaction_indexes(&result, transaction_to_packet_indexes)
|
2019-05-13 14:40:05 -07:00
|
|
|
}
|
2019-05-08 10:32:25 -07:00
|
|
|
|
2019-05-13 14:40:05 -07:00
|
|
|
fn process_received_packets(
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
poh: &Arc<Mutex<PohRecorder>>,
|
|
|
|
msgs: &Packets,
|
2019-07-01 12:14:40 -07:00
|
|
|
packet_indexes: Vec<usize>,
|
2019-06-26 22:39:50 -07:00
|
|
|
) -> (usize, usize, Vec<usize>) {
|
2019-07-01 12:14:40 -07:00
|
|
|
let (transactions, transaction_to_packet_indexes) =
|
|
|
|
Self::transactions_from_packets(msgs, &packet_indexes);
|
2019-05-08 10:32:25 -07:00
|
|
|
debug!(
|
|
|
|
"bank: {} filtered transactions {}",
|
|
|
|
bank.slot(),
|
|
|
|
transactions.len()
|
|
|
|
);
|
|
|
|
|
|
|
|
let tx_len = transactions.len();
|
|
|
|
|
|
|
|
let (processed, unprocessed_tx_indexes) =
|
2019-06-26 22:39:50 -07:00
|
|
|
Self::process_transactions(bank, &transactions, poh);
|
2019-05-08 10:32:25 -07:00
|
|
|
|
2019-05-13 14:40:05 -07:00
|
|
|
let unprocessed_tx_count = unprocessed_tx_indexes.len();
|
2019-05-08 10:32:25 -07:00
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
|
2019-05-13 14:40:05 -07:00
|
|
|
bank,
|
2019-05-08 10:32:25 -07:00
|
|
|
&transactions,
|
2019-07-01 12:14:40 -07:00
|
|
|
&transaction_to_packet_indexes,
|
2019-05-13 14:40:05 -07:00
|
|
|
&unprocessed_tx_indexes,
|
|
|
|
);
|
|
|
|
inc_new_counter_info!(
|
|
|
|
"banking_stage-dropped_tx_before_forwarding",
|
2019-07-01 12:14:40 -07:00
|
|
|
unprocessed_tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
|
2019-05-08 10:32:25 -07:00
|
|
|
);
|
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
(processed, tx_len, filtered_unprocessed_packet_indexes)
|
2019-05-13 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn filter_unprocessed_packets(
|
|
|
|
bank: &Arc<Bank>,
|
|
|
|
msgs: &Packets,
|
|
|
|
transaction_indexes: &[usize],
|
2019-05-23 23:20:04 -07:00
|
|
|
my_pubkey: &Pubkey,
|
2019-05-22 17:54:28 -07:00
|
|
|
next_leader: Option<Pubkey>,
|
2019-05-13 14:40:05 -07:00
|
|
|
) -> Vec<usize> {
|
2019-05-22 17:54:28 -07:00
|
|
|
// Check if we are the next leader. If so, let's not filter the packets
|
|
|
|
// as we'll filter it again while processing the packets.
|
|
|
|
// Filtering helps if we were going to forward the packets to some other node
|
|
|
|
if let Some(leader) = next_leader {
|
2019-05-23 23:20:04 -07:00
|
|
|
if leader == *my_pubkey {
|
2019-05-22 17:54:28 -07:00
|
|
|
return transaction_indexes.to_vec();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
let (transactions, transaction_to_packet_indexes) =
|
2019-05-13 14:40:05 -07:00
|
|
|
Self::transactions_from_packets(msgs, &transaction_indexes);
|
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
let tx_count = transaction_to_packet_indexes.len();
|
2019-05-13 14:40:05 -07:00
|
|
|
|
|
|
|
let unprocessed_tx_indexes = (0..transactions.len()).collect_vec();
|
2019-07-01 12:14:40 -07:00
|
|
|
let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
|
2019-05-13 14:40:05 -07:00
|
|
|
bank,
|
|
|
|
&transactions,
|
2019-07-01 12:14:40 -07:00
|
|
|
&transaction_to_packet_indexes,
|
2019-05-13 14:40:05 -07:00
|
|
|
&unprocessed_tx_indexes,
|
|
|
|
);
|
|
|
|
|
|
|
|
inc_new_counter_info!(
|
|
|
|
"banking_stage-dropped_tx_before_forwarding",
|
2019-07-01 12:14:40 -07:00
|
|
|
tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
|
2019-05-13 14:40:05 -07:00
|
|
|
);
|
|
|
|
|
2019-07-01 12:14:40 -07:00
|
|
|
filtered_unprocessed_packet_indexes
|
2019-04-11 17:23:45 -07:00
|
|
|
}
|
|
|
|
|
2019-11-07 19:48:33 -08:00
|
|
|
fn generate_packet_indexes(vers: &PinnedVec<Packet>) -> Vec<usize> {
|
2019-05-20 09:15:00 -07:00
|
|
|
vers.iter()
|
|
|
|
.enumerate()
|
2019-11-01 14:23:03 -07:00
|
|
|
.filter_map(
|
|
|
|
|(index, ver)| {
|
|
|
|
if !ver.meta.discard {
|
|
|
|
Some(index)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
},
|
|
|
|
)
|
2019-05-20 09:15:00 -07:00
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2019-02-07 20:20:40 -08:00
|
|
|
    /// Process the incoming packets
    ///
    /// Receives one batch of verified packet groups from `verified_receiver`,
    /// processes as many transactions as possible against the current PoH bank,
    /// and returns the packets (paired with their still-pending indexes) that
    /// could not be processed, so the caller can buffer or forward them.
    pub fn process_packets(
        my_pubkey: &Pubkey,
        verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
        poh: &Arc<Mutex<PohRecorder>>,
        recv_start: &mut Instant,
        recv_timeout: Duration,
        id: u32,
        batch_limit: usize,
    ) -> Result<UnprocessedPackets> {
        let mut recv_time = Measure::start("process_packets_recv");
        // Propagates a timeout/disconnect error to the caller via `?`.
        let mms = verified_receiver.recv_timeout(recv_timeout)?;
        recv_time.stop();

        let mms_len = mms.len();
        let count: usize = mms.iter().map(|x| x.packets.len()).sum();
        debug!(
            "@{:?} process start stalled for: {:?}ms txs: {} id: {}",
            timestamp(),
            duration_as_ms(&recv_start.elapsed()),
            count,
            id,
        );
        inc_new_counter_debug!("banking_stage-transactions_received", count);
        let mut proc_start = Measure::start("process_received_packets_process");
        let mut new_tx_count = 0;

        // NOTE: `mms_iter` is deliberately a named iterator (not a `for` loop)
        // because the inner while-let below continues draining the SAME iterator
        // from wherever the outer loop stopped.
        let mut mms_iter = mms.into_iter();
        let mut unprocessed_packets = vec![];
        let mut dropped_batches_count = 0;
        while let Some(msgs) = mms_iter.next() {
            let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
            let bank = poh.lock().unwrap().bank();
            if bank.is_none() {
                // No working bank (we are not leader right now): buffer the whole
                // batch for later processing or forwarding.
                Self::push_unprocessed(
                    &mut unprocessed_packets,
                    msgs,
                    packet_indexes,
                    &mut dropped_batches_count,
                    batch_limit,
                );
                continue;
            }
            let bank = bank.unwrap();

            let (processed, verified_txs_len, unprocessed_indexes) =
                Self::process_received_packets(&bank, &poh, &msgs, packet_indexes);

            new_tx_count += processed;

            // Collect any unprocessed transactions in this batch for forwarding
            Self::push_unprocessed(
                &mut unprocessed_packets,
                msgs,
                unprocessed_indexes,
                &mut dropped_batches_count,
                batch_limit,
            );

            // If we could not process the full batch (e.g. the PoH slot ended),
            // stop processing and just filter-and-buffer the remaining batches.
            if processed < verified_txs_len {
                let next_leader = poh.lock().unwrap().next_slot_leader();
                // Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
                #[allow(clippy::while_let_on_iterator)]
                while let Some(msgs) = mms_iter.next() {
                    let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
                    let unprocessed_indexes = Self::filter_unprocessed_packets(
                        &bank,
                        &msgs,
                        &packet_indexes,
                        &my_pubkey,
                        next_leader,
                    );
                    Self::push_unprocessed(
                        &mut unprocessed_packets,
                        msgs,
                        unprocessed_indexes,
                        &mut dropped_batches_count,
                        batch_limit,
                    );
                }
            }
        }

        proc_start.stop();

        inc_new_counter_debug!("banking_stage-time_ms", proc_start.as_ms() as usize);
        debug!(
            "@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {} total count: {} id: {}",
            timestamp(),
            mms_len,
            proc_start.as_ms(),
            new_tx_count,
            (new_tx_count as f32) / (proc_start.as_s()),
            count,
            id,
        );
        inc_new_counter_debug!("banking_stage-process_packets", count);
        inc_new_counter_debug!("banking_stage-process_transactions", new_tx_count);
        inc_new_counter_debug!("banking_stage-dropped_batches_count", dropped_batches_count);

        // Reset the stall timer used in the "process start stalled" log above.
        *recv_start = Instant::now();

        Ok(unprocessed_packets)
    }
|
2019-05-20 09:15:00 -07:00
|
|
|
|
|
|
|
fn push_unprocessed(
|
|
|
|
unprocessed_packets: &mut UnprocessedPackets,
|
2019-05-20 17:48:42 -07:00
|
|
|
packets: Packets,
|
2019-05-20 09:15:00 -07:00
|
|
|
packet_indexes: Vec<usize>,
|
2019-06-28 01:55:24 -07:00
|
|
|
dropped_batches_count: &mut usize,
|
|
|
|
batch_limit: usize,
|
2019-05-20 09:15:00 -07:00
|
|
|
) {
|
|
|
|
if !packet_indexes.is_empty() {
|
2019-06-28 01:55:24 -07:00
|
|
|
if unprocessed_packets.len() >= batch_limit {
|
2019-06-27 00:32:32 -07:00
|
|
|
unprocessed_packets.remove(0);
|
2019-06-28 01:55:24 -07:00
|
|
|
*dropped_batches_count += 1;
|
2019-06-27 00:32:32 -07:00
|
|
|
}
|
2019-05-20 09:15:00 -07:00
|
|
|
unprocessed_packets.push((packets, packet_indexes));
|
|
|
|
}
|
|
|
|
}
|
2018-05-14 16:31:13 -07:00
|
|
|
}
|
|
|
|
|
2018-07-03 21:14:08 -07:00
|
|
|
impl Service for BankingStage {
|
2019-02-13 19:12:14 -08:00
|
|
|
type JoinReturnType = ();
|
2018-10-18 22:57:48 -07:00
|
|
|
|
2019-02-13 19:12:14 -08:00
|
|
|
fn join(self) -> thread::Result<()> {
|
2018-10-18 22:57:48 -07:00
|
|
|
for bank_thread_hdl in self.bank_thread_hdls {
|
2019-02-13 19:12:14 -08:00
|
|
|
bank_thread_hdl.join()?;
|
2018-09-26 05:52:13 -07:00
|
|
|
}
|
2019-02-13 19:12:14 -08:00
|
|
|
Ok(())
|
2018-07-03 21:14:08 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
pub fn create_test_recorder(
|
|
|
|
bank: &Arc<Bank>,
|
2019-03-29 20:00:36 -07:00
|
|
|
blocktree: &Arc<Blocktree>,
|
2019-10-16 12:37:27 -07:00
|
|
|
poh_config: Option<PohConfig>,
|
2019-03-03 16:44:06 -08:00
|
|
|
) -> (
|
2019-03-04 20:50:02 -08:00
|
|
|
Arc<AtomicBool>,
|
2019-03-03 16:44:06 -08:00
|
|
|
Arc<Mutex<PohRecorder>>,
|
|
|
|
PohService,
|
2019-09-18 12:16:22 -07:00
|
|
|
Receiver<WorkingBankEntry>,
|
2019-03-03 16:44:06 -08:00
|
|
|
) {
|
|
|
|
let exit = Arc::new(AtomicBool::new(false));
|
2019-10-16 12:37:27 -07:00
|
|
|
let poh_config = Arc::new(poh_config.unwrap_or_default());
|
2019-03-18 22:08:21 -07:00
|
|
|
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
2019-03-15 13:22:16 -07:00
|
|
|
bank.tick_height(),
|
|
|
|
bank.last_blockhash(),
|
|
|
|
bank.slot(),
|
2019-07-26 11:33:51 -07:00
|
|
|
Some((4, 4)),
|
2019-03-15 13:22:16 -07:00
|
|
|
bank.ticks_per_slot(),
|
2019-03-20 14:23:58 -07:00
|
|
|
&Pubkey::default(),
|
2019-03-29 20:00:36 -07:00
|
|
|
blocktree,
|
2019-04-19 02:39:44 -07:00
|
|
|
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
2019-05-18 14:01:36 -07:00
|
|
|
&poh_config,
|
2019-03-15 13:22:16 -07:00
|
|
|
);
|
2019-03-18 22:08:21 -07:00
|
|
|
poh_recorder.set_bank(&bank);
|
|
|
|
|
2019-03-03 16:44:06 -08:00
|
|
|
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
2019-05-18 14:01:36 -07:00
|
|
|
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
|
2019-03-18 22:08:21 -07:00
|
|
|
|
2019-03-04 20:50:02 -08:00
|
|
|
(exit, poh_recorder, poh_service, entry_receiver)
|
2019-03-03 16:44:06 -08:00
|
|
|
}
|
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
|
|
|
use super::*;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::cluster_info::Node;
|
2019-11-08 20:56:57 -08:00
|
|
|
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::packet::to_packets;
|
2019-03-03 16:44:06 -08:00
|
|
|
use crate::poh_recorder::WorkingBank;
|
2019-06-26 18:42:27 -07:00
|
|
|
use crossbeam_channel::unbounded;
|
2019-05-10 14:28:38 -07:00
|
|
|
use itertools::Itertools;
|
2019-10-18 09:28:51 -07:00
|
|
|
use solana_ledger::blocktree::get_tmp_ledger_path;
|
|
|
|
use solana_ledger::entry::{Entry, EntrySlice};
|
2019-03-23 20:12:27 -07:00
|
|
|
use solana_sdk::instruction::InstructionError;
|
2018-12-03 10:26:28 -08:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2019-04-03 08:45:57 -07:00
|
|
|
use solana_sdk::system_transaction;
|
2019-04-23 15:32:19 -07:00
|
|
|
use solana_sdk::transaction::TransactionError;
|
2019-06-26 18:42:27 -07:00
|
|
|
use std::sync::atomic::Ordering;
|
2018-09-25 15:01:51 -07:00
|
|
|
use std::thread::sleep;
|
|
|
|
|
|
|
|
    #[test]
    fn test_banking_stage_shutdown1() {
        // Smoke test: a BankingStage with no input must shut down cleanly once
        // its input channels are dropped and the exit flag is set.
        let genesis_config = create_genesis_config(2).genesis_config;
        let bank = Arc::new(Bank::new(&genesis_config));
        let (verified_sender, verified_receiver) = unbounded();
        let (vote_sender, vote_receiver) = unbounded();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
            );
            let (exit, poh_recorder, poh_service, _entry_receiever) =
                create_test_recorder(&bank, &blocktree, None);
            let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
            let cluster_info = Arc::new(RwLock::new(cluster_info));
            let banking_stage = BankingStage::new(
                &cluster_info,
                &poh_recorder,
                verified_receiver,
                vote_receiver,
            );
            // Dropping the senders disconnects the stage's receivers, which makes
            // its worker threads exit their receive loops.
            drop(verified_sender);
            drop(vote_sender);
            exit.store(true, Ordering::Relaxed);
            banking_stage.join().unwrap();
            poh_service.join().unwrap();
        }
        // The blocktree handle must be dropped (end of scope above) before the
        // ledger files can be destroyed.
        Blocktree::destroy(&ledger_path).unwrap();
    }
|
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
    #[test]
    fn test_banking_stage_tick() {
        // With no transactions submitted, the stage should still emit exactly one
        // slot's worth of tick entries that chain from the starting blockhash.
        solana_logger::setup();
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(2);
        genesis_config.ticks_per_slot = 4;
        let num_extra_ticks = 2;
        let bank = Arc::new(Bank::new(&genesis_config));
        let start_hash = bank.last_blockhash();
        let (verified_sender, verified_receiver) = unbounded();
        let (vote_sender, vote_receiver) = unbounded();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
            );
            // Let PoH tick past the bank's max height so the slot completes.
            let mut poh_config = PohConfig::default();
            poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
            let (exit, poh_recorder, poh_service, entry_receiver) =
                create_test_recorder(&bank, &blocktree, Some(poh_config));
            let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
            let cluster_info = Arc::new(RwLock::new(cluster_info));
            let banking_stage = BankingStage::new(
                &cluster_info,
                &poh_recorder,
                verified_receiver,
                vote_receiver,
            );
            trace!("sending bank");
            drop(verified_sender);
            drop(vote_sender);
            exit.store(true, Ordering::Relaxed);
            poh_service.join().unwrap();
            // Drop the recorder so the entry channel disconnects and the
            // `entry_receiver.iter()` below terminates.
            drop(poh_recorder);

            trace!("getting entries");
            let entries: Vec<_> = entry_receiver
                .iter()
                .map(|(_bank, (entry, _tick_height))| entry)
                .collect();
            trace!("done");
            // Only ticks for the slot itself are recorded; the extra ticks happen
            // after the working bank's max height and are not emitted as entries.
            assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
            assert!(entries.verify(&start_hash));
            assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
            banking_stage.join().unwrap();
        }
        Blocktree::destroy(&ledger_path).unwrap();
    }
|
|
|
|
|
2019-11-01 14:23:03 -07:00
|
|
|
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
|
|
|
|
with_vers.iter_mut().for_each(|(b, v)| {
|
|
|
|
b.packets
|
|
|
|
.iter_mut()
|
|
|
|
.zip(v)
|
|
|
|
.for_each(|(p, f)| p.meta.discard = *f == 0)
|
|
|
|
});
|
|
|
|
with_vers.into_iter().map(|(b, _)| b).collect()
|
|
|
|
}
|
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
    #[test]
    fn test_banking_stage_entries_only() {
        // End-to-end: send one batch containing a good tx, a tx marked unverified,
        // and a tx from an unfunded account; only the good tx should take effect
        // when the emitted entries are replayed on a fresh bank.
        solana_logger::setup();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(10);
        let bank = Arc::new(Bank::new(&genesis_config));
        let start_hash = bank.last_blockhash();
        let (verified_sender, verified_receiver) = unbounded();
        let (vote_sender, vote_receiver) = unbounded();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blocktree = Arc::new(
                Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
            );
            let mut poh_config = PohConfig::default();
            // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
            poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
            let (exit, poh_recorder, poh_service, entry_receiver) =
                create_test_recorder(&bank, &blocktree, Some(poh_config));
            let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
            let cluster_info = Arc::new(RwLock::new(cluster_info));
            let banking_stage = BankingStage::new(
                &cluster_info,
                &poh_recorder,
                verified_receiver,
                vote_receiver,
            );

            // fund another account so we can send 2 good transactions in a single batch.
            let keypair = Keypair::new();
            let fund_tx =
                system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash);
            bank.process_transaction(&fund_tx).unwrap();

            // good tx
            let to = Pubkey::new_rand();
            let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);

            // good tx, but no verify
            let to2 = Pubkey::new_rand();
            let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);

            // bad tx, AccountNotFound
            let keypair = Keypair::new();
            let to3 = Pubkey::new_rand();
            let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);

            // send 'em over
            let packets = to_packets(&[tx_no_ver, tx_anf, tx]);

            // glad they all fit
            assert_eq!(packets.len(), 1);

            // Sigverify flags: 0 marks tx_no_ver as discarded, 1 keeps the others.
            let packets = packets
                .into_iter()
                .map(|packets| (packets, vec![0u8, 1u8, 1u8]))
                .collect();
            let packets = convert_from_old_verified(packets);
            verified_sender // no_ver, anf, tx
                .send(packets)
                .unwrap();

            drop(verified_sender);
            drop(vote_sender);
            // wait until banking_stage to finish up all packets
            banking_stage.join().unwrap();

            exit.store(true, Ordering::Relaxed);
            poh_service.join().unwrap();
            drop(poh_recorder);

            // Replay the emitted entries on a fresh bank and verify the hashes chain.
            let mut blockhash = start_hash;
            let bank = Bank::new(&genesis_config);
            bank.process_transaction(&fund_tx).unwrap();
            //receive entries + ticks
            loop {
                let entries: Vec<Entry> = entry_receiver
                    .iter()
                    .map(|(_bank, (entry, _tick_height))| entry)
                    .collect();

                assert!(entries.verify(&blockhash));
                if !entries.is_empty() {
                    blockhash = entries.last().unwrap().hash;
                    for entry in entries {
                        bank.process_transactions(&entry.transactions)
                            .iter()
                            .for_each(|x| assert_eq!(*x, Ok(())));
                    }
                }

                // Stop once the good transaction's credit has landed on replay.
                if bank.get_balance(&to) == 1 {
                    break;
                }

                sleep(Duration::from_millis(200));
            }

            // The unverified transaction must not have been processed.
            assert_eq!(bank.get_balance(&to), 1);
            assert_eq!(bank.get_balance(&to2), 0);

            drop(entry_receiver);
        }
        Blocktree::destroy(&ledger_path).unwrap();
    }
|
2019-03-03 16:44:06 -08:00
|
|
|
|
2018-09-25 15:01:51 -07:00
|
|
|
    #[test]
    fn test_banking_stage_entryfication() {
        solana_logger::setup();
        // In this attack we'll demonstrate that a verifier can interpret the ledger
        // differently if either the server doesn't signal the ledger to add an
        // Entry OR if the verifier tries to parallelize across multiple Entries.
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(2);
        let (verified_sender, verified_receiver) = unbounded();

        // Process a batch that includes a transaction that receives two lamports.
        let alice = Keypair::new();
        let tx =
            system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash());

        let packets = to_packets(&[tx]);
        let packets = packets
            .into_iter()
            .map(|packets| (packets, vec![1u8]))
            .collect();
        let packets = convert_from_old_verified(packets);
        verified_sender.send(packets).unwrap();

        // Process a second batch that uses the same from account, so conflicts with above TX
        let tx =
            system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash());
        let packets = to_packets(&[tx]);
        let packets = packets
            .into_iter()
            .map(|packets| (packets, vec![1u8]))
            .collect();
        let packets = convert_from_old_verified(packets);
        verified_sender.send(packets).unwrap();

        let (vote_sender, vote_receiver) = unbounded();
        let ledger_path = get_tmp_ledger_path!();
        {
            let entry_receiver = {
                // start a banking_stage to eat verified receiver
                let bank = Arc::new(Bank::new(&genesis_config));
                let blocktree = Arc::new(
                    Blocktree::open(&ledger_path)
                        .expect("Expected to be able to open database ledger"),
                );
                let mut poh_config = PohConfig::default();
                // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
                poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
                let (exit, poh_recorder, poh_service, entry_receiver) =
                    create_test_recorder(&bank, &blocktree, Some(poh_config));
                let cluster_info =
                    ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
                let cluster_info = Arc::new(RwLock::new(cluster_info));
                // Two threads so the conflicting batches can be picked up separately.
                let _banking_stage = BankingStage::new_num_threads(
                    &cluster_info,
                    &poh_recorder,
                    verified_receiver,
                    vote_receiver,
                    2,
                );

                // wait for banking_stage to eat the packets
                while bank.get_balance(&alice.pubkey()) < 2 {
                    sleep(Duration::from_millis(100));
                }
                exit.store(true, Ordering::Relaxed);
                poh_service.join().unwrap();
                entry_receiver
            };
            drop(verified_sender);
            drop(vote_sender);

            // consume the entire entry_receiver, feed it into a new bank
            // check that the balance is what we expect.
            let entries: Vec<_> = entry_receiver
                .iter()
                .map(|(_bank, (entry, _tick_height))| entry)
                .collect();

            let bank = Bank::new(&genesis_config);
            for entry in &entries {
                bank.process_transactions(&entry.transactions)
                    .iter()
                    .for_each(|x| assert_eq!(*x, Ok(())));
            }

            // Assert the user holds two lamports, not three. If the stage only outputs one
            // entry, then the second transaction will be rejected, because it drives
            // the account balance below zero before the credit is added.
            assert_eq!(bank.get_balance(&alice.pubkey()), 2);
        }
        Blocktree::destroy(&ledger_path).unwrap();
    }
|
2019-02-16 14:02:21 -08:00
|
|
|
|
|
|
|
#[test]
fn test_bank_record_transactions() {
    // Verifies which transaction results BankingStage::record_transactions
    // forwards to the PoH recorder, and how retryable indexes are reported
    // once the recorder's bank slot no longer matches the requested slot.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let bank = Arc::new(Bank::new(&genesis_config));
    let working_bank = WorkingBank {
        bank: bank.clone(),
        min_tick_height: bank.tick_height(),
        // Effectively unlimited so ticking never clears the working bank mid-test.
        max_tick_height: std::u64::MAX,
    };
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let (poh_recorder, entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.slot(),
            None,
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blocktree),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &Arc::new(PohConfig::default()),
        );
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));

        poh_recorder.lock().unwrap().set_working_bank(working_bank);
        let pubkey = Pubkey::new_rand();
        let keypair2 = Keypair::new();
        let pubkey2 = Pubkey::new_rand();

        let transactions = vec![
            system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
            system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()),
        ];

        // Both results Ok: both transactions should land in a single entry.
        let mut results = vec![Ok(()), Ok(())];
        let _ = BankingStage::record_transactions(
            bank.slot(),
            &transactions,
            &results,
            &poh_recorder,
        );
        let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
        assert_eq!(entry.transactions.len(), transactions.len());

        // InstructionErrors should still be recorded
        results[0] = Err(TransactionError::InstructionError(
            1,
            InstructionError::new_result_with_negative_lamports(),
        ));
        let (res, retryable) = BankingStage::record_transactions(
            bank.slot(),
            &transactions,
            &results,
            &poh_recorder,
        );
        res.unwrap();
        assert!(retryable.is_empty());
        let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
        assert_eq!(entry.transactions.len(), transactions.len());

        // Other TransactionErrors should not be recorded
        results[0] = Err(TransactionError::AccountNotFound);
        let (res, retryable) = BankingStage::record_transactions(
            bank.slot(),
            &transactions,
            &results,
            &poh_recorder,
        );
        res.unwrap();
        assert!(retryable.is_empty());
        let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
        // Only the second (Ok) transaction makes it into the entry.
        assert_eq!(entry.transactions.len(), transactions.len() - 1);

        // Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions),
        // record_transactions should throw MaxHeightReached and return the set of retryable
        // txs
        let (res, retryable) = BankingStage::record_transactions(
            bank.slot() + 1,
            &transactions,
            &results,
            &poh_recorder,
        );
        assert_matches!(
            res,
            Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
        );
        // The first result was an error so it's filtered out. The second result was Ok(),
        // so it should be marked as retryable
        assert_eq!(retryable, vec![1]);
        // Should receive nothing from PohRecorder b/c record failed
        assert!(entry_receiver.try_recv().is_err());
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
|
|
|
|
2019-05-02 19:05:53 -07:00
|
|
|
#[test]
fn test_bank_filter_transaction_indexes() {
    // filter_transaction_indexes should drop the `None` slots and return
    // the packet indexes corresponding to the surviving transactions.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let pubkey = Pubkey::new_rand();

    // All transfers in this test are identical; mint one on demand.
    let transfer =
        || system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash());

    // Presence pattern of the 13 slots: `true` means a deserialized transaction.
    let pattern = [
        false, true, true, true, false, false, true, false, true, false, true, false, false,
    ];
    let transactions: Vec<_> = pattern
        .iter()
        .map(|&present| if present { Some(transfer()) } else { None })
        .collect();

    // Six transfers survive the filter.
    let filtered_transactions: Vec<_> = (0..6).map(|_| transfer()).collect();

    // Identity index mapping: surviving indexes are the `Some` positions.
    assert_eq!(
        BankingStage::filter_transaction_indexes(
            transactions.clone(),
            &vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
        ),
        (filtered_transactions.clone(), vec![1, 2, 3, 6, 8, 10])
    );

    // Shifted index mapping: surviving indexes come from the supplied list.
    assert_eq!(
        BankingStage::filter_transaction_indexes(
            transactions,
            &vec![1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15],
        ),
        (filtered_transactions, vec![2, 4, 5, 9, 11, 13])
    );
}
|
2019-05-02 19:05:53 -07:00
|
|
|
|
2019-05-08 10:32:25 -07:00
|
|
|
#[test]
fn test_bank_prepare_filter_for_pending_transaction() {
    // prepare_filter_for_pending_transactions should mark only the pending
    // indexes as Ok(()) and everything else as BlockhashNotFound.
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let pubkey = Pubkey::new_rand();

    // Six identical transfers; only the pending index set matters here.
    let transactions: Vec<_> = (0..6)
        .map(|_| system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()))
        .collect();

    // Build the expected filter from a pending-index set: pending -> Ok(()),
    // everything else -> Err(BlockhashNotFound).
    let expected_filter = |pending: &[usize]| {
        (0..transactions.len())
            .map(|i| {
                if pending.contains(&i) {
                    Ok(())
                } else {
                    Err(TransactionError::BlockhashNotFound)
                }
            })
            .collect::<Vec<_>>()
    };

    assert_eq!(
        BankingStage::prepare_filter_for_pending_transactions(&transactions, &vec![2, 4, 5],),
        expected_filter(&[2, 4, 5])
    );

    assert_eq!(
        BankingStage::prepare_filter_for_pending_transactions(&transactions, &vec![0, 2, 3],),
        expected_filter(&[0, 2, 3])
    );
}
|
2019-05-02 19:05:53 -07:00
|
|
|
|
2019-05-08 10:32:25 -07:00
|
|
|
#[test]
fn test_bank_filter_valid_transaction_indexes() {
    // filter_valid_transaction_indexes keeps only the indexes whose
    // corresponding result is Ok(()).
    //
    // Build a results vector from a pass/fail pattern: `true` -> Ok(()),
    // `false` -> Err(BlockhashNotFound).
    let results_from = |pattern: &[bool]| {
        pattern
            .iter()
            .map(|&ok| {
                if ok {
                    Ok(())
                } else {
                    Err(TransactionError::BlockhashNotFound)
                }
            })
            .collect::<Vec<_>>()
    };

    // Ok results sit at positions 2, 4, 5; their packet indexes survive.
    assert_eq!(
        BankingStage::filter_valid_transaction_indexes(
            &results_from(&[false, false, true, false, true, true]),
            &vec![2, 4, 5, 9, 11, 13]
        ),
        vec![5, 11, 13]
    );

    // Ok results sit at positions 0, 3, 4, 5.
    assert_eq!(
        BankingStage::filter_valid_transaction_indexes(
            &results_from(&[true, false, false, true, true, true]),
            &vec![1, 6, 7, 9, 31, 43]
        ),
        vec![1, 9, 31, 43]
    );
}
|
|
|
|
|
2019-04-13 23:19:54 -07:00
|
|
|
#[test]
fn test_should_process_or_forward_packets() {
    // Exercises BankingStage::consume_or_forward_packets over the
    // (next-leader, flag, flag) combinations and checks the resulting
    // BufferedPacketsDecision.
    //
    // Fix: Pubkey is Copy, so the `.clone()` calls on `my_pubkey1` were
    // redundant (clippy::clone_on_copy); pass it by value instead.
    let my_pubkey = Pubkey::new_rand();
    let my_pubkey1 = Pubkey::new_rand();

    // No known next leader: always Hold, whatever the flags say.
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey, None, true, false),
        BufferedPacketsDecision::Hold
    );
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey, None, false, false),
        BufferedPacketsDecision::Hold
    );
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey1, None, false, false),
        BufferedPacketsDecision::Hold
    );

    // Another node is next leader and both flags are off: Forward.
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey, Some(my_pubkey1), false, false),
        BufferedPacketsDecision::Forward
    );
    // Same, but the fourth flag is set: Hold instead of Forward.
    // NOTE(review): flag semantics (would-be-leader?) inferred from the call
    // sites only — confirm against consume_or_forward_packets' signature.
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey, Some(my_pubkey1), false, true),
        BufferedPacketsDecision::Hold
    );
    // Third flag set: Consume.
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey, Some(my_pubkey1), true, false),
        BufferedPacketsDecision::Consume
    );
    // We are the next leader ourselves: never Forward to ourselves.
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey1, Some(my_pubkey1), false, false),
        BufferedPacketsDecision::Hold
    );
    assert_eq!(
        BankingStage::consume_or_forward_packets(&my_pubkey1, Some(my_pubkey1), true, false),
        BufferedPacketsDecision::Consume
    );
}
|
|
|
|
|
2019-02-16 14:02:21 -08:00
|
|
|
#[test]
fn test_bank_process_and_record_transactions() {
    // End-to-end check: process_and_record_transactions commits a transfer
    // and emits an entry; once the working bank hits max_tick_height, a
    // second batch fails with MaxHeightReached and leaves the bank untouched.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let bank = Arc::new(Bank::new(&genesis_config));
    let pubkey = Pubkey::new_rand();

    let transactions = vec![system_transaction::transfer(
        &mint_keypair,
        &pubkey,
        1,
        genesis_config.hash(),
    )];

    let working_bank = WorkingBank {
        bank: bank.clone(),
        min_tick_height: bank.tick_height(),
        // One tick of headroom: a single tick() below exhausts the bank.
        max_tick_height: bank.tick_height() + 1,
    };
    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let (poh_recorder, entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.slot(),
            // NOTE(review): presumably a (slot, slot) next-leader window —
            // confirm against PohRecorder::new.
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(blocktree),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &Arc::new(PohConfig::default()),
        );
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));

        poh_recorder.lock().unwrap().set_working_bank(working_bank);

        BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder, 0)
            .0
            .unwrap();
        // Reach max_tick_height so the working bank is cleared.
        poh_recorder.lock().unwrap().tick();

        let mut done = false;
        // read entries until I find mine, might be ticks...
        while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() {
            if !entry.is_tick() {
                trace!("got entry");
                assert_eq!(entry.transactions.len(), transactions.len());
                assert_eq!(bank.get_balance(&pubkey), 1);
                done = true;
            }
            if done {
                break;
            }
        }
        trace!("done ticking");

        assert_eq!(done, true);

        let transactions = vec![system_transaction::transfer(
            &mint_keypair,
            &pubkey,
            2,
            genesis_config.hash(),
        )];

        // Working bank is gone: recording must fail with MaxHeightReached.
        assert_matches!(
            BankingStage::process_and_record_transactions(
                &bank,
                &transactions,
                &poh_recorder,
                0
            )
            .0,
            Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
        );

        // The failed batch must not have changed any balances.
        assert_eq!(bank.get_balance(&pubkey), 1);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
2019-05-07 10:23:02 -07:00
|
|
|
|
|
|
|
#[test]
fn test_bank_process_and_record_transactions_account_in_use() {
    // Two transfers that conflict on the mint account: the first locks it,
    // so the second must come back as unprocessed rather than fail the batch.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let bank = Arc::new(Bank::new(&genesis_config));
    let pubkey = Pubkey::new_rand();
    let pubkey1 = Pubkey::new_rand();

    let blockhash = genesis_config.hash();
    let transactions = vec![
        system_transaction::transfer(&mint_keypair, &pubkey, 1, blockhash),
        system_transaction::transfer(&mint_keypair, &pubkey1, 1, blockhash),
    ];

    let working_bank = WorkingBank {
        bank: bank.clone(),
        min_tick_height: bank.tick_height(),
        max_tick_height: bank.tick_height() + 1,
    };
    let ledger_path = get_tmp_ledger_path!();
    {
        let ledger =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let (poh_recorder, _entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.slot(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(ledger),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &Arc::new(PohConfig::default()),
        );
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));

        poh_recorder.lock().unwrap().set_working_bank(working_bank);

        let (record_result, unprocessed) = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &poh_recorder,
            0,
        );

        // Batch succeeds overall, with exactly one transaction left over.
        assert!(record_result.is_ok());
        assert_eq!(unprocessed.len(), 1);
    }
    Blocktree::destroy(&ledger_path).unwrap();
}
|
2019-05-10 14:28:38 -07:00
|
|
|
|
|
|
|
#[test]
fn test_filter_valid_packets() {
    // filter_valid_packets_for_forwarding should emit exactly the packets
    // whose indexes were marked valid, in batch order.
    //
    // Fix: the verification pass previously abused a discarded
    // `map(..).collect_vec()` chain for its side-effecting asserts
    // (clippy: needless_collect / map used for side effects); use a
    // plain `for` loop instead.
    solana_logger::setup();

    // 16 batches of 32 packets; each packet's port encodes (batch, index)
    // so surviving packets can be identified after filtering.
    let all_packets = (0..16)
        .map(|packets_id| {
            let packets = Packets::new(
                (0..32)
                    .map(|packet_id| {
                        let mut p = Packet::default();
                        p.meta.port = packets_id << 8 | packet_id;
                        p
                    })
                    .collect_vec(),
            );
            // Mark only the odd-indexed packets of each batch as valid.
            let valid_indexes = (0..32)
                .filter_map(|x| if x % 2 != 0 { Some(x as usize) } else { None })
                .collect_vec();
            (packets, valid_indexes)
        })
        .collect_vec();

    let result = BankingStage::filter_valid_packets_for_forwarding(&all_packets);

    // 16 batches x 16 odd indexes each.
    assert_eq!(result.len(), 256);

    for (index, p) in result.into_iter().enumerate() {
        let packets_id = index / 16;
        // Odd indexes only: the k-th survivor of a batch was packet 2k + 1.
        let packet_id = (index % 16) * 2 + 1;
        assert_eq!(p.meta.port, (packets_id << 8 | packet_id) as u16);
    }
}
|
2019-06-26 22:39:50 -07:00
|
|
|
|
|
|
|
#[test]
fn test_process_transactions_returns_unprocessed_txs() {
    // With no working bank set on the PoH recorder, process_transactions
    // should process nothing and report every transaction as retryable.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let bank = Arc::new(Bank::new(&genesis_config));

    let pubkey = Pubkey::new_rand();

    // Three copies of the same transfer.
    let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash());
    let transactions = vec![tx; 3];

    let ledger_path = get_tmp_ledger_path!();
    {
        let blocktree =
            Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
        let (poh_recorder, _entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.slot(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &Pubkey::new_rand(),
            &Arc::new(blocktree),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &Arc::new(PohConfig::default()),
        );

        // PoH recorder has no working bank, so recording throws
        // MaxHeightReached and nothing gets committed.
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));

        let (processed_transactions_count, mut retryable_txs) =
            BankingStage::process_transactions(&bank, &transactions, &poh_recorder);

        assert_eq!(processed_transactions_count, 0);

        // Every index must be reported retryable.
        retryable_txs.sort();
        let expected: Vec<usize> = (0..transactions.len()).collect();
        assert_eq!(retryable_txs, expected);
    }

    Blocktree::destroy(&ledger_path).unwrap();
}
|
2018-09-25 15:01:51 -07:00
|
|
|
}
|