//! The `banking_stage` processes Transaction messages. It is intended to be used
|
|
//! to construct a software pipeline. The stage uses all available CPU cores and
|
|
//! can do its processing in parallel with signature verification on the GPU.
|
|
|
|
use {
|
|
self::{
|
|
committer::CommitTransactionDetails,
|
|
decision_maker::{BufferedPacketsDecision, DecisionMaker},
|
|
forwarder::Forwarder,
|
|
packet_receiver::PacketReceiver,
|
|
},
|
|
crate::{
|
|
banking_stage::committer::Committer,
|
|
banking_trace::BankingPacketReceiver,
|
|
immutable_deserialized_packet::ImmutableDeserializedPacket,
|
|
latest_unprocessed_votes::{LatestUnprocessedVotes, VoteSource},
|
|
leader_slot_banking_stage_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary},
|
|
leader_slot_banking_stage_timing_metrics::{
|
|
LeaderExecuteAndCommitTimings, RecordTransactionsTimings,
|
|
},
|
|
packet_deserializer::PacketDeserializer,
|
|
qos_service::QosService,
|
|
tracer_packet_stats::TracerPacketStats,
|
|
unprocessed_packet_batches::*,
|
|
unprocessed_transaction_storage::{
|
|
ConsumeScannerPayload, ThreadType, UnprocessedTransactionStorage,
|
|
},
|
|
},
|
|
crossbeam_channel::RecvTimeoutError,
|
|
histogram::Histogram,
|
|
itertools::Itertools,
|
|
solana_client::connection_cache::ConnectionCache,
|
|
solana_entry::entry::hash_transactions,
|
|
solana_gossip::cluster_info::ClusterInfo,
|
|
solana_ledger::{
|
|
blockstore_processor::TransactionStatusSender, token_balances::collect_token_balances,
|
|
},
|
|
solana_measure::{measure, measure::Measure},
|
|
solana_metrics::inc_new_counter_info,
|
|
solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH},
|
|
solana_poh::poh_recorder::{BankStart, PohRecorder, PohRecorderError, TransactionRecorder},
|
|
solana_program_runtime::timings::ExecuteTimings,
|
|
solana_runtime::{
|
|
bank::{Bank, LoadAndExecuteTransactionsOutput, TransactionCheckResult},
|
|
bank_forks::BankForks,
|
|
transaction_batch::TransactionBatch,
|
|
transaction_error_metrics::TransactionErrorMetrics,
|
|
vote_sender_types::ReplayVoteSender,
|
|
},
|
|
solana_sdk::{
|
|
clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE},
|
|
feature_set::allow_votes_to_directly_update_vote_state,
|
|
pubkey::Pubkey,
|
|
timing::{timestamp, AtomicInterval},
|
|
transaction::{self, SanitizedTransaction, TransactionError, VersionedTransaction},
|
|
},
|
|
solana_transaction_status::TransactionTokenBalance,
|
|
std::{
|
|
cmp,
|
|
collections::HashMap,
|
|
env,
|
|
net::UdpSocket,
|
|
sync::{
|
|
atomic::{AtomicU64, AtomicUsize, Ordering},
|
|
Arc, RwLock,
|
|
},
|
|
thread::{self, Builder, JoinHandle},
|
|
time::{Duration, Instant},
|
|
},
|
|
};
|
|
|
|
pub mod committer;
mod decision_maker;
mod forwarder;
mod packet_receiver;

// Fixed thread size seems to be fastest on GCP setup
pub const NUM_THREADS: u32 = 6;

// Total packet-buffer budget, divided evenly among the non-vote banking
// threads in `new_num_threads` to derive each thread's `batch_limit`.
const TOTAL_BUFFERED_PACKETS: usize = 700_000;

// Upper bound on transactions bundled into a single execution batch.
// NOTE(review): not referenced in this chunk — presumably used by batching
// code elsewhere in the file; confirm before removing.
const MAX_NUM_TRANSACTIONS_PER_BATCH: usize = 64;

// Two dedicated vote threads (gossip votes and TPU votes — see the
// `(0, _)` / `(1, _)` arms in `new_num_threads`) ...
const NUM_VOTE_PROCESSING_THREADS: u32 = 2;
// ... plus at least one thread for ordinary transactions.
const MIN_THREADS_BANKING: u32 = 1;
const MIN_TOTAL_THREADS: u32 = NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING;

// How long `process_loop` may go without re-checking buffered packets /
// slot boundaries, even when the buffer is empty.
const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10);
|
|
|
|
/// Outcome of processing one batch of transactions: cost-model filtering
/// results plus the nested execute-and-commit output.
pub struct ProcessTransactionBatchOutput {
    // The number of transactions filtered out by the cost model
    cost_model_throttled_transactions_count: usize,
    // Amount of time spent running the cost model
    cost_model_us: u64,
    // Detailed results of executing and committing the surviving transactions.
    execute_and_commit_transactions_output: ExecuteAndCommitTransactionsOutput,
}
|
|
|
|
/// Result of `record_transactions`: timing breakdown, the PoH record result,
/// and where in the slot the recorded transactions begin.
struct RecordTransactionsSummary {
    // Metrics describing how time was spent recording transactions
    record_transactions_timings: RecordTransactionsTimings,
    // Result of trying to record the transactions into the PoH stream
    result: Result<(), PohRecorderError>,
    // Index in the slot of the first transaction recorded
    starting_transaction_index: Option<usize>,
}
|
|
|
|
/// Aggregated counts, retry indexes, commit result, timings and error metrics
/// produced by `execute_and_commit_transactions_locked`.
pub struct ExecuteAndCommitTransactionsOutput {
    // Total number of transactions that were passed as candidates for execution
    transactions_attempted_execution_count: usize,
    // The number of transactions of that were executed. See description of in `ProcessTransactionsSummary`
    // for possible outcomes of execution.
    executed_transactions_count: usize,
    // Total number of the executed transactions that returned success/not
    // an error.
    executed_with_successful_result_count: usize,
    // Transactions that either were not executed, or were executed and failed to be committed due
    // to the block ending.
    retryable_transaction_indexes: Vec<usize>,
    // A result that indicates whether transactions were successfully
    // committed into the Poh stream.
    commit_transactions_result: Result<Vec<CommitTransactionDetails>, PohRecorderError>,
    // Timing breakdown of the execute/record/commit pipeline for this batch.
    execute_and_commit_timings: LeaderExecuteAndCommitTimings,
    // Per-error-type counters accumulated while loading and executing.
    error_counters: TransactionErrorMetrics,
}
|
|
|
|
/// Per-thread banking-stage counters and timers, periodically flushed to the
/// metrics system by `report` (which zeroes them via `swap`).
#[derive(Debug, Default)]
pub struct BankingStageStats {
    // Rate-limits `report` to at most once per reporting interval.
    last_report: AtomicInterval,
    // Banking thread index this instance belongs to.
    id: u32,
    receive_and_buffer_packets_count: AtomicUsize,
    dropped_packets_count: AtomicUsize,
    pub(crate) dropped_duplicated_packets_count: AtomicUsize,
    newly_buffered_packets_count: AtomicUsize,
    current_buffered_packets_count: AtomicUsize,
    rebuffered_packets_count: AtomicUsize,
    consumed_buffered_packets_count: AtomicUsize,
    forwarded_transaction_count: AtomicUsize,
    forwarded_vote_count: AtomicUsize,
    // Distribution of packet-batch sizes, capped at PACKETS_PER_BATCH (see `new`).
    batch_packet_indexes_len: Histogram,

    // Timing
    consume_buffered_packets_elapsed: AtomicU64,
    receive_and_buffer_packets_elapsed: AtomicU64,
    filter_pending_packets_elapsed: AtomicU64,
    pub(crate) packet_conversion_elapsed: AtomicU64,
    transaction_processing_elapsed: AtomicU64,
}
|
|
|
|
impl BankingStageStats {
    /// Creates a zeroed stats tracker for banking thread `id`; the batch-size
    /// histogram is configured with a max value of `PACKETS_PER_BATCH`.
    pub fn new(id: u32) -> Self {
        BankingStageStats {
            id,
            batch_packet_indexes_len: Histogram::configure()
                .max_value(PACKETS_PER_BATCH as u64)
                .build()
                .unwrap(),
            ..BankingStageStats::default()
        }
    }

    /// Returns true when every counter, every timer, and the histogram are all
    /// zero — i.e. nothing has happened since the last `report` reset.
    fn is_empty(&self) -> bool {
        0 == self
            .receive_and_buffer_packets_count
            .load(Ordering::Relaxed) as u64
            + self.dropped_packets_count.load(Ordering::Relaxed) as u64
            + self
                .dropped_duplicated_packets_count
                .load(Ordering::Relaxed) as u64
            + self.newly_buffered_packets_count.load(Ordering::Relaxed) as u64
            + self.current_buffered_packets_count.load(Ordering::Relaxed) as u64
            + self.rebuffered_packets_count.load(Ordering::Relaxed) as u64
            + self.consumed_buffered_packets_count.load(Ordering::Relaxed) as u64
            + self
                .consume_buffered_packets_elapsed
                .load(Ordering::Relaxed)
            + self
                .receive_and_buffer_packets_elapsed
                .load(Ordering::Relaxed)
            + self.filter_pending_packets_elapsed.load(Ordering::Relaxed)
            + self.packet_conversion_elapsed.load(Ordering::Relaxed)
            + self.transaction_processing_elapsed.load(Ordering::Relaxed)
            + self.forwarded_transaction_count.load(Ordering::Relaxed) as u64
            + self.forwarded_vote_count.load(Ordering::Relaxed) as u64
            + self.batch_packet_indexes_len.entries()
    }

    /// Emits a `banking_stage-loop-stats` datapoint at most once per
    /// `report_interval_ms`. Each counter is read-and-zeroed with `swap`, so
    /// reported values are deltas since the previous report; the histogram is
    /// cleared afterwards. No-op while all stats are zero.
    fn report(&mut self, report_interval_ms: u64) {
        // skip reporting metrics if stats is empty
        if self.is_empty() {
            return;
        }
        if self.last_report.should_update(report_interval_ms) {
            datapoint_info!(
                "banking_stage-loop-stats",
                ("id", self.id as i64, i64),
                (
                    "receive_and_buffer_packets_count",
                    self.receive_and_buffer_packets_count
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "dropped_packets_count",
                    self.dropped_packets_count.swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "dropped_duplicated_packets_count",
                    self.dropped_duplicated_packets_count
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "newly_buffered_packets_count",
                    self.newly_buffered_packets_count.swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "current_buffered_packets_count",
                    self.current_buffered_packets_count
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "rebuffered_packets_count",
                    self.rebuffered_packets_count.swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "consumed_buffered_packets_count",
                    self.consumed_buffered_packets_count
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "forwarded_transaction_count",
                    self.forwarded_transaction_count.swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "forwarded_vote_count",
                    self.forwarded_vote_count.swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "consume_buffered_packets_elapsed",
                    self.consume_buffered_packets_elapsed
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "receive_and_buffer_packets_elapsed",
                    self.receive_and_buffer_packets_elapsed
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "filter_pending_packets_elapsed",
                    self.filter_pending_packets_elapsed
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "packet_conversion_elapsed",
                    self.packet_conversion_elapsed.swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "transaction_processing_elapsed",
                    self.transaction_processing_elapsed
                        .swap(0, Ordering::Relaxed) as i64,
                    i64
                ),
                (
                    "packet_batch_indices_len_min",
                    self.batch_packet_indexes_len.minimum().unwrap_or(0) as i64,
                    i64
                ),
                (
                    "packet_batch_indices_len_max",
                    self.batch_packet_indexes_len.maximum().unwrap_or(0) as i64,
                    i64
                ),
                (
                    "packet_batch_indices_len_mean",
                    self.batch_packet_indexes_len.mean().unwrap_or(0) as i64,
                    i64
                ),
                (
                    "packet_batch_indices_len_90pct",
                    self.batch_packet_indexes_len.percentile(90.0).unwrap_or(0) as i64,
                    i64
                )
            );
            self.batch_packet_indexes_len.clear();
        }
    }
}
|
|
|
|
/// Pre-execution balance snapshots collected only when a
/// `TransactionStatusSender` is configured (see
/// `execute_and_commit_transactions_locked`), so RPC can report balance deltas.
#[derive(Default)]
struct PreBalanceInfo {
    // Native lamport balances per transaction (from `bank.collect_balances`).
    native: Vec<Vec<u64>>,
    // SPL token balances per transaction (from `collect_token_balances`).
    token: Vec<Vec<TransactionTokenBalance>>,
    // Cache of mint pubkey -> decimals used while collecting token balances.
    mint_decimals: HashMap<Pubkey, u8>,
}
|
|
|
|
/// Cost-model accounting for a batch: accumulated costs plus per-limit error
/// tallies.
#[derive(Debug, Default)]
pub struct BatchedTransactionDetails {
    pub costs: BatchedTransactionCostDetails,
    pub errors: BatchedTransactionErrorDetails,
}
|
|
|
|
/// Summed cost-model components across a batch of transactions.
#[derive(Debug, Default)]
pub struct BatchedTransactionCostDetails {
    pub batched_signature_cost: u64,
    pub batched_write_lock_cost: u64,
    pub batched_data_bytes_cost: u64,
    pub batched_builtins_execute_cost: u64,
    pub batched_bpf_execute_cost: u64,
}
|
|
|
|
/// Counts of transactions rejected by the cost model, broken down by which
/// limit was hit ("retried" limits are per-block and reset; the account-data
/// total limit drops transactions outright).
#[derive(Debug, Default)]
pub struct BatchedTransactionErrorDetails {
    pub batched_retried_txs_per_block_limit_count: u64,
    pub batched_retried_txs_per_vote_limit_count: u64,
    pub batched_retried_txs_per_account_limit_count: u64,
    pub batched_retried_txs_per_account_data_block_limit_count: u64,
    pub batched_dropped_txs_per_account_data_total_limit_count: u64,
}
|
|
|
|
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
    // Join handles for every banking worker thread spawned in
    // `new_num_threads`; threads exit when their packet receiver disconnects.
    bank_thread_hdls: Vec<JoinHandle<()>>,
}
|
|
|
|
/// How (or whether) a thread's buffered packets should be forwarded to the
/// upcoming leader.
#[derive(Debug, Clone)]
pub enum ForwardOption {
    // Do not forward at all.
    NotForward,
    // Forward to the leader's TPU vote port.
    ForwardTpuVote,
    // Forward to the leader's regular TPU port.
    ForwardTransaction,
}
|
|
|
|
/// Counters produced while filtering buffered packets for forwarding:
/// how many packets (and tracer packets) survived, plus time spent converting
/// and filtering.
#[derive(Debug, Default)]
pub struct FilterForwardingResults {
    pub(crate) total_forwardable_packets: usize,
    pub(crate) total_tracer_packets_in_buffer: usize,
    pub(crate) total_forwardable_tracer_packets: usize,
    pub(crate) total_packet_conversion_us: u64,
    pub(crate) total_filter_packets_us: u64,
}
|
|
|
|
impl BankingStage {
|
|
    /// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
    ///
    /// Convenience constructor: delegates to `new_num_threads` with the
    /// default thread count from `Self::num_threads()`.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        cluster_info: &Arc<ClusterInfo>,
        poh_recorder: &Arc<RwLock<PohRecorder>>,
        non_vote_receiver: BankingPacketReceiver,
        tpu_vote_receiver: BankingPacketReceiver,
        gossip_vote_receiver: BankingPacketReceiver,
        transaction_status_sender: Option<TransactionStatusSender>,
        replay_vote_sender: ReplayVoteSender,
        log_messages_bytes_limit: Option<usize>,
        connection_cache: Arc<ConnectionCache>,
        bank_forks: Arc<RwLock<BankForks>>,
    ) -> Self {
        Self::new_num_threads(
            cluster_info,
            poh_recorder,
            non_vote_receiver,
            tpu_vote_receiver,
            gossip_vote_receiver,
            Self::num_threads(),
            transaction_status_sender,
            replay_vote_sender,
            log_messages_bytes_limit,
            connection_cache,
            bank_forks,
        )
    }
|
|
|
|
    /// Spawns `num_threads` banking worker threads and returns the stage.
    ///
    /// Thread assignment: thread 0 handles gossip votes, thread 1 handles TPU
    /// votes, and all remaining threads handle ordinary transactions. When the
    /// `allow_votes_to_directly_update_vote_state` feature is active on the
    /// root bank, the two vote threads use the shared `LatestUnprocessedVotes`
    /// storage instead of per-thread packet batches.
    ///
    /// Panics if `num_threads < MIN_TOTAL_THREADS` (there must be at least the
    /// two vote threads plus one transaction thread).
    #[allow(clippy::too_many_arguments)]
    pub fn new_num_threads(
        cluster_info: &Arc<ClusterInfo>,
        poh_recorder: &Arc<RwLock<PohRecorder>>,
        non_vote_receiver: BankingPacketReceiver,
        tpu_vote_receiver: BankingPacketReceiver,
        gossip_vote_receiver: BankingPacketReceiver,
        num_threads: u32,
        transaction_status_sender: Option<TransactionStatusSender>,
        replay_vote_sender: ReplayVoteSender,
        log_messages_bytes_limit: Option<usize>,
        connection_cache: Arc<ConnectionCache>,
        bank_forks: Arc<RwLock<BankForks>>,
    ) -> Self {
        assert!(num_threads >= MIN_TOTAL_THREADS);
        // Single thread to generate entries from many banks.
        // This thread talks to poh_service and broadcasts the entries once they have been recorded.
        // Once an entry has been recorded, its blockhash is registered with the bank.
        let data_budget = Arc::new(DataBudget::default());
        // Split the total packet budget across the non-vote threads only.
        let batch_limit =
            TOTAL_BUFFERED_PACKETS / ((num_threads - NUM_VOTE_PROCESSING_THREADS) as usize);
        // Keeps track of extraneous vote transactions for the vote threads
        let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new());
        // Feature gate read from the root bank; a poisoned lock falls back to `false`.
        let should_split_voting_threads = bank_forks
            .read()
            .map(|bank_forks| {
                let bank = bank_forks.root_bank();
                bank.feature_set
                    .is_active(&allow_votes_to_directly_update_vote_state::id())
            })
            .unwrap_or(false);
        // Many banks that process transactions in parallel.
        let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
            .map(|i| {
                // Pick this thread's packet source and buffer type by index.
                let (packet_receiver, unprocessed_transaction_storage) =
                    match (i, should_split_voting_threads) {
                        (0, false) => (
                            gossip_vote_receiver.clone(),
                            UnprocessedTransactionStorage::new_transaction_storage(
                                UnprocessedPacketBatches::with_capacity(batch_limit),
                                ThreadType::Voting(VoteSource::Gossip),
                            ),
                        ),
                        (0, true) => (
                            gossip_vote_receiver.clone(),
                            UnprocessedTransactionStorage::new_vote_storage(
                                latest_unprocessed_votes.clone(),
                                VoteSource::Gossip,
                            ),
                        ),
                        (1, false) => (
                            tpu_vote_receiver.clone(),
                            UnprocessedTransactionStorage::new_transaction_storage(
                                UnprocessedPacketBatches::with_capacity(batch_limit),
                                ThreadType::Voting(VoteSource::Tpu),
                            ),
                        ),
                        (1, true) => (
                            tpu_vote_receiver.clone(),
                            UnprocessedTransactionStorage::new_vote_storage(
                                latest_unprocessed_votes.clone(),
                                VoteSource::Tpu,
                            ),
                        ),
                        _ => (
                            non_vote_receiver.clone(),
                            UnprocessedTransactionStorage::new_transaction_storage(
                                UnprocessedPacketBatches::with_capacity(batch_limit),
                                ThreadType::Transactions,
                            ),
                        ),
                    };

                // Clone the shared handles this worker thread will own.
                let mut packet_deserializer = PacketDeserializer::new(packet_receiver);
                let poh_recorder = poh_recorder.clone();
                let cluster_info = cluster_info.clone();
                let mut recv_start = Instant::now();
                let transaction_status_sender = transaction_status_sender.clone();
                let replay_vote_sender = replay_vote_sender.clone();
                let data_budget = data_budget.clone();
                let connection_cache = connection_cache.clone();
                let bank_forks = bank_forks.clone();

                let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone());

                Builder::new()
                    .name(format!("solBanknStgTx{i:02}"))
                    .spawn(move || {
                        Self::process_loop(
                            &mut packet_deserializer,
                            &decision_maker,
                            &poh_recorder,
                            &cluster_info,
                            &mut recv_start,
                            i,
                            transaction_status_sender,
                            replay_vote_sender,
                            &data_budget,
                            log_messages_bytes_limit,
                            connection_cache,
                            &bank_forks,
                            unprocessed_transaction_storage,
                        );
                    })
                    .unwrap()
            })
            .collect();
        Self { bank_thread_hdls }
    }
|
|
|
|
    /// Processes one scanned batch of buffered packets against the working
    /// bank and returns the indexes of transactions that should be retried,
    /// or `None` if the slot has already ended (batch left untouched).
    ///
    /// Also updates `consumed_buffered_packets_count` /
    /// `rebuffered_packet_count` for the caller and sets
    /// `payload.reached_end_of_slot` when PoH max height is hit or the bank
    /// stops accepting transactions.
    #[allow(clippy::too_many_arguments)]
    fn do_process_packets(
        bank_start: &BankStart,
        payload: &mut ConsumeScannerPayload,
        recorder: &TransactionRecorder,
        transaction_status_sender: &Option<TransactionStatusSender>,
        replay_vote_sender: &ReplayVoteSender,
        banking_stage_stats: &BankingStageStats,
        qos_service: &QosService,
        log_messages_bytes_limit: Option<usize>,
        consumed_buffered_packets_count: &mut usize,
        rebuffered_packet_count: &mut usize,
        test_fn: &Option<impl Fn()>,
        packets_to_process: &Vec<Arc<ImmutableDeserializedPacket>>,
    ) -> Option<Vec<usize>> {
        if payload.reached_end_of_slot {
            return None;
        }

        let packets_to_process_len = packets_to_process.len();
        let (process_transactions_summary, process_packets_transactions_time) = measure!(
            Self::process_packets_transactions(
                &bank_start.working_bank,
                &bank_start.bank_creation_time,
                recorder,
                &payload.sanitized_transactions,
                transaction_status_sender,
                replay_vote_sender,
                banking_stage_stats,
                qos_service,
                payload.slot_metrics_tracker,
                log_messages_bytes_limit
            ),
            "process_packets_transactions",
        );
        payload
            .slot_metrics_tracker
            .increment_process_packets_transactions_us(process_packets_transactions_time.as_us());

        // Clear payload for next iteration
        payload.sanitized_transactions.clear();
        payload.account_locks.clear();

        let ProcessTransactionsSummary {
            reached_max_poh_height,
            retryable_transaction_indexes,
            ..
        } = process_transactions_summary;

        if reached_max_poh_height || !bank_start.should_working_bank_still_be_processing_txs() {
            payload.reached_end_of_slot = true;
        }

        // The difference between all transactions passed to execution and the ones that
        // are retryable were the ones that were either:
        // 1) Committed into the block
        // 2) Dropped without being committed because they had some fatal error (too old,
        // duplicate signature, etc.)
        //
        // Note: This assumes that every packet deserializes into one transaction!
        *consumed_buffered_packets_count +=
            packets_to_process_len.saturating_sub(retryable_transaction_indexes.len());

        // Out of the buffered packets just retried, collect any still unprocessed
        // transactions in this batch for forwarding
        *rebuffered_packet_count += retryable_transaction_indexes.len();
        // Test hook: invoked once per processed batch when provided.
        if let Some(test_fn) = test_fn {
            test_fn();
        }

        payload
            .slot_metrics_tracker
            .increment_retryable_packets_count(retryable_transaction_indexes.len() as u64);

        Some(retryable_transaction_indexes)
    }
|
|
|
|
    /// Drains as much of `unprocessed_transaction_storage` as possible into
    /// the current working bank by scanning it with `do_process_packets`,
    /// then records the elapsed/consumed/rebuffered counters into
    /// `banking_stage_stats`.
    #[allow(clippy::too_many_arguments)]
    pub fn consume_buffered_packets(
        bank_start: &BankStart,
        unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
        transaction_status_sender: &Option<TransactionStatusSender>,
        replay_vote_sender: &ReplayVoteSender,
        test_fn: Option<impl Fn()>,
        banking_stage_stats: &BankingStageStats,
        recorder: &TransactionRecorder,
        qos_service: &QosService,
        slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
        log_messages_bytes_limit: Option<usize>,
    ) {
        let mut rebuffered_packet_count = 0;
        let mut consumed_buffered_packets_count = 0;
        let mut proc_start = Measure::start("consume_buffered_process");
        let num_packets_to_process = unprocessed_transaction_storage.len();

        // `process_packets` drives the scan; our closure handles each batch and
        // returns the retryable indexes (or None once the slot has ended).
        let reached_end_of_slot = unprocessed_transaction_storage.process_packets(
            bank_start.working_bank.clone(),
            banking_stage_stats,
            slot_metrics_tracker,
            |packets_to_process, payload| {
                Self::do_process_packets(
                    bank_start,
                    payload,
                    recorder,
                    transaction_status_sender,
                    replay_vote_sender,
                    banking_stage_stats,
                    qos_service,
                    log_messages_bytes_limit,
                    &mut consumed_buffered_packets_count,
                    &mut rebuffered_packet_count,
                    &test_fn,
                    packets_to_process,
                )
            },
        );

        // Record how much was left unprocessed when the slot boundary hit.
        if reached_end_of_slot {
            slot_metrics_tracker.set_end_of_slot_unprocessed_buffer_len(
                unprocessed_transaction_storage.len() as u64,
            );
        }

        proc_start.stop();
        debug!(
            "@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}",
            timestamp(),
            num_packets_to_process,
            proc_start.as_ms(),
            consumed_buffered_packets_count,
            (consumed_buffered_packets_count as f32) / (proc_start.as_s())
        );

        banking_stage_stats
            .consume_buffered_packets_elapsed
            .fetch_add(proc_start.as_us(), Ordering::Relaxed);
        banking_stage_stats
            .rebuffered_packets_count
            .fetch_add(rebuffered_packet_count, Ordering::Relaxed);
        banking_stage_stats
            .consumed_buffered_packets_count
            .fetch_add(consumed_buffered_packets_count, Ordering::Relaxed);
    }
|
|
|
|
    /// Acts on the buffered packets according to the leader-schedule decision:
    /// consume them into the working bank when we are (about to be) leader,
    /// forward them (optionally holding a copy) when another node is leader,
    /// or do nothing.
    #[allow(clippy::too_many_arguments)]
    fn process_buffered_packets(
        decision_maker: &DecisionMaker,
        socket: &UdpSocket,
        poh_recorder: &Arc<RwLock<PohRecorder>>,
        cluster_info: &ClusterInfo,
        unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
        transaction_status_sender: &Option<TransactionStatusSender>,
        replay_vote_sender: &ReplayVoteSender,
        banking_stage_stats: &BankingStageStats,
        recorder: &TransactionRecorder,
        data_budget: &DataBudget,
        qos_service: &QosService,
        slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
        log_messages_bytes_limit: Option<usize>,
        connection_cache: &ConnectionCache,
        tracer_packet_stats: &mut TracerPacketStats,
        bank_forks: &Arc<RwLock<BankForks>>,
    ) {
        // Some storage types (e.g. vote storage on a non-voting setup) opt out entirely.
        if unprocessed_transaction_storage.should_not_process() {
            return;
        }
        let ((metrics_action, decision), make_decision_time) =
            measure!(decision_maker.make_consume_or_forward_decision(slot_metrics_tracker));
        slot_metrics_tracker.increment_make_decision_us(make_decision_time.as_us());

        match decision {
            BufferedPacketsDecision::Consume(bank_start) => {
                // Take metrics action before consume packets (potentially resetting the
                // slot metrics tracker to the next slot) so that we don't count the
                // packet processing metrics from the next slot towards the metrics
                // of the previous slot
                slot_metrics_tracker.apply_action(metrics_action);
                let (_, consume_buffered_packets_time) = measure!(
                    Self::consume_buffered_packets(
                        &bank_start,
                        unprocessed_transaction_storage,
                        transaction_status_sender,
                        replay_vote_sender,
                        None::<Box<dyn Fn()>>,
                        banking_stage_stats,
                        recorder,
                        qos_service,
                        slot_metrics_tracker,
                        log_messages_bytes_limit
                    ),
                    "consume_buffered_packets",
                );
                slot_metrics_tracker
                    .increment_consume_buffered_packets_us(consume_buffered_packets_time.as_us());
            }
            BufferedPacketsDecision::Forward => {
                // Forward without holding: `hold` argument is false.
                let (_, forward_time) = measure!(
                    Forwarder::handle_forwarding(
                        cluster_info,
                        unprocessed_transaction_storage,
                        poh_recorder,
                        socket,
                        false,
                        data_budget,
                        slot_metrics_tracker,
                        banking_stage_stats,
                        connection_cache,
                        tracer_packet_stats,
                        bank_forks,
                    ),
                    "forward",
                );
                slot_metrics_tracker.increment_forward_us(forward_time.as_us());
                // Take metrics action after forwarding packets to include forwarded
                // metrics into current slot
                slot_metrics_tracker.apply_action(metrics_action);
            }
            BufferedPacketsDecision::ForwardAndHold => {
                // Forward but keep the packets buffered: `hold` argument is true.
                let (_, forward_and_hold_time) = measure!(
                    Forwarder::handle_forwarding(
                        cluster_info,
                        unprocessed_transaction_storage,
                        poh_recorder,
                        socket,
                        true,
                        data_budget,
                        slot_metrics_tracker,
                        banking_stage_stats,
                        connection_cache,
                        tracer_packet_stats,
                        bank_forks,
                    ),
                    "forward_and_hold",
                );
                slot_metrics_tracker.increment_forward_and_hold_us(forward_and_hold_time.as_us());
                // Take metrics action after forwarding packets
                slot_metrics_tracker.apply_action(metrics_action);
            }
            _ => (),
        }
    }
|
|
|
|
    /// Main loop for one banking worker thread: alternates between acting on
    /// already-buffered packets (`process_buffered_packets`) and pulling more
    /// packets off the channel (`receive_and_buffer_packets`). Exits when the
    /// packet channel disconnects.
    #[allow(clippy::too_many_arguments)]
    fn process_loop(
        packet_deserializer: &mut PacketDeserializer,
        decision_maker: &DecisionMaker,
        poh_recorder: &Arc<RwLock<PohRecorder>>,
        cluster_info: &ClusterInfo,
        recv_start: &mut Instant,
        id: u32,
        transaction_status_sender: Option<TransactionStatusSender>,
        replay_vote_sender: ReplayVoteSender,
        data_budget: &DataBudget,
        log_messages_bytes_limit: Option<usize>,
        connection_cache: Arc<ConnectionCache>,
        bank_forks: &Arc<RwLock<BankForks>>,
        mut unprocessed_transaction_storage: UnprocessedTransactionStorage,
    ) {
        let recorder = poh_recorder.read().unwrap().recorder();
        // Ephemeral socket used only as the source for forwarded packets.
        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut banking_stage_stats = BankingStageStats::new(id);
        let mut tracer_packet_stats = TracerPacketStats::new(id);
        let qos_service = QosService::new(id);

        let mut slot_metrics_tracker = LeaderSlotMetricsTracker::new(id);
        let mut last_metrics_update = Instant::now();

        loop {
            // Work the buffer when it is non-empty, or at least every
            // SLOT_BOUNDARY_CHECK_PERIOD so slot transitions are noticed.
            if !unprocessed_transaction_storage.is_empty()
                || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD
            {
                let (_, process_buffered_packets_time) = measure!(
                    Self::process_buffered_packets(
                        decision_maker,
                        &socket,
                        poh_recorder,
                        cluster_info,
                        &mut unprocessed_transaction_storage,
                        &transaction_status_sender,
                        &replay_vote_sender,
                        &banking_stage_stats,
                        &recorder,
                        data_budget,
                        &qos_service,
                        &mut slot_metrics_tracker,
                        log_messages_bytes_limit,
                        &connection_cache,
                        &mut tracer_packet_stats,
                        bank_forks,
                    ),
                    "process_buffered_packets",
                );
                slot_metrics_tracker
                    .increment_process_buffered_packets_us(process_buffered_packets_time.as_us());
                last_metrics_update = Instant::now();
            }

            tracer_packet_stats.report(1000);

            // Gossip thread will almost always not wait because the transaction storage will most likely not be empty
            let recv_timeout = if !unprocessed_transaction_storage.is_empty() {
                // If there are buffered packets, run the equivalent of try_recv to try reading more
                // packets. This prevents starving BankingStage::consume_buffered_packets due to
                // buffered_packet_batches containing transactions that exceed the cost model for
                // the current bank.
                Duration::from_millis(0)
            } else {
                // Default wait time
                Duration::from_millis(100)
            };

            let (res, receive_and_buffer_packets_time) = measure!(
                PacketReceiver::receive_and_buffer_packets(
                    packet_deserializer,
                    recv_start,
                    recv_timeout,
                    id,
                    &mut unprocessed_transaction_storage,
                    &mut banking_stage_stats,
                    &mut tracer_packet_stats,
                    &mut slot_metrics_tracker,
                ),
                "receive_and_buffer_packets",
            );
            slot_metrics_tracker
                .increment_receive_and_buffer_packets_us(receive_and_buffer_packets_time.as_us());

            // Timeout just means no packets arrived; only a disconnected
            // channel (all senders dropped) terminates the thread.
            match res {
                Ok(()) | Err(RecvTimeoutError::Timeout) => (),
                Err(RecvTimeoutError::Disconnected) => break,
            }
            banking_stage_stats.report(1000);
        }
    }
|
|
|
|
pub fn num_threads() -> u32 {
|
|
cmp::max(
|
|
env::var("SOLANA_BANKING_THREADS")
|
|
.map(|x| x.parse().unwrap_or(NUM_THREADS))
|
|
.unwrap_or(NUM_THREADS),
|
|
MIN_TOTAL_THREADS,
|
|
)
|
|
}
|
|
|
|
fn record_transactions(
|
|
bank_slot: Slot,
|
|
transactions: Vec<VersionedTransaction>,
|
|
recorder: &TransactionRecorder,
|
|
) -> RecordTransactionsSummary {
|
|
let mut record_transactions_timings = RecordTransactionsTimings::default();
|
|
let mut starting_transaction_index = None;
|
|
|
|
if !transactions.is_empty() {
|
|
let num_to_record = transactions.len();
|
|
inc_new_counter_info!("banking_stage-record_count", 1);
|
|
inc_new_counter_info!("banking_stage-record_transactions", num_to_record);
|
|
|
|
let (hash, hash_time) = measure!(hash_transactions(&transactions), "hash");
|
|
record_transactions_timings.hash_us = hash_time.as_us();
|
|
|
|
let (res, poh_record_time) =
|
|
measure!(recorder.record(bank_slot, hash, transactions), "hash");
|
|
record_transactions_timings.poh_record_us = poh_record_time.as_us();
|
|
|
|
match res {
|
|
Ok(starting_index) => {
|
|
starting_transaction_index = starting_index;
|
|
}
|
|
Err(PohRecorderError::MaxHeightReached) => {
|
|
inc_new_counter_info!("banking_stage-max_height_reached", 1);
|
|
inc_new_counter_info!(
|
|
"banking_stage-max_height_reached_num_to_commit",
|
|
num_to_record
|
|
);
|
|
return RecordTransactionsSummary {
|
|
record_transactions_timings,
|
|
result: Err(PohRecorderError::MaxHeightReached),
|
|
starting_transaction_index: None,
|
|
};
|
|
}
|
|
Err(e) => panic!("Poh recorder returned unexpected error: {e:?}"),
|
|
}
|
|
}
|
|
|
|
RecordTransactionsSummary {
|
|
record_transactions_timings,
|
|
result: Ok(()),
|
|
starting_transaction_index,
|
|
}
|
|
}
|
|
|
|
fn execute_and_commit_transactions_locked(
|
|
bank: &Arc<Bank>,
|
|
poh: &TransactionRecorder,
|
|
batch: &TransactionBatch,
|
|
transaction_status_sender: &Option<TransactionStatusSender>,
|
|
replay_vote_sender: &ReplayVoteSender,
|
|
log_messages_bytes_limit: Option<usize>,
|
|
) -> ExecuteAndCommitTransactionsOutput {
|
|
let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
|
|
|
|
let mut pre_balance_info = PreBalanceInfo::default();
|
|
let (_, collect_balances_time) = measure!(
|
|
{
|
|
// If the extra meta-data services are enabled for RPC, collect the
|
|
// pre-balances for native and token programs.
|
|
if transaction_status_sender.is_some() {
|
|
pre_balance_info.native = bank.collect_balances(batch);
|
|
pre_balance_info.token =
|
|
collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals)
|
|
}
|
|
},
|
|
"collect_balances",
|
|
);
|
|
execute_and_commit_timings.collect_balances_us = collect_balances_time.as_us();
|
|
|
|
let (load_and_execute_transactions_output, load_execute_time) = measure!(
|
|
bank.load_and_execute_transactions(
|
|
batch,
|
|
MAX_PROCESSING_AGE,
|
|
transaction_status_sender.is_some(),
|
|
transaction_status_sender.is_some(),
|
|
transaction_status_sender.is_some(),
|
|
&mut execute_and_commit_timings.execute_timings,
|
|
None, // account_overrides
|
|
log_messages_bytes_limit
|
|
),
|
|
"load_execute",
|
|
);
|
|
execute_and_commit_timings.load_execute_us = load_execute_time.as_us();
|
|
|
|
let LoadAndExecuteTransactionsOutput {
|
|
mut loaded_transactions,
|
|
execution_results,
|
|
mut retryable_transaction_indexes,
|
|
executed_transactions_count,
|
|
executed_non_vote_transactions_count,
|
|
executed_with_successful_result_count,
|
|
signature_count,
|
|
error_counters,
|
|
..
|
|
} = load_and_execute_transactions_output;
|
|
|
|
let transactions_attempted_execution_count = execution_results.len();
|
|
let (executed_transactions, execution_results_to_transactions_time): (Vec<_>, Measure) = measure!(
|
|
execution_results
|
|
.iter()
|
|
.zip(batch.sanitized_transactions())
|
|
.filter_map(|(execution_result, tx)| {
|
|
if execution_result.was_executed() {
|
|
Some(tx.to_versioned_transaction())
|
|
} else {
|
|
None
|
|
}
|
|
})
|
|
.collect(),
|
|
"execution_results_to_transactions",
|
|
);
|
|
|
|
let (freeze_lock, freeze_lock_time) = measure!(bank.freeze_lock(), "freeze_lock");
|
|
execute_and_commit_timings.freeze_lock_us = freeze_lock_time.as_us();
|
|
|
|
let (record_transactions_summary, record_time) = measure!(
|
|
Self::record_transactions(bank.slot(), executed_transactions, poh),
|
|
"record_transactions",
|
|
);
|
|
execute_and_commit_timings.record_us = record_time.as_us();
|
|
|
|
let RecordTransactionsSummary {
|
|
result: record_transactions_result,
|
|
record_transactions_timings,
|
|
starting_transaction_index,
|
|
} = record_transactions_summary;
|
|
execute_and_commit_timings.record_transactions_timings = RecordTransactionsTimings {
|
|
execution_results_to_transactions_us: execution_results_to_transactions_time.as_us(),
|
|
..record_transactions_timings
|
|
};
|
|
|
|
if let Err(recorder_err) = record_transactions_result {
|
|
inc_new_counter_info!(
|
|
"banking_stage-record_transactions_retryable_record_txs",
|
|
executed_transactions_count
|
|
);
|
|
|
|
retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map(
|
|
|(index, execution_result)| execution_result.was_executed().then_some(index),
|
|
));
|
|
|
|
return ExecuteAndCommitTransactionsOutput {
|
|
transactions_attempted_execution_count,
|
|
executed_transactions_count,
|
|
executed_with_successful_result_count,
|
|
retryable_transaction_indexes,
|
|
commit_transactions_result: Err(recorder_err),
|
|
execute_and_commit_timings,
|
|
error_counters,
|
|
};
|
|
}
|
|
|
|
let (commit_time_us, commit_transaction_statuses) = if executed_transactions_count != 0 {
|
|
Committer::commit_transactions(
|
|
batch,
|
|
&mut loaded_transactions,
|
|
execution_results,
|
|
starting_transaction_index,
|
|
bank,
|
|
&mut pre_balance_info,
|
|
&mut execute_and_commit_timings,
|
|
transaction_status_sender,
|
|
replay_vote_sender,
|
|
signature_count,
|
|
executed_transactions_count,
|
|
executed_non_vote_transactions_count,
|
|
executed_with_successful_result_count,
|
|
)
|
|
} else {
|
|
(
|
|
0,
|
|
vec![CommitTransactionDetails::NotCommitted; execution_results.len()],
|
|
)
|
|
};
|
|
|
|
drop(freeze_lock);
|
|
|
|
debug!(
|
|
"bank: {} process_and_record_locked: {}us record: {}us commit: {}us txs_len: {}",
|
|
bank.slot(),
|
|
load_execute_time.as_us(),
|
|
record_time.as_us(),
|
|
commit_time_us,
|
|
batch.sanitized_transactions().len(),
|
|
);
|
|
|
|
debug!(
|
|
"execute_and_commit_transactions_locked: {:?}",
|
|
execute_and_commit_timings.execute_timings,
|
|
);
|
|
|
|
debug_assert_eq!(
|
|
commit_transaction_statuses.len(),
|
|
transactions_attempted_execution_count
|
|
);
|
|
|
|
ExecuteAndCommitTransactionsOutput {
|
|
transactions_attempted_execution_count,
|
|
executed_transactions_count,
|
|
executed_with_successful_result_count,
|
|
retryable_transaction_indexes,
|
|
commit_transactions_result: Ok(commit_transaction_statuses),
|
|
execute_and_commit_timings,
|
|
error_counters,
|
|
}
|
|
}
|
|
|
|
    /// Runs one batch of sanitized transactions through the full
    /// select → lock → execute/record/commit → unlock pipeline.
    ///
    /// Steps, in order:
    /// 1. Ask the QoS service which of `txs` fit in the block's cost budget.
    /// 2. Lock accounts for the selected transactions only.
    /// 3. Execute, record to PoH, and commit them (via
    ///    `execute_and_commit_transactions_locked`).
    /// 4. Drop the batch to release the account locks.
    /// 5. Reconcile QoS cost accounting with what actually committed and
    ///    report metrics.
    ///
    /// `chunk_offset` is the index of `txs[0]` within the caller's larger
    /// transaction slice; retryable indexes are shifted by it so they refer
    /// to the caller's indexing.
    pub fn process_and_record_transactions(
        bank: &Arc<Bank>,
        txs: &[SanitizedTransaction],
        poh: &TransactionRecorder,
        chunk_offset: usize,
        transaction_status_sender: &Option<TransactionStatusSender>,
        replay_vote_sender: &ReplayVoteSender,
        qos_service: &QosService,
        log_messages_bytes_limit: Option<usize>,
    ) -> ProcessTransactionBatchOutput {
        let (
            (transaction_costs, transactions_qos_results, cost_model_throttled_transactions_count),
            cost_model_time,
        ) = measure!(qos_service.select_and_accumulate_transaction_costs(bank, txs));

        // Only lock accounts for those transactions that are selected for the block;
        // once accounts are locked, other threads cannot encode transactions that will modify the
        // same account state.
        let (batch, lock_time) = measure!(
            bank.prepare_sanitized_batch_with_results(txs, transactions_qos_results.iter())
        );

        // retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit
        // WouldExceedMaxAccountCostLimit, WouldExceedMaxVoteCostLimit
        // and WouldExceedMaxAccountDataCostLimit
        let mut execute_and_commit_transactions_output =
            Self::execute_and_commit_transactions_locked(
                bank,
                poh,
                &batch,
                transaction_status_sender,
                replay_vote_sender,
                log_messages_bytes_limit,
            );

        // Once the account locks are released (by dropping the batch), new
        // transactions touching the same accounts can enter the pipeline.
        let (_, unlock_time) = measure!(drop(batch));

        let ExecuteAndCommitTransactionsOutput {
            ref mut retryable_transaction_indexes,
            ref execute_and_commit_timings,
            ref commit_transactions_result,
            ..
        } = execute_and_commit_transactions_output;

        // Reconcile the cost tracker with the actual commit outcome: costs for
        // transactions that did not commit are removed.
        QosService::update_or_remove_transaction_costs(
            transaction_costs.iter(),
            transactions_qos_results.iter(),
            commit_transactions_result.as_ref().ok(),
            bank,
        );

        // Translate batch-local retryable indexes into the caller's indexing.
        retryable_transaction_indexes
            .iter_mut()
            .for_each(|x| *x += chunk_offset);

        let (cu, us) =
            Self::accumulate_execute_units_and_time(&execute_and_commit_timings.execute_timings);
        qos_service.accumulate_actual_execute_cu(cu);
        qos_service.accumulate_actual_execute_time(us);

        // reports qos service stats for this batch
        qos_service.report_metrics(bank.clone());

        debug!(
            "bank: {} lock: {}us unlock: {}us txs_len: {}",
            bank.slot(),
            lock_time.as_us(),
            unlock_time.as_us(),
            txs.len(),
        );

        ProcessTransactionBatchOutput {
            cost_model_throttled_transactions_count,
            cost_model_us: cost_model_time.as_us(),
            execute_and_commit_transactions_output,
        }
    }
|
|
|
|
fn accumulate_execute_units_and_time(execute_timings: &ExecuteTimings) -> (u64, u64) {
|
|
let (units, times): (Vec<_>, Vec<_>) = execute_timings
|
|
.details
|
|
.per_program_timings
|
|
.values()
|
|
.map(|program_timings| {
|
|
(
|
|
program_timings.accumulated_units,
|
|
program_timings.accumulated_us,
|
|
)
|
|
})
|
|
.unzip();
|
|
(units.iter().sum(), times.iter().sum())
|
|
}
|
|
|
|
    /// Sends transactions to the bank.
    ///
    /// Returns the number of transactions successfully processed by the bank, which may be less
    /// than the total number if max PoH height was reached and the bank halted
    fn process_transactions(
        bank: &Arc<Bank>,
        bank_creation_time: &Instant,
        transactions: &[SanitizedTransaction],
        poh: &TransactionRecorder,
        transaction_status_sender: &Option<TransactionStatusSender>,
        replay_vote_sender: &ReplayVoteSender,
        qos_service: &QosService,
        log_messages_bytes_limit: Option<usize>,
    ) -> ProcessTransactionsSummary {
        let mut chunk_start = 0;
        // Indexes (into `transactions`) that should be retried later.
        let mut all_retryable_tx_indexes = vec![];
        // All the transactions that attempted execution. See description of
        // struct ProcessTransactionsSummary above for possible outcomes.
        let mut total_transactions_attempted_execution_count: usize = 0;
        // All transactions that were executed and committed
        let mut total_committed_transactions_count: usize = 0;
        // All transactions that were executed and committed with a successful result
        let mut total_committed_transactions_with_successful_result_count: usize = 0;
        // All transactions that were executed but then failed record because the
        // slot ended
        let mut total_failed_commit_count: usize = 0;
        let mut total_cost_model_throttled_transactions_count: usize = 0;
        let mut total_cost_model_us: u64 = 0;
        let mut total_execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
        let mut total_error_counters = TransactionErrorMetrics::default();
        let mut reached_max_poh_height = false;
        // Process `transactions` in fixed-size chunks; each chunk becomes one
        // call to process_and_record_transactions.
        while chunk_start != transactions.len() {
            let chunk_end = std::cmp::min(
                transactions.len(),
                chunk_start + MAX_NUM_TRANSACTIONS_PER_BATCH,
            );
            let process_transaction_batch_output = Self::process_and_record_transactions(
                bank,
                &transactions[chunk_start..chunk_end],
                poh,
                chunk_start,
                transaction_status_sender,
                replay_vote_sender,
                qos_service,
                log_messages_bytes_limit,
            );

            let ProcessTransactionBatchOutput {
                cost_model_throttled_transactions_count: new_cost_model_throttled_transactions_count,
                cost_model_us: new_cost_model_us,
                execute_and_commit_transactions_output,
            } = process_transaction_batch_output;
            total_cost_model_throttled_transactions_count =
                total_cost_model_throttled_transactions_count
                    .saturating_add(new_cost_model_throttled_transactions_count);
            total_cost_model_us = total_cost_model_us.saturating_add(new_cost_model_us);

            let ExecuteAndCommitTransactionsOutput {
                transactions_attempted_execution_count: new_transactions_attempted_execution_count,
                executed_transactions_count: new_executed_transactions_count,
                executed_with_successful_result_count: new_executed_with_successful_result_count,
                retryable_transaction_indexes: new_retryable_transaction_indexes,
                commit_transactions_result: new_commit_transactions_result,
                execute_and_commit_timings: new_execute_and_commit_timings,
                error_counters: new_error_counters,
                ..
            } = execute_and_commit_transactions_output;

            total_execute_and_commit_timings.accumulate(&new_execute_and_commit_timings);
            total_error_counters.accumulate(&new_error_counters);
            total_transactions_attempted_execution_count =
                total_transactions_attempted_execution_count
                    .saturating_add(new_transactions_attempted_execution_count);

            trace!(
                "process_transactions result: {:?}",
                new_commit_transactions_result
            );

            if new_commit_transactions_result.is_ok() {
                total_committed_transactions_count = total_committed_transactions_count
                    .saturating_add(new_executed_transactions_count);
                total_committed_transactions_with_successful_result_count =
                    total_committed_transactions_with_successful_result_count
                        .saturating_add(new_executed_with_successful_result_count);
            } else {
                // A failed commit means the executed transactions in this
                // chunk never landed in the block.
                total_failed_commit_count =
                    total_failed_commit_count.saturating_add(new_executed_transactions_count);
            }

            // Add the retryable txs (transactions that errored in a way that warrants a retry)
            // to the list of unprocessed txs.
            all_retryable_tx_indexes.extend_from_slice(&new_retryable_transaction_indexes);

            // If `bank_creation_time` is None, it's a test so ignore the option so
            // allow processing
            let should_bank_still_be_processing_txs =
                Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot);
            match (
                new_commit_transactions_result,
                should_bank_still_be_processing_txs,
            ) {
                // Stop early if PoH hit max height, or if the bank's slot
                // time has elapsed.
                (Err(PohRecorderError::MaxHeightReached), _) | (_, false) => {
                    info!(
                        "process transactions: max height reached slot: {} height: {}",
                        bank.slot(),
                        bank.tick_height()
                    );
                    // process_and_record_transactions has returned all retryable errors in
                    // transactions[chunk_start..chunk_end], so we just need to push the remaining
                    // transactions into the unprocessed queue.
                    all_retryable_tx_indexes.extend(chunk_end..transactions.len());
                    reached_max_poh_height = true;
                    break;
                }
                _ => (),
            }
            // Don't exit early on any other type of error, continue processing...
            chunk_start = chunk_end;
        }

        ProcessTransactionsSummary {
            reached_max_poh_height,
            transactions_attempted_execution_count: total_transactions_attempted_execution_count,
            committed_transactions_count: total_committed_transactions_count,
            committed_transactions_with_successful_result_count:
                total_committed_transactions_with_successful_result_count,
            failed_commit_count: total_failed_commit_count,
            retryable_transaction_indexes: all_retryable_tx_indexes,
            cost_model_throttled_transactions_count: total_cost_model_throttled_transactions_count,
            cost_model_us: total_cost_model_us,
            execute_and_commit_timings: total_execute_and_commit_timings,
            error_counters: total_error_counters,
        }
    }
|
|
|
|
/// This function creates a filter of transaction results with Ok() for every pending
|
|
/// transaction. The non-pending transactions are marked with TransactionError
|
|
fn prepare_filter_for_pending_transactions(
|
|
transactions_len: usize,
|
|
pending_tx_indexes: &[usize],
|
|
) -> Vec<transaction::Result<()>> {
|
|
let mut mask = vec![Err(TransactionError::BlockhashNotFound); transactions_len];
|
|
pending_tx_indexes.iter().for_each(|x| mask[*x] = Ok(()));
|
|
mask
|
|
}
|
|
|
|
/// This function returns a vector containing index of all valid transactions. A valid
|
|
/// transaction has result Ok() as the value
|
|
fn filter_valid_transaction_indexes(valid_txs: &[TransactionCheckResult]) -> Vec<usize> {
|
|
valid_txs
|
|
.iter()
|
|
.enumerate()
|
|
.filter_map(|(index, (x, _h))| if x.is_ok() { Some(index) } else { None })
|
|
.collect_vec()
|
|
}
|
|
|
|
/// This function filters pending packets that are still valid
|
|
/// # Arguments
|
|
/// * `transactions` - a batch of transactions deserialized from packets
|
|
/// * `pending_indexes` - identifies which indexes in the `transactions` list are still pending
|
|
fn filter_pending_packets_from_pending_txs(
|
|
bank: &Arc<Bank>,
|
|
transactions: &[SanitizedTransaction],
|
|
pending_indexes: &[usize],
|
|
) -> Vec<usize> {
|
|
let filter =
|
|
Self::prepare_filter_for_pending_transactions(transactions.len(), pending_indexes);
|
|
|
|
let results = bank.check_transactions_with_forwarding_delay(
|
|
transactions,
|
|
&filter,
|
|
FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET,
|
|
);
|
|
|
|
Self::filter_valid_transaction_indexes(&results)
|
|
}
|
|
|
|
#[allow(clippy::too_many_arguments)]
|
|
fn process_packets_transactions<'a>(
|
|
bank: &'a Arc<Bank>,
|
|
bank_creation_time: &Instant,
|
|
poh: &'a TransactionRecorder,
|
|
sanitized_transactions: &[SanitizedTransaction],
|
|
transaction_status_sender: &Option<TransactionStatusSender>,
|
|
replay_vote_sender: &'a ReplayVoteSender,
|
|
banking_stage_stats: &'a BankingStageStats,
|
|
qos_service: &'a QosService,
|
|
slot_metrics_tracker: &'a mut LeaderSlotMetricsTracker,
|
|
log_messages_bytes_limit: Option<usize>,
|
|
) -> ProcessTransactionsSummary {
|
|
// Process transactions
|
|
let (mut process_transactions_summary, process_transactions_time) = measure!(
|
|
Self::process_transactions(
|
|
bank,
|
|
bank_creation_time,
|
|
sanitized_transactions,
|
|
poh,
|
|
transaction_status_sender,
|
|
replay_vote_sender,
|
|
qos_service,
|
|
log_messages_bytes_limit,
|
|
),
|
|
"process_transaction_time",
|
|
);
|
|
let process_transactions_us = process_transactions_time.as_us();
|
|
slot_metrics_tracker.increment_process_transactions_us(process_transactions_us);
|
|
banking_stage_stats
|
|
.transaction_processing_elapsed
|
|
.fetch_add(process_transactions_us, Ordering::Relaxed);
|
|
|
|
let ProcessTransactionsSummary {
|
|
ref retryable_transaction_indexes,
|
|
ref error_counters,
|
|
..
|
|
} = process_transactions_summary;
|
|
|
|
slot_metrics_tracker.accumulate_process_transactions_summary(&process_transactions_summary);
|
|
slot_metrics_tracker.accumulate_transaction_errors(error_counters);
|
|
|
|
let retryable_tx_count = retryable_transaction_indexes.len();
|
|
inc_new_counter_info!("banking_stage-unprocessed_transactions", retryable_tx_count);
|
|
|
|
// Filter out the retryable transactions that are too old
|
|
let (filtered_retryable_transaction_indexes, filter_retryable_packets_time) = measure!(
|
|
Self::filter_pending_packets_from_pending_txs(
|
|
bank,
|
|
sanitized_transactions,
|
|
retryable_transaction_indexes,
|
|
),
|
|
"filter_pending_packets_time",
|
|
);
|
|
let filter_retryable_packets_us = filter_retryable_packets_time.as_us();
|
|
slot_metrics_tracker.increment_filter_retryable_packets_us(filter_retryable_packets_us);
|
|
banking_stage_stats
|
|
.filter_pending_packets_elapsed
|
|
.fetch_add(filter_retryable_packets_us, Ordering::Relaxed);
|
|
|
|
let retryable_packets_filtered_count = retryable_transaction_indexes
|
|
.len()
|
|
.saturating_sub(filtered_retryable_transaction_indexes.len());
|
|
slot_metrics_tracker
|
|
.increment_retryable_packets_filtered_count(retryable_packets_filtered_count as u64);
|
|
|
|
inc_new_counter_info!(
|
|
"banking_stage-dropped_tx_before_forwarding",
|
|
retryable_transaction_indexes
|
|
.len()
|
|
.saturating_sub(filtered_retryable_transaction_indexes.len())
|
|
);
|
|
|
|
process_transactions_summary.retryable_transaction_indexes =
|
|
filtered_retryable_transaction_indexes;
|
|
process_transactions_summary
|
|
}
|
|
|
|
pub fn join(self) -> thread::Result<()> {
|
|
for bank_thread_hdl in self.bank_thread_hdls {
|
|
bank_thread_hdl.join()?;
|
|
}
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
|
|
mod tests {
|
|
use {
|
|
super::*,
|
|
crate::{
|
|
banking_trace::{BankingPacketBatch, BankingTracer},
|
|
unprocessed_packet_batches,
|
|
},
|
|
crossbeam_channel::{unbounded, Receiver},
|
|
solana_address_lookup_table_program::state::{AddressLookupTable, LookupTableMeta},
|
|
solana_entry::entry::{next_entry, next_versioned_entry, Entry, EntrySlice},
|
|
solana_gossip::cluster_info::Node,
|
|
solana_ledger::{
|
|
blockstore::{entries_to_test_shreds, Blockstore},
|
|
genesis_utils::{
|
|
create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
|
|
},
|
|
get_tmp_ledger_path_auto_delete,
|
|
leader_schedule_cache::LeaderScheduleCache,
|
|
},
|
|
solana_perf::packet::{to_packet_batches, PacketBatch},
|
|
solana_poh::{
|
|
poh_recorder::{create_test_recorder, Record, WorkingBankEntry},
|
|
poh_service::PohService,
|
|
},
|
|
solana_program_runtime::timings::ProgramTiming,
|
|
solana_rpc::transaction_status_service::TransactionStatusService,
|
|
solana_runtime::{
|
|
bank_forks::BankForks,
|
|
genesis_utils::{activate_feature, bootstrap_validator_stake_lamports},
|
|
},
|
|
solana_sdk::{
|
|
account::AccountSharedData,
|
|
hash::Hash,
|
|
instruction::InstructionError,
|
|
message::{
|
|
v0::{self, MessageAddressTableLookup},
|
|
MessageHeader, VersionedMessage,
|
|
},
|
|
poh_config::PohConfig,
|
|
signature::{Keypair, Signer},
|
|
system_transaction,
|
|
transaction::{MessageHash, Transaction, TransactionError, VersionedTransaction},
|
|
},
|
|
solana_streamer::socket::SocketAddrSpace,
|
|
solana_transaction_status::{TransactionStatusMeta, VersionedTransactionWithStatusMeta},
|
|
solana_vote_program::{
|
|
vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction,
|
|
},
|
|
std::{
|
|
borrow::Cow,
|
|
path::Path,
|
|
sync::atomic::{AtomicBool, Ordering},
|
|
thread::sleep,
|
|
},
|
|
};
|
|
|
|
pub(crate) fn new_test_cluster_info(keypair: Option<Arc<Keypair>>) -> (Node, ClusterInfo) {
|
|
let keypair = keypair.unwrap_or_else(|| Arc::new(Keypair::new()));
|
|
let node = Node::new_localhost_with_pubkey(&keypair.pubkey());
|
|
let cluster_info =
|
|
ClusterInfo::new(node.info.clone(), keypair, SocketAddrSpace::Unspecified);
|
|
(node, cluster_info)
|
|
}
|
|
|
|
    /// Starts a BankingStage with no incoming packets, then shuts everything
    /// down: dropping the packet senders closes the channels so the banking
    /// threads exit, and `join` must succeed cleanly.
    #[test]
    fn test_banking_stage_shutdown1() {
        let genesis_config = create_genesis_config(2).genesis_config;
        let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap());
        let banking_tracer = BankingTracer::new_disabled();
        let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();
        let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
        let (gossip_vote_sender, gossip_vote_receiver) =
            banking_tracer.create_channel_gossip_vote();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        {
            let blockstore = Arc::new(
                Blockstore::open(ledger_path.path())
                    .expect("Expected to be able to open database ledger"),
            );
            let (exit, poh_recorder, poh_service, _entry_receiever) =
                create_test_recorder(&bank, &blockstore, None, None);
            let (_, cluster_info) = new_test_cluster_info(/*keypair:*/ None);
            let cluster_info = Arc::new(cluster_info);
            let (replay_vote_sender, _replay_vote_receiver) = unbounded();

            let banking_stage = BankingStage::new(
                &cluster_info,
                &poh_recorder,
                non_vote_receiver,
                tpu_vote_receiver,
                gossip_vote_receiver,
                None,
                replay_vote_sender,
                None,
                Arc::new(ConnectionCache::default()),
                bank_forks,
            );
            // Closing all three packet channels signals the banking threads
            // to drain and exit.
            drop(non_vote_sender);
            drop(tpu_vote_sender);
            drop(gossip_vote_sender);
            exit.store(true, Ordering::Relaxed);
            banking_stage.join().unwrap();
            poh_service.join().unwrap();
        }
        Blockstore::destroy(ledger_path.path()).unwrap();
    }
|
|
|
|
    /// Runs BankingStage with PoH configured to tick a couple of ticks past
    /// the bank's max tick height, then verifies that exactly
    /// `ticks_per_slot` tick entries were produced and that they hash-chain
    /// from the bank's starting blockhash to its final blockhash.
    #[test]
    fn test_banking_stage_tick() {
        solana_logger::setup();
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(2);
        genesis_config.ticks_per_slot = 4;
        let num_extra_ticks = 2;
        let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap());
        let start_hash = bank.last_blockhash();
        let banking_tracer = BankingTracer::new_disabled();
        let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();
        let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
        let (gossip_vote_sender, gossip_vote_receiver) =
            banking_tracer.create_channel_gossip_vote();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        {
            let blockstore = Arc::new(
                Blockstore::open(ledger_path.path())
                    .expect("Expected to be able to open database ledger"),
            );
            // Tick past max height so PoH clears its working bank and stops.
            let poh_config = PohConfig {
                target_tick_count: Some(bank.max_tick_height() + num_extra_ticks),
                ..PohConfig::default()
            };
            let (exit, poh_recorder, poh_service, entry_receiver) =
                create_test_recorder(&bank, &blockstore, Some(poh_config), None);
            let (_, cluster_info) = new_test_cluster_info(/*keypair:*/ None);
            let cluster_info = Arc::new(cluster_info);
            let (replay_vote_sender, _replay_vote_receiver) = unbounded();

            let banking_stage = BankingStage::new(
                &cluster_info,
                &poh_recorder,
                non_vote_receiver,
                tpu_vote_receiver,
                gossip_vote_receiver,
                None,
                replay_vote_sender,
                None,
                Arc::new(ConnectionCache::default()),
                bank_forks,
            );
            trace!("sending bank");
            // No packets are sent; dropping the senders lets banking exit.
            drop(non_vote_sender);
            drop(tpu_vote_sender);
            drop(gossip_vote_sender);
            exit.store(true, Ordering::Relaxed);
            poh_service.join().unwrap();
            drop(poh_recorder);

            trace!("getting entries");
            let entries: Vec<_> = entry_receiver
                .iter()
                .map(|(_bank, (entry, _tick_height))| entry)
                .collect();
            trace!("done");
            // Only tick entries should have been produced (no transactions
            // were submitted), exactly one per tick of the slot.
            assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
            assert!(entries.verify(&start_hash));
            assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
            banking_stage.join().unwrap();
        }
        Blockstore::destroy(ledger_path.path()).unwrap();
    }
|
|
|
|
pub fn convert_from_old_verified(
|
|
mut with_vers: Vec<(PacketBatch, Vec<u8>)>,
|
|
) -> Vec<PacketBatch> {
|
|
with_vers.iter_mut().for_each(|(b, v)| {
|
|
b.iter_mut()
|
|
.zip(v)
|
|
.for_each(|(p, f)| p.meta_mut().set_discard(*f == 0))
|
|
});
|
|
with_vers.into_iter().map(|(b, _)| b).collect()
|
|
}
|
|
|
|
    /// End-to-end check that BankingStage turns incoming packets into
    /// entries: sends one batch containing an unverified tx, a bad tx
    /// (AccountNotFound), and one good tx, then replays the produced entries
    /// on a fresh bank and verifies only the good transfer landed.
    #[test]
    fn test_banking_stage_entries_only() {
        solana_logger::setup();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_slow_genesis_config(10);
        let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap());
        let start_hash = bank.last_blockhash();
        let banking_tracer = BankingTracer::new_disabled();
        let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();
        let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
        let (gossip_vote_sender, gossip_vote_receiver) =
            banking_tracer.create_channel_gossip_vote();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        {
            let blockstore = Arc::new(
                Blockstore::open(ledger_path.path())
                    .expect("Expected to be able to open database ledger"),
            );
            let poh_config = PohConfig {
                // limit tick count to avoid clearing working_bank at PohRecord then
                // PohRecorderError(MaxHeightReached) at BankingStage
                target_tick_count: Some(bank.max_tick_height() - 1),
                ..PohConfig::default()
            };
            let (exit, poh_recorder, poh_service, entry_receiver) =
                create_test_recorder(&bank, &blockstore, Some(poh_config), None);
            let (_, cluster_info) = new_test_cluster_info(/*keypair:*/ None);
            let cluster_info = Arc::new(cluster_info);
            let (replay_vote_sender, _replay_vote_receiver) = unbounded();

            let banking_stage = BankingStage::new(
                &cluster_info,
                &poh_recorder,
                non_vote_receiver,
                tpu_vote_receiver,
                gossip_vote_receiver,
                None,
                replay_vote_sender,
                None,
                Arc::new(ConnectionCache::default()),
                bank_forks,
            );

            // fund another account so we can send 2 good transactions in a single batch.
            let keypair = Keypair::new();
            let fund_tx =
                system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash);
            bank.process_transaction(&fund_tx).unwrap();

            // good tx
            let to = solana_sdk::pubkey::new_rand();
            let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);

            // good tx, but no verify
            let to2 = solana_sdk::pubkey::new_rand();
            let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);

            // bad tx, AccountNotFound
            let keypair = Keypair::new();
            let to3 = solana_sdk::pubkey::new_rand();
            let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);

            // send 'em over
            let packet_batches = to_packet_batches(&[tx_no_ver, tx_anf, tx], 3);

            // glad they all fit
            assert_eq!(packet_batches.len(), 1);

            // Flags: 0 => failed sigverify (discarded), 1 => verified.
            let packet_batches = packet_batches
                .into_iter()
                .map(|batch| (batch, vec![0u8, 1u8, 1u8]))
                .collect();
            let packet_batches = convert_from_old_verified(packet_batches);
            non_vote_sender // no_ver, anf, tx
                .send(BankingPacketBatch::new((packet_batches, None)))
                .unwrap();

            drop(non_vote_sender);
            drop(tpu_vote_sender);
            drop(gossip_vote_sender);
            // wait until banking_stage to finish up all packets
            banking_stage.join().unwrap();

            exit.store(true, Ordering::Relaxed);
            poh_service.join().unwrap();
            drop(poh_recorder);

            // Replay the emitted entries on a fresh bank and confirm the
            // surviving transfer is applied.
            let mut blockhash = start_hash;
            let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
            bank.process_transaction(&fund_tx).unwrap();
            //receive entries + ticks
            loop {
                let entries: Vec<Entry> = entry_receiver
                    .iter()
                    .map(|(_bank, (entry, _tick_height))| entry)
                    .collect();

                assert!(entries.verify(&blockhash));
                if !entries.is_empty() {
                    blockhash = entries.last().unwrap().hash;
                    for entry in entries {
                        bank.process_entry_transactions(entry.transactions)
                            .iter()
                            .for_each(|x| assert_eq!(*x, Ok(())));
                    }
                }

                if bank.get_balance(&to) == 1 {
                    break;
                }

                sleep(Duration::from_millis(200));
            }

            assert_eq!(bank.get_balance(&to), 1);
            // `to2` never receives funds: its transaction was discarded by
            // the sigverify flag before reaching the banking stage.
            assert_eq!(bank.get_balance(&to2), 0);

            drop(entry_receiver);
        }
        Blockstore::destroy(ledger_path.path()).unwrap();
    }
|
|
|
|
    /// Sends two conflicting transfers (same funding account, insufficient
    /// balance for both) in separate batches and checks that replaying the
    /// produced entries can never credit the recipient with both amounts —
    /// i.e. the stage's entry boundaries keep the ledger interpretable.
    #[test]
    fn test_banking_stage_entryfication() {
        solana_logger::setup();
        // In this attack we'll demonstrate that a verifier can interpret the ledger
        // differently if either the server doesn't signal the ledger to add an
        // Entry OR if the verifier tries to parallelize across multiple Entries.
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_slow_genesis_config(2);
        let banking_tracer = BankingTracer::new_disabled();
        let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();

        // Process a batch that includes a transaction that receives two lamports.
        let alice = Keypair::new();
        let tx =
            system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash());

        let packet_batches = to_packet_batches(&[tx], 1);
        let packet_batches = packet_batches
            .into_iter()
            .map(|batch| (batch, vec![1u8]))
            .collect();
        let packet_batches = convert_from_old_verified(packet_batches);
        non_vote_sender
            .send(BankingPacketBatch::new((packet_batches, None)))
            .unwrap();

        // Process a second batch that uses the same from account, so conflicts with above TX
        let tx =
            system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash());
        let packet_batches = to_packet_batches(&[tx], 1);
        let packet_batches = packet_batches
            .into_iter()
            .map(|batch| (batch, vec![1u8]))
            .collect();
        let packet_batches = convert_from_old_verified(packet_batches);
        non_vote_sender
            .send(BankingPacketBatch::new((packet_batches, None)))
            .unwrap();

        let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
        let (gossip_vote_sender, gossip_vote_receiver) =
            banking_tracer.create_channel_gossip_vote();
        let ledger_path = get_tmp_ledger_path_auto_delete!();
        {
            let (replay_vote_sender, _replay_vote_receiver) = unbounded();

            let entry_receiver = {
                // start a banking_stage to eat verified receiver
                let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
                let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
                let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap());
                let blockstore = Arc::new(
                    Blockstore::open(ledger_path.path())
                        .expect("Expected to be able to open database ledger"),
                );
                let poh_config = PohConfig {
                    // limit tick count to avoid clearing working_bank at
                    // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
                    target_tick_count: Some(bank.max_tick_height() - 1),
                    ..PohConfig::default()
                };
                let (exit, poh_recorder, poh_service, entry_receiver) =
                    create_test_recorder(&bank, &blockstore, Some(poh_config), None);
                let (_, cluster_info) = new_test_cluster_info(/*keypair:*/ None);
                let cluster_info = Arc::new(cluster_info);
                let _banking_stage = BankingStage::new_num_threads(
                    &cluster_info,
                    &poh_recorder,
                    non_vote_receiver,
                    tpu_vote_receiver,
                    gossip_vote_receiver,
                    3,
                    None,
                    replay_vote_sender,
                    None,
                    Arc::new(ConnectionCache::default()),
                    bank_forks,
                );

                // wait for banking_stage to eat the packets
                while bank.get_balance(&alice.pubkey()) < 1 {
                    sleep(Duration::from_millis(10));
                }
                exit.store(true, Ordering::Relaxed);
                poh_service.join().unwrap();
                entry_receiver
            };
            drop(non_vote_sender);
            drop(tpu_vote_sender);
            drop(gossip_vote_sender);

            // consume the entire entry_receiver, feed it into a new bank
            // check that the balance is what we expect.
            let entries: Vec<_> = entry_receiver
                .iter()
                .map(|(_bank, (entry, _tick_height))| entry)
                .collect();

            let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
            for entry in entries {
                bank.process_entry_transactions(entry.transactions)
                    .iter()
                    .for_each(|x| assert_eq!(*x, Ok(())));
            }

            // Assert the user doesn't hold three lamports. If the stage only outputs one
            // entry, then one of the transactions will be rejected, because it drives
            // the account balance below zero before the credit is added.
            assert!(bank.get_balance(&alice.pubkey()) != 3);
        }
        Blockstore::destroy(ledger_path.path()).unwrap();
    }
|
|
|
|
fn sanitize_transactions(txs: Vec<Transaction>) -> Vec<SanitizedTransaction> {
|
|
txs.into_iter()
|
|
.map(SanitizedTransaction::from_transaction_for_tests)
|
|
.collect()
|
|
}
|
|
|
|
#[test]
fn test_bank_record_transactions() {
    // Verifies that `BankingStage::record_transactions`:
    //   1. forwards recorded transactions to the PoH entry channel when the
    //      recorder's working bank slot matches, and
    //   2. fails with `MaxHeightReached` (sending nothing) when asked to
    //      record for a slot the recorder is not working on.
    solana_logger::setup();

    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
            // TODO use record_receiver
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            None,
            bank.ticks_per_slot(),
            &Pubkey::default(),
            &Arc::new(blockstore),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        // Background thread that services the record channel so record()
        // calls below can complete.
        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        // A working bank must be set before record_transactions can succeed.
        poh_recorder.write().unwrap().set_bank(&bank, false);
        let pubkey = solana_sdk::pubkey::new_rand();
        let keypair2 = Keypair::new();
        let pubkey2 = solana_sdk::pubkey::new_rand();

        let txs = vec![
            system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash())
                .into(),
            system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()).into(),
        ];

        // Record for the working bank's slot: the entry with both transactions
        // should come through the entry channel unchanged.
        let _ = BankingStage::record_transactions(bank.slot(), txs.clone(), &recorder);
        let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
        assert_eq!(entry.transactions, txs);

        // Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions),
        // record_transactions should throw MaxHeightReached
        let next_slot = bank.slot() + 1;
        let RecordTransactionsSummary { result, .. } =
            BankingStage::record_transactions(next_slot, txs, &recorder);
        assert_matches!(result, Err(PohRecorderError::MaxHeightReached));
        // Should receive nothing from PohRecorder b/c record failed
        assert!(entry_receiver.try_recv().is_err());

        // Signal the simulated PoH thread to stop before joining it.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_bank_prepare_filter_for_pending_transaction() {
    // The filter marks the listed pending indexes Ok(()) and every other
    // position Err(BlockhashNotFound), over a batch of 6 transactions.
    let expected_for_2_4_5 = vec![
        Err(TransactionError::BlockhashNotFound),
        Err(TransactionError::BlockhashNotFound),
        Ok(()),
        Err(TransactionError::BlockhashNotFound),
        Ok(()),
        Ok(()),
    ];
    assert_eq!(
        BankingStage::prepare_filter_for_pending_transactions(6, &[2, 4, 5]),
        expected_for_2_4_5
    );

    let expected_for_0_2_3 = vec![
        Ok(()),
        Err(TransactionError::BlockhashNotFound),
        Ok(()),
        Ok(()),
        Err(TransactionError::BlockhashNotFound),
        Err(TransactionError::BlockhashNotFound),
    ];
    assert_eq!(
        BankingStage::prepare_filter_for_pending_transactions(6, &[0, 2, 3]),
        expected_for_0_2_3
    );
}
|
|
|
|
#[test]
fn test_bank_filter_valid_transaction_indexes() {
    // The helper returns the positions whose check result is Ok(()).
    let checks_a = [
        (Err(TransactionError::BlockhashNotFound), None),
        (Err(TransactionError::BlockhashNotFound), None),
        (Ok(()), None),
        (Err(TransactionError::BlockhashNotFound), None),
        (Ok(()), None),
        (Ok(()), None),
    ];
    assert_eq!(
        BankingStage::filter_valid_transaction_indexes(&checks_a),
        [2, 4, 5]
    );

    let checks_b = [
        (Ok(()), None),
        (Err(TransactionError::BlockhashNotFound), None),
        (Err(TransactionError::BlockhashNotFound), None),
        (Ok(()), None),
        (Ok(()), None),
        (Ok(()), None),
    ];
    assert_eq!(
        BankingStage::filter_valid_transaction_indexes(&checks_b),
        [0, 3, 4, 5]
    );
}
|
|
|
|
pub(crate) fn create_slow_genesis_config(lamports: u64) -> GenesisConfigInfo {
|
|
create_slow_genesis_config_with_leader(lamports, &solana_sdk::pubkey::new_rand())
|
|
}
|
|
|
|
pub(crate) fn create_slow_genesis_config_with_leader(
|
|
lamports: u64,
|
|
validator_pubkey: &Pubkey,
|
|
) -> GenesisConfigInfo {
|
|
let mut config_info = create_genesis_config_with_leader(
|
|
lamports,
|
|
validator_pubkey,
|
|
// See solana_ledger::genesis_utils::create_genesis_config.
|
|
bootstrap_validator_stake_lamports(),
|
|
);
|
|
|
|
// For these tests there's only 1 slot, don't want to run out of ticks
|
|
config_info.genesis_config.ticks_per_slot *= 8;
|
|
config_info
|
|
}
|
|
|
|
#[test]
fn test_bank_process_and_record_transactions() {
    // Verifies `BankingStage::process_and_record_transactions`:
    //   1. a valid transfer is executed, committed, and emitted as a PoH entry;
    //   2. after the recorder reaches max tick height, a second transfer still
    //      executes but fails to commit with `MaxHeightReached` and is marked
    //      retryable, leaving the balance unchanged.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let pubkey = solana_sdk::pubkey::new_rand();

    let transactions = sanitize_transactions(vec![system_transaction::transfer(
        &mint_keypair,
        &pubkey,
        1,
        genesis_config.hash(),
    )]);

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(blockstore),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        // Background thread servicing the record channel so record() completes.
        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        poh_recorder.write().unwrap().set_bank(&bank, false);
        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        let process_transactions_batch_output = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &None,
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        let ExecuteAndCommitTransactionsOutput {
            transactions_attempted_execution_count,
            executed_transactions_count,
            executed_with_successful_result_count,
            commit_transactions_result,
            ..
        } = process_transactions_batch_output.execute_and_commit_transactions_output;

        // The single transfer should execute, succeed, and commit.
        assert_eq!(transactions_attempted_execution_count, 1);
        assert_eq!(executed_transactions_count, 1);
        assert_eq!(executed_with_successful_result_count, 1);
        assert!(commit_transactions_result.is_ok());

        // Tick up to max tick height
        while poh_recorder.read().unwrap().tick_height() != bank.max_tick_height() {
            poh_recorder.write().unwrap().tick();
        }

        let mut done = false;
        // read entries until I find mine, might be ticks...
        while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() {
            if !entry.is_tick() {
                trace!("got entry");
                assert_eq!(entry.transactions.len(), transactions.len());
                assert_eq!(bank.get_balance(&pubkey), 1);
                done = true;
            }
            if done {
                break;
            }
        }
        trace!("done ticking");

        assert!(done);

        // A second transfer now that the recorder is at max tick height.
        let transactions = sanitize_transactions(vec![system_transaction::transfer(
            &mint_keypair,
            &pubkey,
            2,
            genesis_config.hash(),
        )]);

        let process_transactions_batch_output = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &None,
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        let ExecuteAndCommitTransactionsOutput {
            transactions_attempted_execution_count,
            executed_transactions_count,
            executed_with_successful_result_count,
            retryable_transaction_indexes,
            commit_transactions_result,
            ..
        } = process_transactions_batch_output.execute_and_commit_transactions_output;
        assert_eq!(transactions_attempted_execution_count, 1);
        // Transactions was still executed, just wasn't committed, so should be counted here.
        assert_eq!(executed_transactions_count, 1);
        assert_eq!(executed_with_successful_result_count, 1);
        assert_eq!(retryable_transaction_indexes, vec![0]);
        assert_matches!(
            commit_transactions_result,
            Err(PohRecorderError::MaxHeightReached)
        );

        // Stop and join the simulated PoH thread.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();

        // Only the first (committed) transfer should have affected the balance.
        assert_eq!(bank.get_balance(&pubkey), 1);
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_bank_process_and_record_transactions_all_unexecuted() {
    // Verifies that a transaction which fails pre-execution (duplicate account
    // key in the message) is attempted but not executed, not retryable, and is
    // reported as committed-with-NotCommitted status.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let pubkey = solana_sdk::pubkey::new_rand();

    let transactions = {
        let mut tx =
            system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash());
        // Add duplicate account key
        tx.message.account_keys.push(pubkey);
        sanitize_transactions(vec![tx])
    };

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(blockstore),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        // Background thread servicing the record channel.
        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        poh_recorder.write().unwrap().set_bank(&bank, false);
        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        let process_transactions_batch_output = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &None,
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        let ExecuteAndCommitTransactionsOutput {
            transactions_attempted_execution_count,
            executed_transactions_count,
            executed_with_successful_result_count,
            commit_transactions_result,
            retryable_transaction_indexes,
            ..
        } = process_transactions_batch_output.execute_and_commit_transactions_output;

        // Attempted once, never executed, and not worth retrying.
        assert_eq!(transactions_attempted_execution_count, 1);
        assert_eq!(executed_transactions_count, 0);
        assert_eq!(executed_with_successful_result_count, 0);
        assert!(retryable_transaction_indexes.is_empty());
        assert_eq!(
            commit_transactions_result.ok(),
            Some(vec![CommitTransactionDetails::NotCommitted; 1])
        );

        // Stop and join the simulated PoH thread.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_bank_process_and_record_transactions_cost_tracker() {
    // Verifies the bank's cost tracker bookkeeping around
    // `process_and_record_transactions`:
    //   1. a committed transaction adds its cost to the block cost, and
    //   2. a transaction skipped due to account-lock conflicts contributes
    //      nothing to the tracker.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let pubkey = solana_sdk::pubkey::new_rand();

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(blockstore),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        // Background thread servicing the record channel.
        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        poh_recorder.write().unwrap().set_bank(&bank, false);
        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        let qos_service = QosService::new(1);

        // Convenience accessors for the tracker state under test.
        let get_block_cost = || bank.read_cost_tracker().unwrap().block_cost();
        let get_tx_count = || bank.read_cost_tracker().unwrap().transaction_count();
        assert_eq!(get_block_cost(), 0);
        assert_eq!(get_tx_count(), 0);

        //
        // TEST: cost tracker's block cost increases when successfully processing a tx
        //

        let transactions = sanitize_transactions(vec![system_transaction::transfer(
            &mint_keypair,
            &pubkey,
            1,
            genesis_config.hash(),
        )]);

        let process_transactions_batch_output = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &None,
            &replay_vote_sender,
            &qos_service,
            None,
        );

        let ExecuteAndCommitTransactionsOutput {
            executed_with_successful_result_count,
            commit_transactions_result,
            ..
        } = process_transactions_batch_output.execute_and_commit_transactions_output;
        assert_eq!(executed_with_successful_result_count, 1);
        assert!(commit_transactions_result.is_ok());

        // Remember the cost of one transfer to compare against below.
        let single_transfer_cost = get_block_cost();
        assert_ne!(single_transfer_cost, 0);
        assert_eq!(get_tx_count(), 1);

        //
        // TEST: When a tx in a batch can't be executed (here because of account
        // locks), then its cost does not affect the cost tracker.
        //

        let allocate_keypair = Keypair::new();
        let transactions = sanitize_transactions(vec![
            system_transaction::transfer(&mint_keypair, &pubkey, 2, genesis_config.hash()),
            // intentionally use a tx that has a different cost
            system_transaction::allocate(
                &mint_keypair,
                &allocate_keypair,
                genesis_config.hash(),
                1,
            ),
        ]);

        let process_transactions_batch_output = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &None,
            &replay_vote_sender,
            &qos_service,
            None,
        );

        let ExecuteAndCommitTransactionsOutput {
            executed_with_successful_result_count,
            commit_transactions_result,
            retryable_transaction_indexes,
            ..
        } = process_transactions_batch_output.execute_and_commit_transactions_output;
        // Only the transfer ran; the allocate conflicted on the mint keypair
        // lock and is retryable.
        assert_eq!(executed_with_successful_result_count, 1);
        assert!(commit_transactions_result.is_ok());
        assert_eq!(retryable_transaction_indexes, vec![1]);

        // Block cost grew only by the second transfer's cost — the skipped
        // allocate contributed nothing.
        assert_eq!(get_block_cost(), 2 * single_transfer_cost);
        assert_eq!(get_tx_count(), 2);

        // Stop and join the simulated PoH thread.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
fn simulate_poh(
|
|
record_receiver: Receiver<Record>,
|
|
poh_recorder: &Arc<RwLock<PohRecorder>>,
|
|
) -> JoinHandle<()> {
|
|
let poh_recorder = poh_recorder.clone();
|
|
let is_exited = poh_recorder.read().unwrap().is_exited.clone();
|
|
let tick_producer = Builder::new()
|
|
.name("solana-simulate_poh".to_string())
|
|
.spawn(move || loop {
|
|
PohService::read_record_receiver_and_process(
|
|
&poh_recorder,
|
|
&record_receiver,
|
|
Duration::from_millis(10),
|
|
);
|
|
if is_exited.load(Ordering::Relaxed) {
|
|
break;
|
|
}
|
|
});
|
|
tick_producer.unwrap()
|
|
}
|
|
|
|
#[test]
fn test_bank_process_and_record_transactions_account_in_use() {
    // Two transfers from the same mint keypair conflict on its account lock:
    // only the first should execute; the second is reported retryable.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let pubkey = solana_sdk::pubkey::new_rand();
    let pubkey1 = solana_sdk::pubkey::new_rand();

    // Both txs share `mint_keypair` as the writable fee payer/source.
    let transactions = sanitize_transactions(vec![
        system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
        system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
    ]);

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &Arc::new(blockstore),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        poh_recorder.write().unwrap().set_bank(&bank, false);

        // Background thread servicing the record channel.
        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        let process_transactions_batch_output = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &None,
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        // Stop and join the simulated PoH thread before inspecting results.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();

        let ExecuteAndCommitTransactionsOutput {
            transactions_attempted_execution_count,
            executed_transactions_count,
            retryable_transaction_indexes,
            commit_transactions_result,
            ..
        } = process_transactions_batch_output.execute_and_commit_transactions_output;

        // Both attempted; only the first executed; the lock-conflicted second
        // tx (index 1) is retryable.
        assert_eq!(transactions_attempted_execution_count, 2);
        assert_eq!(executed_transactions_count, 1);
        assert_eq!(retryable_transaction_indexes, vec![1],);
        assert!(commit_transactions_result.is_ok());
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_process_transactions_returns_unprocessed_txs() {
    // With no working bank set on the PoH recorder, recording fails with
    // MaxHeightReached: nothing commits and every transaction is returned as
    // retryable for the next leader slot.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));

    let pubkey = solana_sdk::pubkey::new_rand();

    let transactions = sanitize_transactions(vec![system_transaction::transfer(
        &mint_keypair,
        &pubkey,
        1,
        genesis_config.hash(),
    )]);

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &solana_sdk::pubkey::new_rand(),
            &Arc::new(blockstore),
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );

        // Poh Recorder has no working bank, so should throw MaxHeightReached error on
        // record
        let recorder = poh_recorder.recorder();

        let poh_simulator = simulate_poh(record_receiver, &Arc::new(RwLock::new(poh_recorder)));

        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        let process_transactions_summary = BankingStage::process_transactions(
            &bank,
            &Instant::now(),
            &transactions,
            &recorder,
            &None,
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        let ProcessTransactionsSummary {
            reached_max_poh_height,
            transactions_attempted_execution_count,
            committed_transactions_count,
            committed_transactions_with_successful_result_count,
            failed_commit_count,
            mut retryable_transaction_indexes,
            ..
        } = process_transactions_summary;
        assert!(reached_max_poh_height);
        assert_eq!(transactions_attempted_execution_count, 1);
        assert_eq!(failed_commit_count, 1);
        // MaxHeightReached error does not commit, should be zero here
        assert_eq!(committed_transactions_count, 0);
        assert_eq!(committed_transactions_with_successful_result_count, 0);

        // Every submitted transaction should come back retryable.
        retryable_transaction_indexes.sort_unstable();
        let expected: Vec<usize> = (0..transactions.len()).collect();
        assert_eq!(retryable_transaction_indexes, expected);

        // Stop and join the simulated PoH thread.
        recorder.is_exited.store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }

    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
// Test helper: runs `BankingStage::process_transactions` over the given
// transactions against a throwaway ledger and a simulated PoH service, then
// tears the service down and returns the processing summary.
fn execute_transactions_with_dummy_poh_service(
    bank: Arc<Bank>,
    transactions: Vec<Transaction>,
) -> ProcessTransactionsSummary {
    let transactions = sanitize_transactions(transactions);
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    let blockstore = Blockstore::open(ledger_path.path())
        .expect("Expected to be able to open database ledger");
    let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
        bank.tick_height(),
        bank.last_blockhash(),
        bank.clone(),
        Some((4, 4)),
        bank.ticks_per_slot(),
        &Pubkey::new_unique(),
        &Arc::new(blockstore),
        &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
        &PohConfig::default(),
        Arc::new(AtomicBool::default()),
    );
    let recorder = poh_recorder.recorder();
    let poh_recorder = Arc::new(RwLock::new(poh_recorder));

    // Recording only succeeds once a working bank is set.
    poh_recorder.write().unwrap().set_bank(&bank, false);

    // Background thread servicing the record channel.
    let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

    let (replay_vote_sender, _replay_vote_receiver) = unbounded();

    let process_transactions_summary = BankingStage::process_transactions(
        &bank,
        &Instant::now(),
        &transactions,
        &recorder,
        &None,
        &replay_vote_sender,
        &QosService::new(1),
        None,
    );

    // Signal the simulated PoH thread to stop, then join it.
    poh_recorder
        .read()
        .unwrap()
        .is_exited
        .store(true, Ordering::Relaxed);
    let _ = poh_simulator.join();

    process_transactions_summary
}
|
|
|
|
#[test]
fn test_process_transactions_instruction_error() {
    // Transactions that fail with an InstructionError (insufficient funds)
    // are still committed; lock-conflicted ones in the same batch come back
    // retryable. Cost limits are raised to MAX so QoS doesn't filter anything.
    solana_logger::setup();
    let lamports = 10_000;
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(lamports);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    // set cost tracker limits to MAX so it will not filter out TXs
    bank.write_cost_tracker()
        .unwrap()
        .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);

    // Transfer more than the balance of the mint keypair, should cause a
    // InstructionError::InsufficientFunds that is then committed. Needs to be
    // MAX_NUM_TRANSACTIONS_PER_BATCH at least so it doesn't conflict on account locks
    // with the below transaction
    let mut transactions = vec![
        system_transaction::transfer(
            &mint_keypair,
            &Pubkey::new_unique(),
            lamports + 1,
            genesis_config.hash(),
        );
        MAX_NUM_TRANSACTIONS_PER_BATCH
    ];

    // Make one transaction that will succeed.
    transactions.push(system_transaction::transfer(
        &mint_keypair,
        &Pubkey::new_unique(),
        1,
        genesis_config.hash(),
    ));

    let transactions_count = transactions.len();
    let ProcessTransactionsSummary {
        reached_max_poh_height,
        transactions_attempted_execution_count,
        committed_transactions_count,
        committed_transactions_with_successful_result_count,
        failed_commit_count,
        retryable_transaction_indexes,
        ..
    } = execute_transactions_with_dummy_poh_service(bank, transactions);

    // All the transactions should have been replayed, but only 1 committed
    assert!(!reached_max_poh_height);
    assert_eq!(transactions_attempted_execution_count, transactions_count);
    // Both transactions should have been committed, even though one was an error,
    // because InstructionErrors are committed
    assert_eq!(committed_transactions_count, 2);
    assert_eq!(committed_transactions_with_successful_result_count, 1);
    assert_eq!(failed_commit_count, 0);
    // The middle transactions lost the mint-keypair lock race and are retryable.
    assert_eq!(
        retryable_transaction_indexes,
        (1..transactions_count - 1).collect::<Vec<usize>>()
    );
}
|
|
#[test]
fn test_process_transactions_account_in_use() {
    // All transactions conflict on the mint keypair's account lock. Only the
    // first of the big batch and the lone follow-up (a separate batch) should
    // commit; everything in between is retryable. Cost limits are raised to
    // MAX so QoS doesn't filter anything.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    // set cost tracker limits to MAX so it will not filter out TXs
    bank.write_cost_tracker()
        .unwrap()
        .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);

    // Make all repetitive transactions that conflict on the `mint_keypair`, so only 1 should be executed
    let mut transactions = vec![
        system_transaction::transfer(
            &mint_keypair,
            &Pubkey::new_unique(),
            1,
            genesis_config.hash()
        );
        MAX_NUM_TRANSACTIONS_PER_BATCH
    ];

    // Make one more in separate batch that also conflicts, but because it's in a separate batch, it
    // should be executed
    transactions.push(system_transaction::transfer(
        &mint_keypair,
        &Pubkey::new_unique(),
        1,
        genesis_config.hash(),
    ));

    let transactions_count = transactions.len();
    let ProcessTransactionsSummary {
        reached_max_poh_height,
        transactions_attempted_execution_count,
        committed_transactions_count,
        committed_transactions_with_successful_result_count,
        failed_commit_count,
        retryable_transaction_indexes,
        ..
    } = execute_transactions_with_dummy_poh_service(bank, transactions);

    // All the transactions should have been replayed, but only 2 committed (first and last)
    assert!(!reached_max_poh_height);
    assert_eq!(transactions_attempted_execution_count, transactions_count);
    assert_eq!(committed_transactions_count, 2);
    assert_eq!(committed_transactions_with_successful_result_count, 2);
    assert_eq!(failed_commit_count, 0,);

    // Everything except first and last index of the transactions failed and are last retryable
    assert_eq!(
        retryable_transaction_indexes,
        (1..transactions_count - 1).collect::<Vec<usize>>()
    );
}
|
|
|
|
#[test]
fn test_write_persist_transaction_status() {
    // End-to-end check that statuses produced while processing transactions
    // (one success, one InstructionError) flow through the
    // TransactionStatusService into the blockstore and are readable back from
    // the rooted block.
    solana_logger::setup();
    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(solana_sdk::native_token::sol_to_lamports(1000.0));
    // Enable rent so the 2x-rent-exempt transfer below can fail with an
    // insufficient-funds InstructionError.
    genesis_config.rent.lamports_per_byte_year = 50;
    genesis_config.rent.exemption_threshold = 2.0;
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let pubkey = solana_sdk::pubkey::new_rand();
    let pubkey1 = solana_sdk::pubkey::new_rand();
    let keypair1 = Keypair::new();

    let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0);

    // This transfer is fully funded and should succeed.
    let success_tx = system_transaction::transfer(
        &mint_keypair,
        &pubkey,
        rent_exempt_amount,
        genesis_config.hash(),
    );
    let success_signature = success_tx.signatures[0];
    let entry_1 = next_entry(&genesis_config.hash(), 1, vec![success_tx.clone()]);
    // keypair1 only holds rent_exempt_amount (funded below), so sending twice
    // that should produce an InstructionError.
    let ix_error_tx = system_transaction::transfer(
        &keypair1,
        &pubkey1,
        2 * rent_exempt_amount,
        genesis_config.hash(),
    );
    let ix_error_signature = ix_error_tx.signatures[0];
    let entry_2 = next_entry(&entry_1.hash, 1, vec![ix_error_tx.clone()]);
    let entries = vec![entry_1, entry_2];

    let transactions = sanitize_transactions(vec![success_tx, ix_error_tx]);
    bank.transfer(rent_exempt_amount, &mint_keypair, &keypair1.pubkey())
        .unwrap();

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let blockstore = Arc::new(blockstore);
        let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &pubkey,
            &blockstore,
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        // Background thread servicing the record channel.
        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        poh_recorder.write().unwrap().set_bank(&bank, false);

        // Populate and root the slot in the blockstore so the status service
        // has a rooted block to attach statuses to.
        let shreds = entries_to_test_shreds(
            &entries,
            bank.slot(),
            0,    // parent_slot
            true, // is_full_slot
            0,    // version
            true, // merkle_variant
        );
        blockstore.insert_shreds(shreds, None, false).unwrap();
        blockstore.set_roots(std::iter::once(&bank.slot())).unwrap();

        let (transaction_status_sender, transaction_status_receiver) = unbounded();
        let transaction_status_service = TransactionStatusService::new(
            transaction_status_receiver,
            Arc::new(AtomicU64::default()),
            true,
            None,
            blockstore.clone(),
            false,
            &Arc::new(AtomicBool::new(false)),
        );

        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        // Process with a status sender attached so results are persisted.
        let _ = BankingStage::process_and_record_transactions(
            &bank,
            &transactions,
            &recorder,
            0,
            &Some(TransactionStatusSender {
                sender: transaction_status_sender,
            }),
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        // The sender was moved into the stage call above, so the service's
        // channel is closed and join() drains remaining work then exits.
        transaction_status_service.join().unwrap();

        // Read the persisted block back and compare signatures/statuses.
        let confirmed_block = blockstore.get_rooted_block(bank.slot(), false).unwrap();
        let actual_tx_results: Vec<_> = confirmed_block
            .transactions
            .into_iter()
            .map(|VersionedTransactionWithStatusMeta { transaction, meta }| {
                (transaction.signatures[0], meta.status)
            })
            .collect();
        let expected_tx_results = vec![
            (success_signature, Ok(())),
            (
                ix_error_signature,
                Err(TransactionError::InstructionError(
                    0,
                    InstructionError::Custom(1),
                )),
            ),
        ];
        assert_eq!(actual_tx_results, expected_tx_results);

        // Stop and join the simulated PoH thread.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
fn generate_new_address_lookup_table(
|
|
authority: Option<Pubkey>,
|
|
num_addresses: usize,
|
|
) -> AddressLookupTable<'static> {
|
|
let mut addresses = Vec::with_capacity(num_addresses);
|
|
addresses.resize_with(num_addresses, Pubkey::new_unique);
|
|
AddressLookupTable {
|
|
meta: LookupTableMeta {
|
|
authority,
|
|
..LookupTableMeta::default()
|
|
},
|
|
addresses: Cow::Owned(addresses),
|
|
}
|
|
}
|
|
|
|
fn store_address_lookup_table(
|
|
bank: &Bank,
|
|
account_address: Pubkey,
|
|
address_lookup_table: AddressLookupTable<'static>,
|
|
) -> AccountSharedData {
|
|
let data = address_lookup_table.serialize_for_tests().unwrap();
|
|
let mut account =
|
|
AccountSharedData::new(1, data.len(), &solana_address_lookup_table_program::id());
|
|
account.set_data(data);
|
|
bank.store_account(&account_address, &account);
|
|
|
|
account
|
|
}
|
|
|
|
#[test]
fn test_write_persist_loaded_addresses() {
    // Verifies that addresses resolved through an on-chain address lookup
    // table end up persisted in the transaction status metadata
    // (`loaded_addresses`) after a v0 transaction is processed and recorded.
    solana_logger::setup();
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10_000);
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
    let keypair = Keypair::new();

    // Create a lookup table with two addresses and store it on-chain.
    let address_table_key = Pubkey::new_unique();
    let address_table_state = generate_new_address_lookup_table(None, 2);
    store_address_lookup_table(&bank, address_table_key, address_table_state);

    // NOTE(review): advancing to a child bank appears to be needed so the
    // lookup table created in the parent slot is resolvable here — confirm.
    let bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::new_unique(), 1));
    // Build a v0 message that loads one writable and one readonly address
    // from the lookup table and carries no instructions.
    let message = VersionedMessage::V0(v0::Message {
        header: MessageHeader {
            num_required_signatures: 1,
            num_readonly_signed_accounts: 0,
            num_readonly_unsigned_accounts: 0,
        },
        recent_blockhash: genesis_config.hash(),
        account_keys: vec![keypair.pubkey()],
        address_table_lookups: vec![MessageAddressTableLookup {
            account_key: address_table_key,
            writable_indexes: vec![0],
            readonly_indexes: vec![1],
        }],
        instructions: vec![],
    });

    let tx = VersionedTransaction::try_new(message, &[&keypair]).unwrap();
    let sanitized_tx = SanitizedTransaction::try_create(
        tx.clone(),
        MessageHash::Compute,
        Some(false),
        bank.as_ref(),
        true, // require_static_program_ids
    )
    .unwrap();

    let entry = next_versioned_entry(&genesis_config.hash(), 1, vec![tx]);
    let entries = vec![entry];

    // Fund the fee payer.
    bank.transfer(1, &mint_keypair, &keypair.pubkey()).unwrap();

    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Blockstore::open(ledger_path.path())
            .expect("Expected to be able to open database ledger");
        let blockstore = Arc::new(blockstore);
        let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
            bank.tick_height(),
            bank.last_blockhash(),
            bank.clone(),
            Some((4, 4)),
            bank.ticks_per_slot(),
            &Pubkey::new_unique(),
            &blockstore,
            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
            &PohConfig::default(),
            Arc::new(AtomicBool::default()),
        );
        let recorder = poh_recorder.recorder();
        let poh_recorder = Arc::new(RwLock::new(poh_recorder));

        let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

        poh_recorder.write().unwrap().set_bank(&bank, false);

        // Shred the entries into the blockstore and root the slot so
        // `get_rooted_block` below can retrieve it.
        let shreds = entries_to_test_shreds(
            &entries,
            bank.slot(),
            0, // parent_slot
            true, // is_full_slot
            0, // version
            true, // merkle_variant
        );
        blockstore.insert_shreds(shreds, None, false).unwrap();
        blockstore.set_roots(std::iter::once(&bank.slot())).unwrap();

        let (transaction_status_sender, transaction_status_receiver) = unbounded();
        let transaction_status_service = TransactionStatusService::new(
            transaction_status_receiver,
            Arc::new(AtomicU64::default()),
            true,
            None,
            blockstore.clone(),
            false,
            &Arc::new(AtomicBool::new(false)),
        );

        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        // Process the transaction; status metadata flows to the
        // transaction status service through the sender passed here.
        let _ = BankingStage::process_and_record_transactions(
            &bank,
            &[sanitized_tx.clone()],
            &recorder,
            0,
            &Some(TransactionStatusSender {
                sender: transaction_status_sender,
            }),
            &replay_vote_sender,
            &QosService::new(1),
            None,
        );

        transaction_status_service.join().unwrap();

        let mut confirmed_block = blockstore.get_rooted_block(bank.slot(), false).unwrap();
        assert_eq!(confirmed_block.transactions.len(), 1);

        // The persisted metadata must carry the addresses loaded through
        // the lookup table.
        let recorded_meta = confirmed_block.transactions.pop().unwrap().meta;
        assert_eq!(
            recorded_meta,
            TransactionStatusMeta {
                status: Ok(()),
                pre_balances: vec![1, 0, 0],
                post_balances: vec![1, 0, 0],
                pre_token_balances: Some(vec![]),
                post_token_balances: Some(vec![]),
                rewards: Some(vec![]),
                loaded_addresses: sanitized_tx.get_loaded_addresses(),
                compute_units_consumed: Some(0),
                ..TransactionStatusMeta::default()
            }
        );
        // Signal the PoH simulator thread to exit before dropping the ledger.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[allow(clippy::type_complexity)]
// Shared test fixture: destroys/reopens the ledger at `ledger_path`, builds
// a bank and PoH recorder, and returns three transfer transactions that all
// spend from the same mint keypair — so they conflict on the fee payer and
// cannot be processed in parallel.
fn setup_conflicting_transactions(
    ledger_path: &Path,
) -> (
    Vec<Transaction>,
    Arc<Bank>,
    Arc<RwLock<PohRecorder>>,
    Receiver<WorkingBankEntry>,
    JoinHandle<()>,
) {
    // Start from a clean ledger directory.
    Blockstore::destroy(ledger_path).unwrap();
    let genesis_config_info = create_slow_genesis_config(10_000);
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = &genesis_config_info;
    let blockstore =
        Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
    let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config));
    let exit = Arc::new(AtomicBool::default());
    let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
        bank.tick_height(),
        bank.last_blockhash(),
        bank.clone(),
        Some((4, 4)),
        bank.ticks_per_slot(),
        &solana_sdk::pubkey::new_rand(),
        &Arc::new(blockstore),
        &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
        &PohConfig::default(),
        exit,
    );
    let poh_recorder = Arc::new(RwLock::new(poh_recorder));

    // Set up unparallelizable conflicting transactions
    let pubkey0 = solana_sdk::pubkey::new_rand();
    let pubkey1 = solana_sdk::pubkey::new_rand();
    let pubkey2 = solana_sdk::pubkey::new_rand();
    let transactions = vec![
        system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()),
        system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()),
        system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()),
    ];
    // Background thread consuming `record_receiver`; presumably stands in
    // for the real PoH service (see `simulate_poh` elsewhere in this file).
    let poh_simulator = simulate_poh(record_receiver, &poh_recorder);

    (
        transactions,
        bank,
        poh_recorder,
        entry_receiver,
        poh_simulator,
    )
}
|
|
|
|
#[test]
fn test_consume_buffered_packets() {
    // Once the PoH recorder has a working bank, all buffered packets should
    // be consumed — even when every transaction conflicts with the others.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
            setup_conflicting_transactions(ledger_path.path());
        let recorder = poh_recorder.read().unwrap().recorder();
        let num_conflicting_transactions = transactions.len();
        let deserialized_packets =
            unprocessed_packet_batches::transactions_to_deserialized_packets(&transactions)
                .unwrap();
        assert_eq!(deserialized_packets.len(), num_conflicting_transactions);
        // Buffer all packets in transaction (non-vote) storage.
        let mut buffered_packet_batches =
            UnprocessedTransactionStorage::new_transaction_storage(
                UnprocessedPacketBatches::from_iter(
                    deserialized_packets.into_iter(),
                    num_conflicting_transactions,
                ),
                ThreadType::Transactions,
            );

        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        // When the working bank in poh_recorder is None, no packets should be processed (consume will not be called)
        assert!(!poh_recorder.read().unwrap().has_bank());
        assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions);
        // When the working bank in poh_recorder is Some, all packets should be processed.
        // Multi-Iterator will process them 1-by-1 if all txs are conflicting.
        poh_recorder.write().unwrap().set_bank(&bank, false);
        let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
        BankingStage::consume_buffered_packets(
            &bank_start,
            &mut buffered_packet_batches,
            &None,
            &replay_vote_sender,
            None::<Box<dyn Fn()>>, // presumably the per-batch test hook; see test_consume_buffered_packets_interrupted
            &BankingStageStats::default(),
            &recorder,
            &QosService::new(1),
            &mut LeaderSlotMetricsTracker::new(0),
            None,
        );
        // Everything must have been drained from the buffer.
        assert!(buffered_packet_batches.is_empty());
        // Signal the PoH simulator thread to exit before leaving the scope.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_consume_buffered_packets_sanitization_error() {
    // A packet whose transaction fails sanitization (here: a duplicated
    // account key) must still be drained from the buffer rather than
    // lingering and being retried forever.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let (mut transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
            setup_conflicting_transactions(ledger_path.path());
        // Duplicate an account key in the first transaction so it is
        // rejected during sanitization downstream.
        let duplicate_account_key = transactions[0].message.account_keys[0];
        transactions[0]
            .message
            .account_keys
            .push(duplicate_account_key); // corrupt transaction
        let recorder = poh_recorder.read().unwrap().recorder();
        let num_conflicting_transactions = transactions.len();
        let deserialized_packets =
            unprocessed_packet_batches::transactions_to_deserialized_packets(&transactions)
                .unwrap();
        assert_eq!(deserialized_packets.len(), num_conflicting_transactions);
        let mut buffered_packet_batches =
            UnprocessedTransactionStorage::new_transaction_storage(
                UnprocessedPacketBatches::from_iter(
                    deserialized_packets.into_iter(),
                    num_conflicting_transactions,
                ),
                ThreadType::Transactions,
            );

        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        // When the working bank in poh_recorder is None, no packets should be processed
        assert!(!poh_recorder.read().unwrap().has_bank());
        assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions);
        // When the working bank in poh_recorder is Some, all packets should be processed.
        // Multi-Iterator will process them 1-by-1 if all txs are conflicting.
        poh_recorder.write().unwrap().set_bank(&bank, false);
        let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
        BankingStage::consume_buffered_packets(
            &bank_start,
            &mut buffered_packet_batches,
            &None,
            &replay_vote_sender,
            None::<Box<dyn Fn()>>,
            &BankingStageStats::default(),
            &recorder,
            &QosService::new(1),
            &mut LeaderSlotMetricsTracker::new(0),
            None,
        );
        // The corrupted packet must have been dropped along with the
        // successfully processed ones.
        assert!(buffered_packet_batches.is_empty());
        // Signal the PoH simulator thread to exit before leaving the scope.
        poh_recorder
            .read()
            .unwrap()
            .is_exited
            .store(true, Ordering::Relaxed);
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_consume_buffered_packets_interrupted() {
    // Drives consume_buffered_packets on a worker thread while the main
    // thread steps it batch-by-batch via two rendezvous channels, flipping
    // the recorder's `is_exited` flag just before releasing the last batch.
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let (continue_sender, continue_receiver) = unbounded();
        let (finished_packet_sender, finished_packet_receiver) = unbounded();
        let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
            setup_conflicting_transactions(ledger_path.path());

        // Hook run by the consumer: notify the main thread a batch finished,
        // then block until the main thread allows the next one.
        let test_fn = Some(move || {
            finished_packet_sender.send(()).unwrap();
            continue_receiver.recv().unwrap();
        });
        // When the poh recorder has a bank, it should process all buffered packets.
        let num_conflicting_transactions = transactions.len();
        poh_recorder.write().unwrap().set_bank(&bank, false);
        let poh_recorder_ = poh_recorder.clone();
        let recorder = poh_recorder_.read().unwrap().recorder();
        let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
        let (replay_vote_sender, _replay_vote_receiver) = unbounded();
        // Start up thread to process the banks
        let t_consume = Builder::new()
            .name("consume-buffered-packets".to_string())
            .spawn(move || {
                let num_conflicting_transactions = transactions.len();
                let deserialized_packets =
                    unprocessed_packet_batches::transactions_to_deserialized_packets(
                        &transactions,
                    )
                    .unwrap();
                assert_eq!(deserialized_packets.len(), num_conflicting_transactions);
                let mut buffered_packet_batches =
                    UnprocessedTransactionStorage::new_transaction_storage(
                        UnprocessedPacketBatches::from_iter(
                            deserialized_packets.into_iter(),
                            num_conflicting_transactions,
                        ),
                        ThreadType::Transactions,
                    );
                BankingStage::consume_buffered_packets(
                    &bank_start,
                    &mut buffered_packet_batches,
                    &None,
                    &replay_vote_sender,
                    test_fn,
                    &BankingStageStats::default(),
                    &recorder,
                    &QosService::new(1),
                    &mut LeaderSlotMetricsTracker::new(0),
                    None,
                );

                // Check everything is correct. All valid packets should be processed.
                assert!(buffered_packet_batches.is_empty());
            })
            .unwrap();

        // Should be calling `test_fn` for each non-conflicting batch.
        // In this case each batch is of size 1.
        for i in 0..num_conflicting_transactions {
            finished_packet_receiver.recv().unwrap();
            if i + 1 == num_conflicting_transactions {
                // Ask PoH to exit before releasing the final batch so the
                // simulator thread can wind down.
                poh_recorder
                    .read()
                    .unwrap()
                    .is_exited
                    .store(true, Ordering::Relaxed);
            }
            continue_sender.send(()).unwrap();
        }
        t_consume.join().unwrap();
        let _ = poh_simulator.join();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
|
|
#[test]
fn test_accumulate_execute_units_and_time() {
    // Populate timings for ten distinct programs, each contributing a
    // known amount of microseconds and compute units.
    let mut execute_timings = ExecuteTimings::default();
    for i in 0..10 {
        let timing = ProgramTiming {
            accumulated_us: i * 100,
            accumulated_units: i * 1000,
            count: i as u32,
            errored_txs_compute_consumed: vec![],
            total_errored_units: 0,
        };
        execute_timings
            .details
            .per_program_timings
            .insert(Pubkey::new_unique(), timing);
    }
    // The accumulator should report the totals across all programs.
    let expected_us: u64 = (0..10).map(|i| i * 100).sum();
    let expected_units: u64 = (0..10).map(|i| i * 1000).sum();

    let (units, us) = BankingStage::accumulate_execute_units_and_time(&execute_timings);

    assert_eq!(expected_units, units);
    assert_eq!(expected_us, us);
}
|
|
|
|
#[test]
fn test_unprocessed_transaction_storage_full_send() {
    // End-to-end smoke test: flood the banking stage with vote transactions
    // (via the TPU and gossip vote channels) and transfers (via the
    // non-vote channel), then verify the stage drains everything and shuts
    // down cleanly once the senders are dropped.
    //
    // Fix: the three `.map(|i| { ... })` closures below previously ended the
    // built transaction with a trailing semicolon, so each closure returned
    // `()` and the collected vectors were `Vec<()>` — the packet batches
    // would not contain the intended transactions. The semicolons are
    // removed so the closures return the transactions.
    solana_logger::setup();
    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        ..
    } = create_slow_genesis_config(10000);
    activate_feature(
        &mut genesis_config,
        allow_votes_to_directly_update_vote_state::id(),
    );
    let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
    let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap());
    let start_hash = bank.last_blockhash();
    let banking_tracer = BankingTracer::new_disabled();
    let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();
    let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
    let (gossip_vote_sender, gossip_vote_receiver) =
        banking_tracer.create_channel_gossip_vote();
    let ledger_path = get_tmp_ledger_path_auto_delete!();
    {
        let blockstore = Arc::new(
            Blockstore::open(ledger_path.path())
                .expect("Expected to be able to open database ledger"),
        );
        let poh_config = PohConfig {
            // limit tick count to avoid clearing working_bank at PohRecord then
            // PohRecorderError(MaxHeightReached) at BankingStage
            target_tick_count: Some(bank.max_tick_height() - 1),
            ..PohConfig::default()
        };
        let (exit, poh_recorder, poh_service, _entry_receiver) =
            create_test_recorder(&bank, &blockstore, Some(poh_config), None);
        let (_, cluster_info) = new_test_cluster_info(/*keypair:*/ None);
        let cluster_info = Arc::new(cluster_info);
        let (replay_vote_sender, _replay_vote_receiver) = unbounded();

        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
            non_vote_receiver,
            tpu_vote_receiver,
            gossip_vote_receiver,
            None,
            replay_vote_sender,
            None,
            Arc::new(ConnectionCache::default()),
            bank_forks,
        );

        // Fund 100 payer keypairs so the votes and transfers below have
        // lamports to spend.
        let keypairs = (0..100).map(|_| Keypair::new()).collect_vec();
        let vote_keypairs = (0..100).map(|_| Keypair::new()).collect_vec();
        for keypair in keypairs.iter() {
            bank.process_transaction(&system_transaction::transfer(
                &mint_keypair,
                &keypair.pubkey(),
                20,
                start_hash,
            ))
            .unwrap();
        }

        // Send a bunch of votes and transfers
        let tpu_votes = (0..100_usize)
            .map(|i| {
                new_vote_state_update_transaction(
                    VoteStateUpdate::from(vec![
                        (0, 8),
                        (1, 7),
                        (i as u64 + 10, 6),
                        (i as u64 + 11, 1),
                    ]),
                    Hash::new_unique(),
                    &keypairs[i],
                    &vote_keypairs[i],
                    &vote_keypairs[i],
                    None,
                )
            })
            .collect_vec();
        let gossip_votes = (0..100_usize)
            .map(|i| {
                new_vote_state_update_transaction(
                    VoteStateUpdate::from(vec![
                        (0, 8),
                        (1, 7),
                        (i as u64 + 64 + 5, 6),
                        (i as u64 + 7, 1),
                    ]),
                    Hash::new_unique(),
                    &keypairs[i],
                    &vote_keypairs[i],
                    &vote_keypairs[i],
                    None,
                )
            })
            .collect_vec();
        let txs = (0..100_usize)
            .map(|i| {
                system_transaction::transfer(
                    &keypairs[i],
                    &keypairs[(i + 1) % 100].pubkey(),
                    10,
                    start_hash,
                )
            })
            .collect_vec();

        let non_vote_packet_batches = to_packet_batches(&txs, 10);
        let tpu_packet_batches = to_packet_batches(&tpu_votes, 10);
        let gossip_packet_batches = to_packet_batches(&gossip_votes, 10);

        // Send em all
        [
            (non_vote_packet_batches, non_vote_sender),
            (tpu_packet_batches, tpu_vote_sender),
            (gossip_packet_batches, gossip_vote_sender),
        ]
        .into_iter()
        .map(|(packet_batches, sender)| {
            Builder::new()
                .spawn(move || {
                    sender
                        .send(BankingPacketBatch::new((packet_batches, None)))
                        .unwrap()
                })
                .unwrap()
        })
        .for_each(|handle| handle.join().unwrap());

        // The sender halves were moved into (and dropped by) the spawned
        // threads above, so the banking stage sees disconnected channels
        // and its threads exit, letting `join` complete.
        banking_stage.join().unwrap();
        exit.store(true, Ordering::Relaxed);
        poh_service.join().unwrap();
    }
    Blockstore::destroy(ledger_path.path()).unwrap();
}
|
|
}
|