2021-02-16 13:48:20 -08:00
|
|
|
#![allow(clippy::integer_arithmetic)]
|
2021-12-03 09:00:31 -08:00
|
|
|
use {
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
clap::{crate_description, crate_name, Arg, ArgEnum, Command},
|
2022-01-11 02:44:46 -08:00
|
|
|
crossbeam_channel::{unbounded, Receiver},
|
2021-12-03 09:00:31 -08:00
|
|
|
log::*,
|
|
|
|
rand::{thread_rng, Rng},
|
|
|
|
rayon::prelude::*,
|
2022-11-18 11:21:45 -08:00
|
|
|
solana_client::connection_cache::ConnectionCache,
|
2023-01-25 04:54:38 -08:00
|
|
|
solana_core::{
|
|
|
|
banking_stage::BankingStage,
|
|
|
|
banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT},
|
|
|
|
},
|
2021-12-03 09:00:31 -08:00
|
|
|
solana_gossip::cluster_info::{ClusterInfo, Node},
|
|
|
|
solana_ledger::{
|
|
|
|
blockstore::Blockstore,
|
|
|
|
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
|
|
|
get_tmp_ledger_path,
|
2022-02-07 18:28:28 -08:00
|
|
|
leader_schedule_cache::LeaderScheduleCache,
|
2021-12-03 09:00:31 -08:00
|
|
|
},
|
|
|
|
solana_measure::measure::Measure,
|
2022-05-02 15:41:47 -07:00
|
|
|
solana_perf::packet::{to_packet_batches, PacketBatch},
|
2021-12-03 09:00:31 -08:00
|
|
|
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
|
2023-03-23 17:05:54 -07:00
|
|
|
solana_runtime::{
|
|
|
|
bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache,
|
|
|
|
},
|
2021-12-03 09:00:31 -08:00
|
|
|
solana_sdk::{
|
2022-10-25 07:33:53 -07:00
|
|
|
compute_budget::ComputeBudgetInstruction,
|
2021-12-03 09:00:31 -08:00
|
|
|
hash::Hash,
|
2022-10-25 07:33:53 -07:00
|
|
|
message::Message,
|
|
|
|
pubkey::{self, Pubkey},
|
|
|
|
signature::{Keypair, Signature, Signer},
|
|
|
|
system_instruction, system_transaction,
|
2021-12-03 09:00:31 -08:00
|
|
|
timing::{duration_as_us, timestamp},
|
|
|
|
transaction::Transaction,
|
|
|
|
},
|
|
|
|
solana_streamer::socket::SocketAddrSpace,
|
2023-02-01 18:10:06 -08:00
|
|
|
solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE,
|
2021-12-03 09:00:31 -08:00
|
|
|
std::{
|
2022-07-05 07:29:44 -07:00
|
|
|
sync::{atomic::Ordering, Arc, RwLock},
|
2021-12-03 09:00:31 -08:00
|
|
|
thread::sleep,
|
|
|
|
time::{Duration, Instant},
|
|
|
|
},
|
2020-03-21 10:54:40 -07:00
|
|
|
};
|
2019-08-30 11:10:32 -07:00
|
|
|
|
2022-10-25 07:33:53 -07:00
|
|
|
// Compute-unit cost of one benchmark transfer transaction:
//   1 * SIGNATURE_COST +
//   2 * WRITE_LOCK_UNITS +
//   1 * system_program
//   = 1470 CU
// Used below as the requested compute-unit limit for each generated transfer.
const TRANSFER_TRANSACTION_COST: u32 = 1470;
|
|
|
|
|
2019-08-30 11:10:32 -07:00
|
|
|
fn check_txs(
|
2019-09-18 12:16:22 -07:00
|
|
|
receiver: &Arc<Receiver<WorkingBankEntry>>,
|
2019-08-30 11:10:32 -07:00
|
|
|
ref_tx_count: usize,
|
2022-07-05 07:29:44 -07:00
|
|
|
poh_recorder: &Arc<RwLock<PohRecorder>>,
|
2019-08-30 11:10:32 -07:00
|
|
|
) -> bool {
|
|
|
|
let mut total = 0;
|
|
|
|
let now = Instant::now();
|
|
|
|
let mut no_bank = false;
|
|
|
|
loop {
|
2019-10-16 12:53:11 -07:00
|
|
|
if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::from_millis(10))
|
2019-09-18 12:16:22 -07:00
|
|
|
{
|
|
|
|
total += entry.transactions.len();
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
|
|
|
if total >= ref_tx_count {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if now.elapsed().as_secs() > 60 {
|
|
|
|
break;
|
|
|
|
}
|
2022-07-05 07:29:44 -07:00
|
|
|
if poh_recorder.read().unwrap().bank().is_none() {
|
2019-08-30 11:10:32 -07:00
|
|
|
no_bank = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !no_bank {
|
|
|
|
assert!(total >= ref_tx_count);
|
|
|
|
}
|
|
|
|
no_bank
|
|
|
|
}
|
|
|
|
|
2022-05-22 18:00:42 -07:00
|
|
|
/// Degree of write-lock contention among the generated test transactions;
/// selected on the command line via `--write-lock-contention`.
#[derive(ArgEnum, Clone, Copy, PartialEq, Eq)]
enum WriteLockContention {
    /// No transactions lock the same accounts.
    None,
    /// Transactions don't lock the same account, unless they belong to the same batch.
    SameBatchOnly,
    /// All transactions write lock the same account.
    Full,
}
|
|
|
|
|
|
|
|
impl WriteLockContention {
|
|
|
|
fn possible_values<'a>() -> impl Iterator<Item = clap::PossibleValue<'a>> {
|
|
|
|
Self::value_variants()
|
|
|
|
.iter()
|
|
|
|
.filter_map(|v| v.to_possible_value())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse `WriteLockContention` from its CLI string form by delegating to the
// clap-derived `ArgEnum` implementation (second argument `false` requests an
// exact, case-sensitive match against the variant names).
impl std::str::FromStr for WriteLockContention {
    type Err = String;
    fn from_str(input: &str) -> Result<Self, String> {
        ArgEnum::from_str(input, false)
    }
}
|
|
|
|
|
2020-05-22 15:01:01 -07:00
|
|
|
/// Generates `total_num_transactions` transfer transactions in parallel, with
/// account overlap controlled by `contention`.
///
/// Each transaction gets a random payer key (account_keys[0]) and a
/// destination (account_keys[1]) chosen per the contention mode:
/// - `None`: fresh random destination per transaction (no overlap);
/// - `SameBatchOnly`: simulated-mint transactions within the same batch share
///   one per-batch pubkey from `chunk_pubkeys`; others get a random one;
/// - `Full`: every transaction writes to the single shared `to_pubkey`.
///
/// Simulated mint transactions are given a higher compute-unit price (5 vs 1)
/// so they sort ahead in priority.
///
/// NOTE(review): `chunk_pubkeys` has `total_num_transactions / packets_per_batch`
/// entries (integer division), so indexing it with `i / packets_per_batch`
/// assumes `total_num_transactions` is a multiple of `packets_per_batch` —
/// otherwise the trailing transactions would panic on out-of-bounds. The only
/// visible caller (`PacketsPerIteration::new`) satisfies this by construction;
/// confirm before reusing elsewhere.
fn make_accounts_txs(
    total_num_transactions: usize,
    packets_per_batch: usize,
    hash: Hash,
    contention: WriteLockContention,
    simulate_mint: bool,
    mint_txs_percentage: usize,
) -> Vec<Transaction> {
    let to_pubkey = pubkey::new_rand();
    // One shared write-lock target per batch, used by SameBatchOnly mode.
    let chunk_pubkeys: Vec<pubkey::Pubkey> = (0..total_num_transactions / packets_per_batch)
        .map(|_| pubkey::new_rand())
        .collect();
    let payer_key = Keypair::new();
    (0..total_num_transactions)
        .into_par_iter()
        .map(|i| {
            let is_simulated_mint = is_simulated_mint_transaction(
                simulate_mint,
                i,
                packets_per_batch,
                mint_txs_percentage,
            );
            // simulated mint transactions have higher compute-unit-price
            let compute_unit_price = if is_simulated_mint { 5 } else { 1 };
            let mut new = make_transfer_transaction_with_compute_unit_price(
                &payer_key,
                &to_pubkey,
                1,
                hash,
                compute_unit_price,
            );
            // Random bytes stand in for a real signature; the benchmark does
            // not verify signatures, it only needs them unique.
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
            new.message.account_keys[0] = pubkey::new_rand();
            new.message.account_keys[1] = match contention {
                WriteLockContention::None => pubkey::new_rand(),
                WriteLockContention::SameBatchOnly => {
                    // simulated mint transactions have conflict accounts
                    if is_simulated_mint {
                        chunk_pubkeys[i / packets_per_batch]
                    } else {
                        pubkey::new_rand()
                    }
                }
                WriteLockContention::Full => to_pubkey,
            };
            new.signatures = vec![Signature::new(&sig[0..64])];
            new
        })
        .collect()
}
|
|
|
|
|
2022-10-25 07:33:53 -07:00
|
|
|
/// Decides whether the transaction at `index` plays the role of a simulated
/// "mint" transaction (conflicting account, higher priority). When
/// `simulate_mint` is set, roughly `mint_txs_percentage` percent of each
/// batch — the leading positions within the batch — are marked as mint
/// transactions; the remainder stay regular (non-conflicting, low priority).
///
/// NOTE(review): the comparison is inclusive, so even with a percentage of 0
/// the first packet of every batch is still counted as a mint transaction —
/// confirm whether that extra slot is intended.
fn is_simulated_mint_transaction(
    simulate_mint: bool,
    index: usize,
    packets_per_batch: usize,
    mint_txs_percentage: usize,
) -> bool {
    if !simulate_mint {
        return false;
    }
    let mint_slots_per_batch = packets_per_batch * mint_txs_percentage / 100;
    index % packets_per_batch <= mint_slots_per_batch
}
|
|
|
|
|
|
|
|
fn make_transfer_transaction_with_compute_unit_price(
|
|
|
|
from_keypair: &Keypair,
|
|
|
|
to: &Pubkey,
|
|
|
|
lamports: u64,
|
|
|
|
recent_blockhash: Hash,
|
|
|
|
compute_unit_price: u64,
|
|
|
|
) -> Transaction {
|
|
|
|
let from_pubkey = from_keypair.pubkey();
|
|
|
|
let instructions = vec![
|
|
|
|
system_instruction::transfer(&from_pubkey, to, lamports),
|
|
|
|
ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price),
|
|
|
|
ComputeBudgetInstruction::set_compute_unit_limit(TRANSFER_TRANSACTION_COST),
|
|
|
|
];
|
|
|
|
let message = Message::new(&instructions, Some(&from_pubkey));
|
|
|
|
Transaction::new(&[from_keypair], message, recent_blockhash)
|
|
|
|
}
|
|
|
|
|
2022-05-02 15:41:47 -07:00
|
|
|
/// One iteration's worth of generated traffic: the raw transactions plus the
/// same transactions pre-serialized into packet batches.
struct PacketsPerIteration {
    // Serialized form of `transactions`, `packets_per_batch` packets at a time.
    packet_batches: Vec<PacketBatch>,
    // The generated test transactions for this iteration.
    transactions: Vec<Transaction>,
    // Batch size used when (re)building `packet_batches`.
    packets_per_batch: usize,
}
|
|
|
|
|
2022-05-02 15:41:47 -07:00
|
|
|
impl PacketsPerIteration {
|
|
|
|
fn new(
|
|
|
|
packets_per_batch: usize,
|
|
|
|
batches_per_iteration: usize,
|
|
|
|
genesis_hash: Hash,
|
|
|
|
write_lock_contention: WriteLockContention,
|
2022-10-25 07:33:53 -07:00
|
|
|
simulate_mint: bool,
|
|
|
|
mint_txs_percentage: usize,
|
2022-05-02 15:41:47 -07:00
|
|
|
) -> Self {
|
|
|
|
let total_num_transactions = packets_per_batch * batches_per_iteration;
|
|
|
|
let transactions = make_accounts_txs(
|
|
|
|
total_num_transactions,
|
|
|
|
packets_per_batch,
|
|
|
|
genesis_hash,
|
|
|
|
write_lock_contention,
|
2022-10-25 07:33:53 -07:00
|
|
|
simulate_mint,
|
|
|
|
mint_txs_percentage,
|
2022-05-02 15:41:47 -07:00
|
|
|
);
|
|
|
|
|
|
|
|
let packet_batches: Vec<PacketBatch> = to_packet_batches(&transactions, packets_per_batch);
|
|
|
|
assert_eq!(packet_batches.len(), batches_per_iteration);
|
|
|
|
Self {
|
|
|
|
packet_batches,
|
|
|
|
transactions,
|
|
|
|
packets_per_batch,
|
|
|
|
}
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
|
|
|
|
2022-05-02 15:41:47 -07:00
|
|
|
fn refresh_blockhash(&mut self, new_blockhash: Hash) {
|
|
|
|
for tx in self.transactions.iter_mut() {
|
|
|
|
tx.message.recent_blockhash = new_blockhash;
|
|
|
|
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
|
|
|
tx.signatures[0] = Signature::new(&sig[0..64]);
|
|
|
|
}
|
|
|
|
self.packet_batches = to_packet_batches(&self.transactions, self.packets_per_batch);
|
|
|
|
}
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
|
|
|
|
2020-05-22 15:01:01 -07:00
|
|
|
#[allow(clippy::cognitive_complexity)]
|
2019-08-30 11:10:32 -07:00
|
|
|
fn main() {
|
|
|
|
solana_logger::setup();
|
2020-05-22 15:01:01 -07:00
|
|
|
|
2022-04-14 05:27:47 -07:00
|
|
|
let matches = Command::new(crate_name!())
|
2020-05-22 15:01:01 -07:00
|
|
|
.about(crate_description!())
|
|
|
|
.version(solana_version::version!())
|
2022-05-02 15:41:47 -07:00
|
|
|
.arg(
|
|
|
|
Arg::new("iterations")
|
|
|
|
.long("iterations")
|
|
|
|
.takes_value(true)
|
|
|
|
.help("Number of test iterations"),
|
|
|
|
)
|
2020-05-22 15:01:01 -07:00
|
|
|
.arg(
|
2022-04-14 05:27:47 -07:00
|
|
|
Arg::new("num_chunks")
|
2020-05-22 15:01:01 -07:00
|
|
|
.long("num-chunks")
|
|
|
|
.takes_value(true)
|
|
|
|
.value_name("SIZE")
|
|
|
|
.help("Number of transaction chunks."),
|
|
|
|
)
|
|
|
|
.arg(
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
Arg::new("packets_per_batch")
|
|
|
|
.long("packets-per-batch")
|
2020-05-22 15:01:01 -07:00
|
|
|
.takes_value(true)
|
|
|
|
.value_name("SIZE")
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
.help("Packets per batch"),
|
2020-05-22 15:01:01 -07:00
|
|
|
)
|
|
|
|
.arg(
|
2022-04-14 05:27:47 -07:00
|
|
|
Arg::new("skip_sanity")
|
2020-05-22 15:01:01 -07:00
|
|
|
.long("skip-sanity")
|
|
|
|
.takes_value(false)
|
|
|
|
.help("Skip transaction sanity execution"),
|
|
|
|
)
|
2023-01-25 04:54:38 -08:00
|
|
|
.arg(
|
|
|
|
Arg::new("trace_banking")
|
|
|
|
.long("trace-banking")
|
|
|
|
.takes_value(false)
|
|
|
|
.help("Enable banking tracing"),
|
|
|
|
)
|
2020-05-22 15:01:01 -07:00
|
|
|
.arg(
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
Arg::new("write_lock_contention")
|
|
|
|
.long("write-lock-contention")
|
|
|
|
.takes_value(true)
|
|
|
|
.possible_values(WriteLockContention::possible_values())
|
|
|
|
.help("Accounts that test transactions write lock"),
|
2020-05-22 15:01:01 -07:00
|
|
|
)
|
|
|
|
.arg(
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
Arg::new("batches_per_iteration")
|
|
|
|
.long("batches-per-iteration")
|
2020-05-22 15:01:01 -07:00
|
|
|
.takes_value(true)
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
.help("Number of batches to send in each iteration"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::new("num_banking_threads")
|
|
|
|
.long("num-banking-threads")
|
|
|
|
.takes_value(true)
|
|
|
|
.help("Number of threads to use in the banking stage"),
|
2020-05-22 15:01:01 -07:00
|
|
|
)
|
2022-06-08 04:57:12 -07:00
|
|
|
.arg(
|
2022-08-19 07:15:15 -07:00
|
|
|
Arg::new("tpu_disable_quic")
|
|
|
|
.long("tpu-disable-quic")
|
2022-06-08 04:57:12 -07:00
|
|
|
.takes_value(false)
|
2022-08-19 07:15:15 -07:00
|
|
|
.help("Disable forwarding messages to TPU using QUIC"),
|
2022-06-08 04:57:12 -07:00
|
|
|
)
|
2022-10-25 07:33:53 -07:00
|
|
|
.arg(
|
|
|
|
Arg::new("simulate_mint")
|
|
|
|
.long("simulate-mint")
|
|
|
|
.takes_value(false)
|
|
|
|
.help("Simulate mint transactions to have higher priority"),
|
|
|
|
)
|
|
|
|
.arg(
|
|
|
|
Arg::new("mint_txs_percentage")
|
|
|
|
.long("mint-txs-percentage")
|
|
|
|
.takes_value(true)
|
|
|
|
.requires("simulate_mint")
|
|
|
|
.help("In simulating mint, number of mint transactions out of 100."),
|
|
|
|
)
|
2020-05-22 15:01:01 -07:00
|
|
|
.get_matches();
|
|
|
|
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
let num_banking_threads = matches
|
|
|
|
.value_of_t::<u32>("num_banking_threads")
|
|
|
|
.unwrap_or_else(|_| BankingStage::num_threads());
|
2019-08-30 11:10:32 -07:00
|
|
|
// a multiple of packet chunk duplicates to avoid races
|
2022-04-14 05:27:47 -07:00
|
|
|
let num_chunks = matches.value_of_t::<usize>("num_chunks").unwrap_or(16);
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
let packets_per_batch = matches
|
|
|
|
.value_of_t::<usize>("packets_per_batch")
|
2022-04-14 05:27:47 -07:00
|
|
|
.unwrap_or(192);
|
|
|
|
let iterations = matches.value_of_t::<usize>("iterations").unwrap_or(1000);
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
let batches_per_iteration = matches
|
|
|
|
.value_of_t::<usize>("batches_per_iteration")
|
|
|
|
.unwrap_or(BankingStage::num_threads() as usize);
|
|
|
|
let write_lock_contention = matches
|
|
|
|
.value_of_t::<WriteLockContention>("write_lock_contention")
|
|
|
|
.unwrap_or(WriteLockContention::None);
|
2022-10-25 07:33:53 -07:00
|
|
|
let mint_txs_percentage = matches
|
|
|
|
.value_of_t::<usize>("mint_txs_percentage")
|
|
|
|
.unwrap_or(99);
|
2020-05-22 15:01:01 -07:00
|
|
|
|
2019-08-30 11:10:32 -07:00
|
|
|
let mint_total = 1_000_000_000_000;
|
2019-11-08 20:56:57 -08:00
|
|
|
let GenesisConfigInfo {
|
|
|
|
genesis_config,
|
2019-08-30 11:10:32 -07:00
|
|
|
mint_keypair,
|
|
|
|
..
|
2019-11-08 20:56:57 -08:00
|
|
|
} = create_genesis_config(mint_total);
|
2019-08-30 11:10:32 -07:00
|
|
|
|
2020-08-07 11:21:35 -07:00
|
|
|
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
|
2021-08-04 15:30:43 -07:00
|
|
|
let bank0 = Bank::new_for_benches(&genesis_config);
|
2022-07-05 21:24:58 -07:00
|
|
|
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
|
|
|
|
let mut bank = bank_forks.read().unwrap().working_bank();
|
2019-08-30 11:10:32 -07:00
|
|
|
|
2022-02-16 19:44:34 -08:00
|
|
|
// set cost tracker limits to MAX so it will not filter out TXs
|
|
|
|
bank.write_cost_tracker()
|
|
|
|
.unwrap()
|
|
|
|
.set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
|
|
|
|
|
2022-05-02 15:41:47 -07:00
|
|
|
let mut all_packets: Vec<PacketsPerIteration> = std::iter::from_fn(|| {
|
|
|
|
Some(PacketsPerIteration::new(
|
|
|
|
packets_per_batch,
|
|
|
|
batches_per_iteration,
|
|
|
|
genesis_config.hash(),
|
|
|
|
write_lock_contention,
|
2022-10-25 07:33:53 -07:00
|
|
|
matches.is_present("simulate_mint"),
|
|
|
|
mint_txs_percentage,
|
2022-05-02 15:41:47 -07:00
|
|
|
))
|
|
|
|
})
|
|
|
|
.take(num_chunks)
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
let total_num_transactions: u64 = all_packets
|
|
|
|
.iter()
|
|
|
|
.map(|packets_for_single_iteration| packets_for_single_iteration.transactions.len() as u64)
|
|
|
|
.sum();
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
info!(
|
|
|
|
"threads: {} txs: {}",
|
|
|
|
num_banking_threads, total_num_transactions
|
|
|
|
);
|
2019-08-30 11:10:32 -07:00
|
|
|
|
2022-11-22 08:24:06 -08:00
|
|
|
// fund all the accounts
|
2022-05-02 15:41:47 -07:00
|
|
|
all_packets.iter().for_each(|packets_for_single_iteration| {
|
|
|
|
packets_for_single_iteration
|
|
|
|
.transactions
|
|
|
|
.iter()
|
|
|
|
.for_each(|tx| {
|
|
|
|
let mut fund = system_transaction::transfer(
|
|
|
|
&mint_keypair,
|
|
|
|
&tx.message.account_keys[0],
|
|
|
|
mint_total / total_num_transactions,
|
|
|
|
genesis_config.hash(),
|
|
|
|
);
|
|
|
|
// Ignore any pesky duplicate signature errors in the case we are using single-payer
|
|
|
|
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
|
|
|
fund.signatures = vec![Signature::new(&sig[0..64])];
|
|
|
|
bank.process_transaction(&fund).unwrap();
|
|
|
|
});
|
2019-08-30 11:10:32 -07:00
|
|
|
});
|
2020-05-22 15:01:01 -07:00
|
|
|
|
|
|
|
let skip_sanity = matches.is_present("skip_sanity");
|
|
|
|
if !skip_sanity {
|
2022-05-02 15:41:47 -07:00
|
|
|
all_packets.iter().for_each(|packets_for_single_iteration| {
|
|
|
|
//sanity check, make sure all the transactions can execute sequentially
|
|
|
|
packets_for_single_iteration
|
|
|
|
.transactions
|
|
|
|
.iter()
|
|
|
|
.for_each(|tx| {
|
|
|
|
let res = bank.process_transaction(tx);
|
2022-12-06 06:30:06 -08:00
|
|
|
assert!(res.is_ok(), "sanity test transactions error: {res:?}");
|
2022-05-02 15:41:47 -07:00
|
|
|
});
|
2020-05-22 15:01:01 -07:00
|
|
|
});
|
|
|
|
bank.clear_signatures();
|
2021-07-15 20:51:27 -07:00
|
|
|
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
if write_lock_contention == WriteLockContention::None {
|
2022-05-02 15:41:47 -07:00
|
|
|
all_packets.iter().for_each(|packets_for_single_iteration| {
|
|
|
|
//sanity check, make sure all the transactions can execute in parallel
|
|
|
|
let res =
|
|
|
|
bank.process_transactions(packets_for_single_iteration.transactions.iter());
|
|
|
|
for r in res {
|
2022-12-06 06:30:06 -08:00
|
|
|
assert!(r.is_ok(), "sanity parallel execution error: {r:?}");
|
2022-05-02 15:41:47 -07:00
|
|
|
}
|
|
|
|
bank.clear_signatures();
|
|
|
|
});
|
2020-05-22 15:01:01 -07:00
|
|
|
}
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
2020-05-22 15:01:01 -07:00
|
|
|
|
2019-08-30 11:10:32 -07:00
|
|
|
let ledger_path = get_tmp_ledger_path!();
|
|
|
|
{
|
2020-01-13 13:13:52 -08:00
|
|
|
let blockstore = Arc::new(
|
|
|
|
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
2019-08-30 11:10:32 -07:00
|
|
|
);
|
2022-02-07 18:28:28 -08:00
|
|
|
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
2022-10-25 07:33:53 -07:00
|
|
|
let (exit, poh_recorder, poh_service, signal_receiver) =
|
|
|
|
create_test_recorder(&bank, &blockstore, None, Some(leader_schedule_cache));
|
2023-01-25 04:54:38 -08:00
|
|
|
let (banking_tracer, tracer_thread) =
|
|
|
|
BankingTracer::new(matches.is_present("trace_banking").then_some((
|
|
|
|
&blockstore.banking_trace_path(),
|
|
|
|
exit.clone(),
|
|
|
|
BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT,
|
|
|
|
)))
|
|
|
|
.unwrap();
|
|
|
|
let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote();
|
|
|
|
let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote();
|
|
|
|
let (gossip_vote_sender, gossip_vote_receiver) =
|
|
|
|
banking_tracer.create_channel_gossip_vote();
|
2023-01-24 08:57:55 -08:00
|
|
|
let cluster_info = {
|
|
|
|
let keypair = Arc::new(Keypair::new());
|
|
|
|
let node = Node::new_localhost_with_pubkey(&keypair.pubkey());
|
|
|
|
ClusterInfo::new(node.info, keypair, SocketAddrSpace::Unspecified)
|
|
|
|
};
|
2020-04-21 12:54:45 -07:00
|
|
|
let cluster_info = Arc::new(cluster_info);
|
2023-03-24 09:47:04 -07:00
|
|
|
let tpu_disable_quic = matches.is_present("tpu_disable_quic");
|
|
|
|
let connection_cache = match tpu_disable_quic {
|
|
|
|
false => ConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE),
|
|
|
|
true => ConnectionCache::with_udp(DEFAULT_TPU_CONNECTION_POOL_SIZE),
|
2022-07-05 10:49:42 -07:00
|
|
|
};
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
let banking_stage = BankingStage::new_num_threads(
|
2019-08-30 11:10:32 -07:00
|
|
|
&cluster_info,
|
|
|
|
&poh_recorder,
|
2023-01-18 17:04:55 -08:00
|
|
|
non_vote_receiver,
|
2021-10-07 02:38:23 -07:00
|
|
|
tpu_vote_receiver,
|
2023-01-18 17:04:55 -08:00
|
|
|
gossip_vote_receiver,
|
banking-bench: Add and rearrange options
- Add write-lock-contention option, replacing same_payer
- write-lock-contention also has a same-batch-only value, where
contention happens only inside batches, not between them
- Rename num-threads to batches-per-iteration, which is closer to what
it is actually doing.
- Add num-banking-threads as a new option
- Rename packets-per-chunk to packets-per-batch, because this is closer
to what's happening; and it was previously confusing that num-chunks
had little to do with packets-per-chunk.
Example output for a iterations=100 and a permutation of inputs:
contention,threads,batchsize,batchcount,tps
none, 3,192, 4,65290.30
none, 4,192, 4,77358.06
none, 5,192, 4,86436.65
none, 3, 12,64,43944.57
none, 4, 12,64,65852.15
none, 5, 12,64,70674.37
same-batch-only,3,192, 4,3928.21
same-batch-only,4,192, 4,6460.15
same-batch-only,5,192, 4,7242.85
same-batch-only,3, 12,64,11377.58
same-batch-only,4, 12,64,19582.79
same-batch-only,5, 12,64,24648.45
full, 3,192, 4,3914.26
full, 4,192, 4,2102.99
full, 5,192, 4,3041.87
full, 3, 12,64,11316.17
full, 4, 12,64,2224.99
full, 5, 12,64,5240.32
2022-04-14 06:54:38 -07:00
|
|
|
num_banking_threads,
|
2019-11-20 15:43:10 -08:00
|
|
|
None,
|
2020-08-07 11:21:35 -07:00
|
|
|
replay_vote_sender,
|
2022-07-19 22:01:35 -07:00
|
|
|
None,
|
2022-07-05 10:49:42 -07:00
|
|
|
Arc::new(connection_cache),
|
2022-07-05 21:24:58 -07:00
|
|
|
bank_forks.clone(),
|
2023-03-23 17:05:54 -07:00
|
|
|
&Arc::new(PrioritizationFeeCache::new(0u64)),
|
2019-08-30 11:10:32 -07:00
|
|
|
);
|
|
|
|
|
|
|
|
// This is so that the signal_receiver does not go out of scope after the closure.
|
|
|
|
// If it is dropped before poh_service, then poh_service will error when
|
|
|
|
// calling send() on the channel.
|
|
|
|
let signal_receiver = Arc::new(signal_receiver);
|
2019-12-21 10:43:08 -08:00
|
|
|
let mut total_us = 0;
|
|
|
|
let mut tx_total_us = 0;
|
2020-05-22 15:01:01 -07:00
|
|
|
let base_tx_count = bank.transaction_count();
|
2019-08-30 11:10:32 -07:00
|
|
|
let mut txs_processed = 0;
|
2020-10-19 12:12:08 -07:00
|
|
|
let collector = solana_sdk::pubkey::new_rand();
|
2019-12-21 10:43:08 -08:00
|
|
|
let mut total_sent = 0;
|
2022-05-02 15:41:47 -07:00
|
|
|
for current_iteration_index in 0..iterations {
|
|
|
|
trace!("RUNNING ITERATION {}", current_iteration_index);
|
2019-08-30 11:10:32 -07:00
|
|
|
let now = Instant::now();
|
|
|
|
let mut sent = 0;
|
|
|
|
|
2022-05-02 15:41:47 -07:00
|
|
|
let packets_for_this_iteration = &all_packets[current_iteration_index % num_chunks];
|
|
|
|
for (packet_batch_index, packet_batch) in
|
|
|
|
packets_for_this_iteration.packet_batches.iter().enumerate()
|
|
|
|
{
|
2022-05-23 13:30:15 -07:00
|
|
|
sent += packet_batch.len();
|
2019-08-30 11:10:32 -07:00
|
|
|
trace!(
|
2022-05-02 15:41:47 -07:00
|
|
|
"Sending PacketBatch index {}, {}",
|
|
|
|
packet_batch_index,
|
2019-08-30 11:10:32 -07:00
|
|
|
timestamp(),
|
|
|
|
);
|
2023-01-18 17:04:55 -08:00
|
|
|
non_vote_sender
|
2023-01-25 04:54:38 -08:00
|
|
|
.send(BankingPacketBatch::new((vec![packet_batch.clone()], None)))
|
2022-05-24 14:01:41 -07:00
|
|
|
.unwrap();
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
2022-05-02 15:41:47 -07:00
|
|
|
|
|
|
|
for tx in &packets_for_this_iteration.transactions {
|
2019-08-30 11:10:32 -07:00
|
|
|
loop {
|
|
|
|
if bank.get_signature_status(&tx.signatures[0]).is_some() {
|
|
|
|
break;
|
|
|
|
}
|
2022-07-05 07:29:44 -07:00
|
|
|
if poh_recorder.read().unwrap().bank().is_none() {
|
2019-08-30 11:10:32 -07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
sleep(Duration::from_millis(5));
|
|
|
|
}
|
|
|
|
}
|
2022-10-25 07:33:53 -07:00
|
|
|
|
|
|
|
// check if txs had been processed by bank. Returns when all transactions are
|
|
|
|
// processed, with `FALSE` indicate there is still bank. or returns TRUE indicate a
|
|
|
|
// bank has expired before receiving all txs.
|
2020-05-22 15:01:01 -07:00
|
|
|
if check_txs(
|
|
|
|
&signal_receiver,
|
2022-05-02 15:41:47 -07:00
|
|
|
packets_for_this_iteration.transactions.len(),
|
2020-05-22 15:01:01 -07:00
|
|
|
&poh_recorder,
|
|
|
|
) {
|
2022-10-25 07:33:53 -07:00
|
|
|
eprintln!(
|
|
|
|
"[iteration {}, tx sent {}, slot {} expired, bank tx count {}]",
|
|
|
|
current_iteration_index,
|
|
|
|
sent,
|
2019-08-30 11:10:32 -07:00
|
|
|
bank.slot(),
|
|
|
|
bank.transaction_count(),
|
|
|
|
);
|
2019-12-21 10:43:08 -08:00
|
|
|
tx_total_us += duration_as_us(&now.elapsed());
|
2019-09-20 13:21:12 -07:00
|
|
|
|
2019-08-30 11:10:32 -07:00
|
|
|
let mut poh_time = Measure::start("poh_time");
|
2021-09-13 16:55:35 -07:00
|
|
|
poh_recorder
|
2022-07-05 07:29:44 -07:00
|
|
|
.write()
|
2021-09-13 16:55:35 -07:00
|
|
|
.unwrap()
|
|
|
|
.reset(bank.clone(), Some((bank.slot(), bank.slot() + 1)));
|
2019-09-20 13:21:12 -07:00
|
|
|
poh_time.stop();
|
|
|
|
|
|
|
|
let mut new_bank_time = Measure::start("new_bank");
|
2019-08-30 11:10:32 -07:00
|
|
|
let new_bank = Bank::new_from_parent(&bank, &collector, bank.slot() + 1);
|
2019-09-20 13:21:12 -07:00
|
|
|
new_bank_time.stop();
|
|
|
|
|
|
|
|
let mut insert_time = Measure::start("insert_time");
|
2022-07-05 21:24:58 -07:00
|
|
|
bank_forks.write().unwrap().insert(new_bank);
|
|
|
|
bank = bank_forks.read().unwrap().working_bank();
|
2019-09-20 13:21:12 -07:00
|
|
|
insert_time.stop();
|
|
|
|
|
2022-02-16 19:44:34 -08:00
|
|
|
// set cost tracker limits to MAX so it will not filter out TXs
|
|
|
|
bank.write_cost_tracker().unwrap().set_limits(
|
|
|
|
std::u64::MAX,
|
|
|
|
std::u64::MAX,
|
|
|
|
std::u64::MAX,
|
|
|
|
);
|
|
|
|
|
2023-03-27 08:17:17 -07:00
|
|
|
assert!(poh_recorder.read().unwrap().bank().is_none());
|
2022-07-05 07:29:44 -07:00
|
|
|
poh_recorder.write().unwrap().set_bank(&bank, false);
|
|
|
|
assert!(poh_recorder.read().unwrap().bank().is_some());
|
2019-08-30 11:10:32 -07:00
|
|
|
debug!(
|
|
|
|
"new_bank_time: {}us insert_time: {}us poh_time: {}us",
|
|
|
|
new_bank_time.as_us(),
|
|
|
|
insert_time.as_us(),
|
|
|
|
poh_time.as_us(),
|
|
|
|
);
|
|
|
|
} else {
|
2022-10-25 07:33:53 -07:00
|
|
|
eprintln!(
|
|
|
|
"[iteration {}, tx sent {}, slot {} active, bank tx count {}]",
|
|
|
|
current_iteration_index,
|
|
|
|
sent,
|
|
|
|
bank.slot(),
|
|
|
|
bank.transaction_count(),
|
|
|
|
);
|
2019-12-21 10:43:08 -08:00
|
|
|
tx_total_us += duration_as_us(&now.elapsed());
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// This signature clear may not actually clear the signatures
|
|
|
|
// in this chunk, but since we rotate between CHUNKS then
|
|
|
|
// we should clear them by the time we come around again to re-use that chunk.
|
|
|
|
bank.clear_signatures();
|
2019-12-21 10:43:08 -08:00
|
|
|
total_us += duration_as_us(&now.elapsed());
|
|
|
|
total_sent += sent;
|
2019-08-30 11:10:32 -07:00
|
|
|
|
2022-10-25 07:33:53 -07:00
|
|
|
if current_iteration_index % num_chunks == 0 {
|
2022-05-02 15:41:47 -07:00
|
|
|
let last_blockhash = bank.last_blockhash();
|
|
|
|
for packets_for_single_iteration in all_packets.iter_mut() {
|
|
|
|
packets_for_single_iteration.refresh_blockhash(last_blockhash);
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-10-25 07:33:53 -07:00
|
|
|
txs_processed += bank_forks
|
2022-07-05 21:24:58 -07:00
|
|
|
.read()
|
|
|
|
.unwrap()
|
|
|
|
.working_bank()
|
|
|
|
.transaction_count();
|
2020-05-22 15:01:01 -07:00
|
|
|
debug!("processed: {} base: {}", txs_processed, base_tx_count);
|
2022-10-25 07:33:53 -07:00
|
|
|
|
|
|
|
eprintln!("[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, tx_total_us: {}]",
|
|
|
|
total_sent, base_tx_count, txs_processed, (txs_processed - base_tx_count), total_us, tx_total_us);
|
|
|
|
|
2019-08-30 11:10:32 -07:00
|
|
|
eprintln!(
|
2020-05-22 15:01:01 -07:00
|
|
|
"{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
|
2019-12-21 10:43:08 -08:00
|
|
|
(1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
|
2019-08-30 11:10:32 -07:00
|
|
|
);
|
|
|
|
eprintln!(
|
2020-05-22 15:01:01 -07:00
|
|
|
"{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
|
2019-12-21 10:43:08 -08:00
|
|
|
(1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
|
2019-08-30 11:10:32 -07:00
|
|
|
);
|
2020-05-22 15:01:01 -07:00
|
|
|
eprintln!(
|
|
|
|
"{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
|
|
|
|
(1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
|
|
|
|
);
|
2019-08-30 11:10:32 -07:00
|
|
|
|
2023-01-18 17:04:55 -08:00
|
|
|
drop(non_vote_sender);
|
2021-10-07 02:38:23 -07:00
|
|
|
drop(tpu_vote_sender);
|
2023-01-18 17:04:55 -08:00
|
|
|
drop(gossip_vote_sender);
|
2019-08-30 11:10:32 -07:00
|
|
|
exit.store(true, Ordering::Relaxed);
|
2019-10-16 14:45:05 -07:00
|
|
|
banking_stage.join().unwrap();
|
|
|
|
debug!("waited for banking_stage");
|
2019-08-30 11:10:32 -07:00
|
|
|
poh_service.join().unwrap();
|
|
|
|
sleep(Duration::from_secs(1));
|
|
|
|
debug!("waited for poh_service");
|
2023-01-25 04:54:38 -08:00
|
|
|
if let Some(tracer_thread) = tracer_thread {
|
|
|
|
tracer_thread.join().unwrap().unwrap();
|
|
|
|
}
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|
2020-01-13 13:13:52 -08:00
|
|
|
let _unused = Blockstore::destroy(&ledger_path);
|
2019-08-30 11:10:32 -07:00
|
|
|
}
|