banking-bench: Add and rearrange options

- Add a write-lock-contention option, replacing same_payer.
  write-lock-contention also has a same-batch-only value, where contention
  happens only inside batches, not between them.
- Rename num-threads to batches-per-iteration, which is closer to what it
  actually controls.
- Add num-banking-threads as a new option.
- Rename packets-per-chunk to packets-per-batch, because this is closer to
  what is happening; it was previously confusing that num-chunks had little
  to do with packets-per-chunk.

Example output for iterations=100 and a permutation of inputs:

contention,threads,batchsize,batchcount,tps
none,3,192,4,65290.30
none,4,192,4,77358.06
none,5,192,4,86436.65
none,3,12,64,43944.57
none,4,12,64,65852.15
none,5,12,64,70674.37
same-batch-only,3,192,4,3928.21
same-batch-only,4,192,4,6460.15
same-batch-only,5,192,4,7242.85
same-batch-only,3,12,64,11377.58
same-batch-only,4,12,64,19582.79
same-batch-only,5,12,64,24648.45
full,3,192,4,3914.26
full,4,192,4,2102.99
full,5,192,4,3041.87
full,3,12,64,11316.17
full,4,12,64,2224.99
full,5,12,64,5240.32
parent 2c7699eb8c
commit d2c6c04d3e
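Note: in the table above, the columns map onto the new flags: contention is
--write-lock-contention, threads is --num-banking-threads, batchsize is
--packets-per-batch, and batchcount is --batches-per-iteration. The first row
would therefore correspond to an invocation along the lines of
`solana-banking-bench --write-lock-contention none --num-banking-threads 3
--packets-per-batch 192 --batches-per-iteration 4 --iterations 100` (flag
names are taken from the diff below; the binary name is an assumption).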
banking-bench/Cargo.toml

@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
 publish = false
 
 [dependencies]
-clap = "3.1.8"
+clap = { version = "3.1.8", features = ["derive"] }
 crossbeam-channel = "0.5"
 log = "0.4.14"
 rand = "0.7.0"
banking-bench/src/main.rs

@@ -1,6 +1,6 @@
 #![allow(clippy::integer_arithmetic)]
 use {
-    clap::{crate_description, crate_name, Arg, Command},
+    clap::{crate_description, crate_name, Arg, ArgEnum, Command},
     crossbeam_channel::{unbounded, Receiver},
     log::*,
     rand::{thread_rng, Rng},
@@ -66,23 +66,55 @@ fn check_txs(
     no_bank
 }
 
+#[derive(ArgEnum, Clone, Copy, PartialEq)]
+enum WriteLockContention {
+    /// No transactions lock the same accounts.
+    None,
+    /// Transactions don't lock the same account, unless they belong to the same batch.
+    SameBatchOnly,
+    /// All transactions write lock the same account.
+    Full,
+}
+
+impl WriteLockContention {
+    fn possible_values<'a>() -> impl Iterator<Item = clap::PossibleValue<'a>> {
+        Self::value_variants()
+            .iter()
+            .filter_map(|v| v.to_possible_value())
+    }
+}
+
+impl std::str::FromStr for WriteLockContention {
+    type Err = String;
+    fn from_str(input: &str) -> Result<Self, String> {
+        ArgEnum::from_str(input, false)
+    }
+}
+
 fn make_accounts_txs(
     total_num_transactions: usize,
+    packets_per_batch: usize,
     hash: Hash,
-    same_payer: bool,
+    contention: WriteLockContention,
 ) -> Vec<Transaction> {
-    let to_pubkey = solana_sdk::pubkey::new_rand();
+    use solana_sdk::pubkey;
+    let to_pubkey = pubkey::new_rand();
+    let chunk_pubkeys: Vec<pubkey::Pubkey> = (0..total_num_transactions / packets_per_batch)
+        .map(|_| pubkey::new_rand())
+        .collect();
     let payer_key = Keypair::new();
     let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
     (0..total_num_transactions)
         .into_par_iter()
-        .map(|_| {
+        .map(|i| {
             let mut new = dummy.clone();
             let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
-            if !same_payer {
-                new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
-            }
-            new.message.account_keys[1] = solana_sdk::pubkey::new_rand();
+            new.message.account_keys[0] = pubkey::new_rand();
+            new.message.account_keys[1] = match contention {
+                WriteLockContention::None => pubkey::new_rand(),
+                WriteLockContention::SameBatchOnly => chunk_pubkeys[i / packets_per_batch],
+                WriteLockContention::Full => to_pubkey,
+            };
             new.signatures = vec![Signature::new(&sig[0..64])];
             new
         })
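Note: a standalone sketch of the clap glue introduced above, assuming clap 3.1
with the derive feature (the enum is renamed here to keep it clearly
illustrative). #[derive(ArgEnum)] supplies value_variants() and
to_possible_value() for validation and --help, while the manual FromStr impl
is what lets the builder-side matches.value_of_t::<WriteLockContention>()
call parse the flag, since value_of_t requires FromStr:

    use clap::ArgEnum;

    #[derive(ArgEnum, Clone, Copy, PartialEq, Debug)]
    enum Contention {
        None,
        SameBatchOnly,
        Full,
    }

    impl std::str::FromStr for Contention {
        type Err = String;
        fn from_str(input: &str) -> Result<Self, String> {
            // false = do not ignore case, matching the diff above
            ArgEnum::from_str(input, false)
        }
    }

    fn main() {
        // The derive renders variant names in kebab-case, so the CLI
        // values are "none", "same-batch-only", and "full".
        assert_eq!(
            "same-batch-only".parse::<Contention>(),
            Ok(Contention::SameBatchOnly)
        );
    }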
@@ -91,13 +123,11 @@ fn make_accounts_txs(
 
 struct Config {
     packets_per_batch: usize,
-    chunk_len: usize,
-    num_threads: usize,
 }
 
 impl Config {
     fn get_transactions_index(&self, chunk_index: usize) -> usize {
-        chunk_index * (self.chunk_len / self.num_threads) * self.packets_per_batch
+        chunk_index * self.packets_per_batch
     }
 }
 
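Note: with chunk_len and num_threads gone from Config, the mapping from batch
index to transaction index is direct: batch chunk_index starts at
chunk_index * packets_per_batch. For example, with packets_per_batch = 192,
batch 3 starts at transaction 3 * 192 = 576.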
@@ -120,11 +150,11 @@ fn main() {
                 .help("Number of transaction chunks."),
         )
         .arg(
-            Arg::new("packets_per_chunk")
-                .long("packets-per-chunk")
+            Arg::new("packets_per_batch")
+                .long("packets-per-batch")
                 .takes_value(true)
                 .value_name("SIZE")
-                .help("Packets per chunk"),
+                .help("Packets per batch"),
         )
         .arg(
             Arg::new("skip_sanity")
@@ -133,10 +163,11 @@
                 .help("Skip transaction sanity execution"),
         )
         .arg(
-            Arg::new("same_payer")
-                .long("same-payer")
-                .takes_value(false)
-                .help("Use the same payer for transfers"),
+            Arg::new("write_lock_contention")
+                .long("write-lock-contention")
+                .takes_value(true)
+                .possible_values(WriteLockContention::possible_values())
+                .help("Accounts that test transactions write lock"),
         )
         .arg(
             Arg::new("iterations")
@@ -145,24 +176,36 @@
                 .help("Number of iterations"),
         )
         .arg(
-            Arg::new("num_threads")
-                .long("num-threads")
+            Arg::new("batches_per_iteration")
+                .long("batches-per-iteration")
                 .takes_value(true)
-                .help("Number of iterations"),
+                .help("Number of batches to send in each iteration"),
+        )
+        .arg(
+            Arg::new("num_banking_threads")
+                .long("num-banking-threads")
+                .takes_value(true)
+                .help("Number of threads to use in the banking stage"),
         )
         .get_matches();
 
-    let num_threads = matches
-        .value_of_t::<usize>("num_threads")
-        .unwrap_or(BankingStage::num_threads() as usize);
+    let num_banking_threads = matches
+        .value_of_t::<u32>("num_banking_threads")
+        .unwrap_or_else(|_| BankingStage::num_threads());
     // a multiple of packet chunk duplicates to avoid races
     let num_chunks = matches.value_of_t::<usize>("num_chunks").unwrap_or(16);
-    let packets_per_chunk = matches
-        .value_of_t::<usize>("packets_per_chunk")
+    let packets_per_batch = matches
+        .value_of_t::<usize>("packets_per_batch")
         .unwrap_or(192);
     let iterations = matches.value_of_t::<usize>("iterations").unwrap_or(1000);
+    let batches_per_iteration = matches
+        .value_of_t::<usize>("batches_per_iteration")
+        .unwrap_or(BankingStage::num_threads() as usize);
+    let write_lock_contention = matches
+        .value_of_t::<WriteLockContention>("write_lock_contention")
+        .unwrap_or(WriteLockContention::None);
 
-    let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
+    let total_num_transactions = num_chunks * packets_per_batch * batches_per_iteration;
     let mint_total = 1_000_000_000_000;
     let GenesisConfigInfo {
         genesis_config,
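Note: as a worked example of the new sizing, with the defaults num_chunks = 16
and packets_per_batch = 192, and batches_per_iteration = 4 (assuming
BankingStage::num_threads() returns 4), total_num_transactions =
16 * 192 * 4 = 12,288.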
@@ -183,11 +226,17 @@
         .unwrap()
         .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
 
-    info!("threads: {} txs: {}", num_threads, total_num_transactions);
+    info!(
+        "threads: {} txs: {}",
+        num_banking_threads, total_num_transactions
+    );
 
-    let same_payer = matches.is_present("same_payer");
-    let mut transactions =
-        make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);
+    let mut transactions = make_accounts_txs(
+        total_num_transactions,
+        packets_per_batch,
+        genesis_config.hash(),
+        write_lock_contention,
+    );
 
     // fund all the accounts
     transactions.iter().for_each(|tx| {
@@ -212,16 +261,20 @@
             assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
         });
         bank.clear_signatures();
+
+        if write_lock_contention == WriteLockContention::None {
             //sanity check, make sure all the transactions can execute in parallel
             let res = bank.process_transactions(transactions.iter());
             for r in res {
                 assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
             }
             bank.clear_signatures();
         }
+    }
 
-    let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
+    let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_batch);
+    assert_eq!(verified.len(), num_chunks * batches_per_iteration);
 
     let ledger_path = get_tmp_ledger_path!();
     {
         let blockstore = Arc::new(
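Note: continuing the worked example, to_packet_batches splits the 12,288
transactions into 12,288 / 192 = 64 batches, which is exactly
num_chunks * batches_per_iteration = 16 * 4; that equality is what the new
assert_eq! enforces.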
@@ -240,19 +293,20 @@
             SocketAddrSpace::Unspecified,
         );
         let cluster_info = Arc::new(cluster_info);
-        let banking_stage = BankingStage::new(
+        let banking_stage = BankingStage::new_num_threads(
             &cluster_info,
             &poh_recorder,
             verified_receiver,
             tpu_vote_receiver,
             vote_receiver,
+            num_banking_threads,
             None,
             replay_vote_sender,
             Arc::new(RwLock::new(CostModel::default())),
         );
         poh_recorder.lock().unwrap().set_bank(&bank);
 
-        let chunk_len = verified.len() / num_chunks;
+        let chunk_len = batches_per_iteration;
         let mut start = 0;
 
         // This is so that the signal_receiver does not go out of scope after the closure.
@@ -265,20 +319,13 @@
         let mut txs_processed = 0;
         let mut root = 1;
         let collector = solana_sdk::pubkey::new_rand();
-        let config = Config {
-            packets_per_batch: packets_per_chunk,
-            chunk_len,
-            num_threads,
-        };
+        let config = Config { packets_per_batch };
         let mut total_sent = 0;
         for _ in 0..iterations {
             let now = Instant::now();
             let mut sent = 0;
 
-            for (i, v) in verified[start..start + chunk_len]
-                .chunks(chunk_len / num_threads)
-                .enumerate()
-            {
+            for (i, v) in verified[start..start + chunk_len].chunks(1).enumerate() {
                 let mut byte = 0;
                 let index = config.get_transactions_index(start + i);
                 if index < transactions.len() {
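Note: a minimal sketch of the reshaped send loop above (plain strings standing
in for the bench's PacketBatch values):

    fn main() {
        let verified = ["batch0", "batch1", "batch2", "batch3"];
        // .chunks(1) yields one single-element slice per element, so each
        // iteration sends exactly one batch, and `i` counts batches,
        // matching Config::get_transactions_index's
        // chunk_index * packets_per_batch.
        for (i, v) in verified.chunks(1).enumerate() {
            println!("send {}: {:?}", i, v);
        }
    }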
@@ -386,7 +433,7 @@ fn main() {
                 let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
                 tx.signatures[0] = Signature::new(&sig[0..64]);
             }
-            verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
+            verified = to_packet_batches(&transactions.clone(), packets_per_batch);
         }
 
         start += chunk_len;
core/src/banking_stage.rs

@@ -399,7 +399,7 @@ impl BankingStage {
     }
 
     #[allow(clippy::too_many_arguments)]
-    fn new_num_threads(
+    pub fn new_num_threads(
         cluster_info: &Arc<ClusterInfo>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
         verified_receiver: CrossbeamReceiver<Vec<PacketBatch>>,