2019-09-10 16:24:43 -07:00
|
|
|
use crate::cli::Config;
|
2019-05-01 13:21:45 -07:00
|
|
|
use log::*;
|
2018-12-11 19:47:32 -08:00
|
|
|
use rayon::prelude::*;
|
2019-05-01 15:58:35 -07:00
|
|
|
use solana_client::perf_utils::{sample_txs, SampleStats};
|
2019-08-21 10:23:33 -07:00
|
|
|
use solana_core::gen_keys::GenKeys;
|
2019-12-16 13:05:17 -08:00
|
|
|
use solana_faucet::faucet::request_airdrop_transaction;
|
2019-09-27 12:19:06 -07:00
|
|
|
#[cfg(feature = "move")]
|
2019-11-29 12:50:32 -08:00
|
|
|
use solana_librapay::{create_genesis, upload_mint_script, upload_payment_script};
|
2019-07-27 15:28:00 -07:00
|
|
|
use solana_measure::measure::Measure;
|
2019-11-06 13:15:00 -08:00
|
|
|
use solana_metrics::{self, datapoint_debug};
|
2019-10-01 14:43:36 -07:00
|
|
|
use solana_sdk::{
|
|
|
|
client::Client,
|
2019-10-04 01:16:07 -07:00
|
|
|
clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE},
|
2019-11-06 13:15:00 -08:00
|
|
|
commitment_config::CommitmentConfig,
|
2019-10-01 14:43:36 -07:00
|
|
|
fee_calculator::FeeCalculator,
|
|
|
|
hash::Hash,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
signature::{Keypair, KeypairUtil},
|
|
|
|
system_instruction, system_transaction,
|
2019-10-30 19:51:44 -07:00
|
|
|
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
|
2019-10-01 14:43:36 -07:00
|
|
|
transaction::Transaction,
|
|
|
|
};
|
|
|
|
use std::{
|
|
|
|
cmp,
|
|
|
|
collections::VecDeque,
|
|
|
|
net::SocketAddr,
|
2019-12-19 10:04:53 -08:00
|
|
|
process::exit,
|
2019-10-01 14:43:36 -07:00
|
|
|
sync::{
|
|
|
|
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
|
|
|
|
Arc, RwLock,
|
|
|
|
},
|
|
|
|
thread::{sleep, Builder},
|
|
|
|
time::{Duration, Instant},
|
|
|
|
};
|
|
|
|
|
2019-10-04 01:16:07 -07:00
|
|
|
// The point at which transactions become "too old", in seconds.
// Derived from the cluster's processing-age limit (in slots) converted to
// wall-clock time via the default tick rate.
const MAX_TX_QUEUE_AGE: u64 =
    MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;
|
2018-12-11 19:47:32 -08:00
|
|
|
|
2019-09-27 12:19:06 -07:00
|
|
|
#[cfg(feature = "move")]
|
2019-11-29 12:50:32 -08:00
|
|
|
use solana_librapay::librapay_transaction;
|
2019-07-27 15:28:00 -07:00
|
|
|
|
2019-06-04 13:56:11 -07:00
|
|
|
// Maximum number of destination accounts funded by a single transaction in `fund_keys`.
pub const MAX_SPENDS_PER_TX: u64 = 4;
|
2018-12-11 19:47:32 -08:00
|
|
|
|
2019-06-12 15:01:59 -07:00
|
|
|
/// Errors surfaced by the bench-tps driver.
#[derive(Debug)]
pub enum BenchTpsError {
    /// The faucet airdrop did not credit the expected amount (see `airdrop_lamports`).
    AirdropFailure,
}
|
|
|
|
|
|
|
|
/// Convenience alias for results produced by this module.
pub type Result<T> = std::result::Result<T, BenchTpsError>;
|
|
|
|
|
2018-12-11 19:47:32 -08:00
|
|
|
/// Queue of signed transaction batches shared between the generator thread and
/// the sender threads; each entry is a batch of `(transaction, creation_timestamp_ms)`.
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
|
|
|
|
|
// Bundle of keys used for the optional Libra/Move payment path:
// (genesis keypair, pay program id, mint program id, per-account keypairs)
// -- ordering assumed from how `generate_txs` destructures it; TODO confirm at the call site.
type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
|
|
|
|
|
2019-09-06 07:24:04 -07:00
|
|
|
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
|
|
|
|
loop {
|
2019-11-06 13:15:00 -08:00
|
|
|
match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
|
2019-09-06 07:24:04 -07:00
|
|
|
Ok((blockhash, fee_calculator)) => return (blockhash, fee_calculator),
|
|
|
|
Err(err) => {
|
|
|
|
info!("Couldn't get recent blockhash: {:?}", err);
|
|
|
|
sleep(Duration::from_secs(1));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-19 14:04:36 -07:00
|
|
|
/// Runs the bench-tps main loop.
///
/// Spawns one TPS-sampling thread per client, a blockhash-polling thread, and
/// `threads` transaction-sender threads, then generates transfer batches until
/// `config.duration` elapses. Returns the total transaction count observed by
/// the first sampling thread.
///
/// `gen_keypairs` must contain at least `2 * tx_count` keypairs; it is split
/// into alternating source/destination chunks so each round signs against a
/// distinct account set. `keypair0_balance` seeds the round counter `i`
/// (presumably so restarts resume direction-switching where they left off --
/// TODO confirm with the caller).
pub fn do_bench_tps<T>(
    clients: Vec<T>,
    config: Config,
    gen_keypairs: Vec<Keypair>,
    keypair0_balance: u64,
    libra_args: Option<LibraKeys>,
) -> u64
where
    T: 'static + Client + Send + Sync,
{
    let Config {
        id,
        threads,
        thread_batch_sleep_ms,
        duration,
        tx_count,
        sustained,
        num_lamports_per_account,
        ..
    } = config;

    // Wrap each client in an Arc so it can be shared with spawned threads.
    let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
    let client = &clients[0];

    // Partition the generated keypairs into per-round source/destination chunks:
    // each chunk of 2*tx_count yields tx_count sources and tx_count destinations.
    let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
    let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
    assert!(gen_keypairs.len() >= 2 * tx_count);
    for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
        source_keypair_chunks.push(chunk[..tx_count].iter().collect());
        dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
    }

    // Retry until the cluster reports its current transaction count.
    let first_tx_count = loop {
        match client.get_transaction_count() {
            Ok(count) => break count,
            Err(err) => {
                info!("Couldn't get transaction count: {:?}", err);
                sleep(Duration::from_secs(1));
            }
        }
    };
    info!("Initial transaction count {}", first_tx_count);

    // Shared flag used to shut down all worker threads at the end of the run.
    let exit_signal = Arc::new(AtomicBool::new(false));

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    info!("Sampling TPS every {} second...", sample_period);
    let v_threads: Vec<_> = clients
        .iter()
        .map(|client| {
            let exit_signal = exit_signal.clone();
            let maxes = maxes.clone();
            let client = client.clone();
            Builder::new()
                .name("solana-client-sample".to_string())
                .spawn(move || {
                    sample_txs(&exit_signal, &maxes, sample_period, &client);
                })
                .unwrap()
        })
        .collect();

    // Queue of signed transaction batches handed from the generator to senders.
    let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

    // Latest blockhash, kept fresh by the poller thread below.
    let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
    let total_tx_sent_count = Arc::new(AtomicUsize::new(0));

    // Background thread that continuously refreshes `recent_blockhash`.
    let blockhash_thread = {
        let exit_signal = exit_signal.clone();
        let recent_blockhash = recent_blockhash.clone();
        let client = client.clone();
        let id = id.pubkey();
        Builder::new()
            .name("solana-blockhash-poller".to_string())
            .spawn(move || {
                poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
            })
            .unwrap()
    };

    // Sender threads: each drains batches from `shared_txs` and transmits them.
    let s_threads: Vec<_> = (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            let total_tx_sent_count = total_tx_sent_count.clone();
            let client = client.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &shared_tx_active_thread_count,
                        &total_tx_sent_count,
                        thread_batch_sleep_ms,
                        &client,
                    );
                })
                .unwrap()
        })
        .collect();

    // generate and send transactions for the specified duration
    let start = Instant::now();
    let keypair_chunks = source_keypair_chunks.len() as u64;
    let mut reclaim_lamports_back_to_source_account = false;
    let mut i = keypair0_balance;
    while start.elapsed() < duration {
        // Round-robin over the keypair chunks.
        let chunk_index = (i % keypair_chunks) as usize;
        generate_txs(
            &shared_txs,
            &recent_blockhash,
            &source_keypair_chunks[chunk_index],
            &dest_keypair_chunks[chunk_index],
            threads,
            reclaim_lamports_back_to_source_account,
            &libra_args,
        );

        // In sustained mode, overlap the transfers with generation. This has higher average
        // performance but lower peak performance in tested environments.
        if sustained {
            // Ensure that we don't generate more transactions than we can handle.
            while shared_txs.read().unwrap().len() > 2 * threads {
                sleep(Duration::from_millis(1));
            }
        } else {
            // Non-sustained mode: wait for the queue to fully drain and all
            // sender threads to go idle before generating the next batch.
            while !shared_txs.read().unwrap().is_empty()
                || shared_tx_active_thread_count.load(Ordering::Relaxed) > 0
            {
                sleep(Duration::from_millis(1));
            }
        }

        // Rotate destination keypairs so that the next round of transactions will have different
        // transaction signatures even when blockhash is reused.
        dest_keypair_chunks[chunk_index].rotate_left(1);

        i += 1;
        if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
            reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
        }
    }

    // Stop the sampling threads so it will collect the stats
    exit_signal.store(true, Ordering::Relaxed);

    info!("Waiting for validator threads...");
    for t in v_threads {
        if let Err(err) = t.join() {
            info!(" join() failed with: {:?}", err);
        }
    }

    // join the tx send threads
    info!("Waiting for transmit threads...");
    for t in s_threads {
        if let Err(err) = t.join() {
            info!(" join() failed with: {:?}", err);
        }
    }

    info!("Waiting for blockhash thread...");
    if let Err(err) = blockhash_thread.join() {
        info!(" join() failed with: {:?}", err);
    }

    // Report the payer's final balance before computing stats.
    let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
    metrics_submit_lamport_balance(balance);

    compute_and_report_stats(
        &maxes,
        sample_period,
        &start.elapsed(),
        total_tx_sent_count.load(Ordering::Relaxed),
    );

    // Return the transaction count seen by the first sampler.
    // NOTE(review): panics if no samples were collected -- assumed non-empty here.
    let r_maxes = maxes.read().unwrap();
    r_maxes.first().unwrap().1.txs
}
|
|
|
|
|
|
|
|
fn metrics_submit_lamport_balance(lamport_balance: u64) {
|
2019-10-04 01:16:07 -07:00
|
|
|
info!("Token balance: {}", lamport_balance);
|
2019-10-04 16:25:22 -07:00
|
|
|
datapoint_debug!(
|
2019-05-10 08:33:58 -07:00
|
|
|
"bench-tps-lamport_balance",
|
|
|
|
("balance", lamport_balance, i64)
|
2018-12-11 19:47:32 -08:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-09-27 12:19:06 -07:00
|
|
|
/// Builds a batch of Libra/Move payment transactions, one per source/dest pair,
/// each paired with its creation timestamp (ms).
///
/// The Move accounts in `move_keypairs` are split in half: the first half are
/// Move sources, the second half Move destinations. When `reclaim` is set the
/// direction is reversed and the fee payers come from `dest` instead of `source`.
#[cfg(feature = "move")]
fn generate_move_txs(
    source: &[&Keypair],
    dest: &VecDeque<&Keypair>,
    reclaim: bool,
    move_keypairs: &[Keypair],
    libra_pay_program_id: &Pubkey,
    libra_mint_id: &Pubkey,
    blockhash: &Hash,
) -> Vec<(Transaction, u64)> {
    let count = move_keypairs.len() / 2;
    let source_move = &move_keypairs[..count];
    let dest_move = &move_keypairs[count..];
    // Triples of (move-from, move-to, system fee payer), direction per `reclaim`.
    let pairs: Vec<_> = if !reclaim {
        source_move
            .iter()
            .zip(dest_move.iter())
            .zip(source.iter())
            .collect()
    } else {
        dest_move
            .iter()
            .zip(source_move.iter())
            .zip(dest.iter())
            .collect()
    };

    // Sign in parallel; each transfer moves 1 unit.
    pairs
        .par_iter()
        .map(|((from, to), payer)| {
            (
                librapay_transaction::transfer(
                    libra_pay_program_id,
                    libra_mint_id,
                    &payer,
                    &from,
                    &to.pubkey(),
                    1,
                    *blockhash,
                ),
                timestamp(),
            )
        })
        .collect()
}
|
|
|
|
|
2019-07-31 16:10:55 -07:00
|
|
|
fn generate_system_txs(
|
2019-12-18 20:50:17 -08:00
|
|
|
source: &[&Keypair],
|
|
|
|
dest: &VecDeque<&Keypair>,
|
2019-07-31 11:15:14 -07:00
|
|
|
reclaim: bool,
|
|
|
|
blockhash: &Hash,
|
|
|
|
) -> Vec<(Transaction, u64)> {
|
|
|
|
let pairs: Vec<_> = if !reclaim {
|
|
|
|
source.iter().zip(dest.iter()).collect()
|
|
|
|
} else {
|
|
|
|
dest.iter().zip(source.iter()).collect()
|
|
|
|
};
|
|
|
|
|
|
|
|
pairs
|
|
|
|
.par_iter()
|
|
|
|
.map(|(from, to)| {
|
|
|
|
(
|
2019-10-23 22:01:22 -07:00
|
|
|
system_transaction::transfer(from, &to.pubkey(), 1, *blockhash),
|
2019-07-31 11:15:14 -07:00
|
|
|
timestamp(),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2019-04-27 08:39:29 -07:00
|
|
|
fn generate_txs(
|
2018-12-11 19:47:32 -08:00
|
|
|
shared_txs: &SharedTransactions,
|
2019-12-18 20:50:17 -08:00
|
|
|
blockhash: &Arc<RwLock<Hash>>,
|
|
|
|
source: &[&Keypair],
|
|
|
|
dest: &VecDeque<&Keypair>,
|
2018-12-11 19:47:32 -08:00
|
|
|
threads: usize,
|
|
|
|
reclaim: bool,
|
2019-07-31 16:10:55 -07:00
|
|
|
libra_args: &Option<LibraKeys>,
|
2018-12-11 19:47:32 -08:00
|
|
|
) {
|
2019-12-18 20:50:17 -08:00
|
|
|
let blockhash = *blockhash.read().unwrap();
|
2018-12-11 19:47:32 -08:00
|
|
|
let tx_count = source.len();
|
2019-12-18 20:50:17 -08:00
|
|
|
info!(
|
|
|
|
"Signing transactions... {} (reclaim={}, blockhash={})",
|
|
|
|
tx_count, reclaim, &blockhash
|
|
|
|
);
|
2018-12-11 19:47:32 -08:00
|
|
|
let signing_start = Instant::now();
|
|
|
|
|
2019-07-31 16:10:55 -07:00
|
|
|
let transactions = if let Some((
|
2019-09-27 12:19:06 -07:00
|
|
|
_libra_genesis_keypair,
|
|
|
|
_libra_pay_program_id,
|
2019-07-31 16:10:55 -07:00
|
|
|
_libra_mint_program_id,
|
2019-09-27 12:19:06 -07:00
|
|
|
_libra_keys,
|
2019-07-31 16:10:55 -07:00
|
|
|
)) = libra_args
|
|
|
|
{
|
2019-09-27 12:19:06 -07:00
|
|
|
#[cfg(not(feature = "move"))]
|
|
|
|
{
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(feature = "move")]
|
|
|
|
{
|
|
|
|
generate_move_txs(
|
|
|
|
source,
|
|
|
|
dest,
|
|
|
|
reclaim,
|
|
|
|
&_libra_keys,
|
|
|
|
_libra_pay_program_id,
|
|
|
|
&_libra_genesis_keypair.pubkey(),
|
2019-12-18 20:50:17 -08:00
|
|
|
&blockhash,
|
2019-09-27 12:19:06 -07:00
|
|
|
)
|
|
|
|
}
|
2018-12-11 19:47:32 -08:00
|
|
|
} else {
|
2019-12-18 20:50:17 -08:00
|
|
|
generate_system_txs(source, dest, reclaim, &blockhash)
|
2018-12-11 19:47:32 -08:00
|
|
|
};
|
|
|
|
|
|
|
|
let duration = signing_start.elapsed();
|
|
|
|
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
|
|
|
let bsps = (tx_count) as f64 / ns as f64;
|
|
|
|
let nsps = ns as f64 / (tx_count) as f64;
|
2019-10-04 01:16:07 -07:00
|
|
|
info!(
|
2018-12-11 19:47:32 -08:00
|
|
|
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time, {}",
|
|
|
|
bsps * 1_000_000_f64,
|
|
|
|
nsps / 1_000_f64,
|
|
|
|
duration_as_ms(&duration),
|
2019-03-02 10:25:16 -08:00
|
|
|
blockhash,
|
2018-12-11 19:47:32 -08:00
|
|
|
);
|
2019-10-04 16:25:22 -07:00
|
|
|
datapoint_debug!(
|
2019-05-10 08:33:58 -07:00
|
|
|
"bench-tps-generate_txs",
|
2019-10-30 19:51:44 -07:00
|
|
|
("duration", duration_as_us(&duration), i64)
|
2018-12-11 19:47:32 -08:00
|
|
|
);
|
|
|
|
|
|
|
|
let sz = transactions.len() / threads;
|
|
|
|
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
|
|
|
{
|
|
|
|
let mut shared_txs_wl = shared_txs.write().unwrap();
|
|
|
|
for chunk in chunks {
|
|
|
|
shared_txs_wl.push_back(chunk.to_vec());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-18 20:50:17 -08:00
|
|
|
/// Blockhash-poller thread body: keeps `blockhash` refreshed from the cluster
/// until `exit_signal` is set.
///
/// Stall handling: if no new blockhash has been obtained for over 30 seconds,
/// logs an error (rate-limited to once per second); after 120 seconds the
/// whole process is aborted via `exit(1)`.
/// On each successful refresh, the payer `id`'s balance is re-sampled and
/// submitted to metrics.
fn poll_blockhash<T: Client>(
    exit_signal: &Arc<AtomicBool>,
    blockhash: &Arc<RwLock<Hash>>,
    client: &Arc<T>,
    id: &Pubkey,
) {
    let mut blockhash_last_updated = Instant::now();
    let mut last_error_log = Instant::now();
    loop {
        let blockhash_updated = {
            let old_blockhash = *blockhash.read().unwrap();
            if let Ok((new_blockhash, _fee)) = client.get_new_blockhash(&old_blockhash) {
                *blockhash.write().unwrap() = new_blockhash;
                blockhash_last_updated = Instant::now();
                true
            } else {
                // No new blockhash yet; escalate based on how long we've been stuck.
                if blockhash_last_updated.elapsed().as_secs() > 120 {
                    eprintln!("Blockhash is stuck");
                    exit(1)
                } else if blockhash_last_updated.elapsed().as_secs() > 30
                    && last_error_log.elapsed().as_secs() >= 1
                {
                    // Rate-limit the error log to once per second.
                    last_error_log = Instant::now();
                    error!("Blockhash is not updating");
                }
                false
            }
        };

        if blockhash_updated {
            let balance = client.get_balance(id).unwrap_or(0);
            metrics_submit_lamport_balance(balance);
        }

        if exit_signal.load(Ordering::Relaxed) {
            break;
        }

        sleep(Duration::from_millis(50));
    }
}
|
|
|
|
|
2019-04-19 14:04:36 -07:00
|
|
|
/// Sender-thread body: repeatedly pops transaction batches off `shared_txs`
/// and transmits them via `client` until `exit_signal` is set.
///
/// `shared_tx_thread_count` is incremented while a batch is in flight (the
/// generator uses it to detect idle senders) and `total_tx_sent_count`
/// accumulates the number of transactions handled. Transactions older than
/// `MAX_TX_QUEUE_AGE` are skipped, and finding any such stale transaction
/// causes the remaining queue to be cleared.
fn do_tx_transfers<T: Client>(
    exit_signal: &Arc<AtomicBool>,
    shared_txs: &SharedTransactions,
    shared_tx_thread_count: &Arc<AtomicIsize>,
    total_tx_sent_count: &Arc<AtomicUsize>,
    thread_batch_sleep_ms: usize,
    client: &Arc<T>,
) {
    loop {
        // Optional pacing between batches.
        if thread_batch_sleep_ms > 0 {
            sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
        }
        // Hold the write lock only long enough to pop one batch.
        let txs = {
            let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
            shared_txs_wl.pop_front()
        };
        if let Some(txs0) = txs {
            shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
            info!(
                "Transferring 1 unit {} times... to {}",
                txs0.len(),
                client.as_ref().tpu_addr(),
            );
            let tx_len = txs0.len();
            let transfer_start = Instant::now();
            let mut old_transactions = false;
            for tx in txs0 {
                let now = timestamp();
                // Transactions that are too old will be rejected by the cluster Don't bother
                // sending them.
                if now > tx.1 && now - tx.1 > 1000 * MAX_TX_QUEUE_AGE {
                    old_transactions = true;
                    continue;
                }
                client
                    .async_send_transaction(tx.0)
                    .expect("async_send_transaction in do_tx_transfers");
            }
            // If anything in this batch was stale, assume the rest of the queue
            // is stale too and drop it.
            if old_transactions {
                let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
                shared_txs_wl.clear();
            }
            shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
            total_tx_sent_count.fetch_add(tx_len, Ordering::Relaxed);
            info!(
                "Tx send done. {} ms {} tps",
                duration_as_ms(&transfer_start.elapsed()),
                tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
            );
            datapoint_debug!(
                "bench-tps-do_tx_transfers",
                ("duration", duration_as_us(&transfer_start.elapsed()), i64),
                ("count", tx_len, i64)
            );
        }
        if exit_signal.load(Ordering::Relaxed) {
            break;
        }
    }
}
|
|
|
|
|
2019-04-19 14:04:36 -07:00
|
|
|
fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
|
2019-03-29 09:05:06 -07:00
|
|
|
for a in &tx.message().account_keys[1..] {
|
2019-11-06 13:15:00 -08:00
|
|
|
if client
|
|
|
|
.get_balance_with_commitment(a, CommitmentConfig::recent())
|
|
|
|
.unwrap_or(0)
|
|
|
|
>= amount
|
|
|
|
{
|
2018-12-11 19:47:32 -08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
|
|
|
/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
|
|
|
|
/// on every iteration. This allows us to replay the transfers because the source is either empty,
|
|
|
|
/// or full
|
2019-06-04 13:56:11 -07:00
|
|
|
pub fn fund_keys<T: Client>(
    client: &T,
    source: &Keypair,
    dests: &[Keypair],
    total: u64,
    max_fee: u64,
    mut extra: u64,
) {
    // Fan-out funding: `funded` holds accounts that currently have lamports,
    // `notfunded` the accounts still waiting. Each iteration spends each funded
    // account into up to MAX_SPENDS_PER_TX new accounts.
    let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
    let mut notfunded: Vec<&Keypair> = dests.iter().collect();
    // Per-account target after reserving `extra * max_fee` for fees.
    let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);

    info!(
        "funding keys {} with lamports: {:?} total: {}",
        dests.len(),
        client.get_balance(&source.pubkey()),
        total
    );
    while !notfunded.is_empty() {
        let mut new_funded: Vec<(&Keypair, u64)> = vec![];
        let mut to_fund = vec![];
        info!("creating from... {}", funded.len());
        let mut build_to_fund = Measure::start("build_to_fund");
        // Plan this round: take up to MAX_SPENDS_PER_TX recipients off the tail
        // of `notfunded` for each funded source.
        for f in &mut funded {
            let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
            if max_units == 0 {
                break;
            }
            let start = notfunded.len() - max_units as usize;
            // Reserve a fee from this source while `extra` fee budget remains.
            let fees = if extra > 0 { max_fee } else { 0 };
            let per_unit = (f.1 - lamports_per_account - fees) / max_units;
            let moves: Vec<_> = notfunded[start..]
                .iter()
                .map(|k| (k.pubkey(), per_unit))
                .collect();
            notfunded[start..]
                .iter()
                .for_each(|k| new_funded.push((k, per_unit)));
            notfunded.truncate(start);
            if !moves.is_empty() {
                to_fund.push((f.0, moves));
            }
            extra -= 1;
        }
        build_to_fund.stop();
        debug!("build to_fund vec: {}us", build_to_fund.as_us());

        // try to transfer a "few" at a time with recent blockhash
        // assume 4MB network buffers, and 512 byte packets
        const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;

        to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
            let mut tries = 0;

            let mut make_txs = Measure::start("make_txs");
            // this set of transactions just initializes us for bookkeeping
            #[allow(clippy::clone_double_ref)] // sigh
            let mut to_fund_txs: Vec<_> = chunk
                .par_iter()
                .map(|(k, m)| {
                    let tx = Transaction::new_unsigned_instructions(
                        system_instruction::transfer_many(&k.pubkey(), &m),
                    );
                    (k.clone(), tx)
                })
                .collect();
            make_txs.stop();
            debug!(
                "make {} unsigned txs: {}us",
                to_fund_txs.len(),
                make_txs.as_us()
            );

            // All moves in a chunk carry the same per-unit amount; use the first
            // as the expected balance for verification.
            let amount = chunk[0].1[0].1;

            // Send-and-verify loop: retained (unverified) txs are re-signed with
            // a fresh blockhash and resent until all are confirmed.
            while !to_fund_txs.is_empty() {
                let receivers = to_fund_txs
                    .iter()
                    .fold(0, |len, (_, tx)| len + tx.message().instructions.len());

                info!(
                    "{} {} to {} in {} txs",
                    if tries == 0 {
                        "transferring"
                    } else {
                        " retrying"
                    },
                    amount,
                    receivers,
                    to_fund_txs.len(),
                );

                let (blockhash, _fee_calculator) = get_recent_blockhash(client);

                // re-sign retained to_fund_txes with updated blockhash
                let mut sign_txs = Measure::start("sign_txs");
                to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
                    tx.sign(&[*k], blockhash);
                });
                sign_txs.stop();
                debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us());

                let mut send_txs = Measure::start("send_txs");
                to_fund_txs.iter().for_each(|(_, tx)| {
                    client.async_send_transaction(tx.clone()).expect("transfer");
                });
                send_txs.stop();
                debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us());

                let mut verify_txs = Measure::start("verify_txs");
                let mut starting_txs = to_fund_txs.len();
                let mut verified_txs = 0;
                let mut failed_verify = 0;
                // Only loop multiple times for small (quick) transaction batches
                for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) {
                    let mut timer = Instant::now();
                    // Keep only the txs whose transfers have NOT yet landed.
                    to_fund_txs.retain(|(_, tx)| {
                        // Periodic progress logging every ~5s of verification.
                        if timer.elapsed() >= Duration::from_secs(5) {
                            if failed_verify > 0 {
                                debug!("total txs failed verify: {}", failed_verify);
                            }
                            info!(
                                "Verifying transfers... {} remaining",
                                starting_txs - verified_txs
                            );
                            timer = Instant::now();
                        }
                        let verified = verify_funding_transfer(client, &tx, amount);
                        if verified {
                            verified_txs += 1;
                        } else {
                            failed_verify += 1;
                        }
                        !verified
                    });
                    if to_fund_txs.is_empty() {
                        break;
                    }
                    debug!("Looping verifications");
                    info!("Verifying transfers... {} remaining", to_fund_txs.len());
                    sleep(Duration::from_millis(100));
                }
                starting_txs -= to_fund_txs.len();
                verify_txs.stop();
                debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us());

                // retry anything that seems to have dropped through cracks
                //  again since these txs are all or nothing, they're fine to
                //  retry
                tries += 1;
            }
            info!("transferred");
        });
        info!("funded: {} left: {}", new_funded.len(), notfunded.len());
        // The accounts funded this round become the sources for the next round.
        funded = new_funded;
    }
}
|
|
|
|
|
2019-04-19 14:04:36 -07:00
|
|
|
/// Tops up `id`'s balance to at least `tx_count` lamports by requesting an
/// airdrop from the faucet at `faucet_addr`.
///
/// No-op when the balance is already sufficient. Panics if the faucet request
/// itself fails or confirmation fails 5 times; returns
/// `BenchTpsError::AirdropFailure` when the post-airdrop balance does not
/// reflect the requested amount.
pub fn airdrop_lamports<T: Client>(
    client: &T,
    faucet_addr: &SocketAddr,
    id: &Keypair,
    tx_count: u64,
) -> Result<()> {
    let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
    metrics_submit_lamport_balance(starting_balance);
    info!("starting balance {}", starting_balance);

    if starting_balance < tx_count {
        // Request only the shortfall.
        let airdrop_amount = tx_count - starting_balance;
        info!(
            "Airdropping {:?} lamports from {} for {}",
            airdrop_amount,
            faucet_addr,
            id.pubkey(),
        );

        let (blockhash, _fee_calculator) = get_recent_blockhash(client);
        match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
            Ok(transaction) => {
                // Resend the airdrop transaction until it confirms, up to 5 tries.
                let mut tries = 0;
                loop {
                    tries += 1;
                    let signature = client.async_send_transaction(transaction.clone()).unwrap();
                    let result = client.poll_for_signature_confirmation(&signature, 1);

                    if result.is_ok() {
                        break;
                    }
                    if tries >= 5 {
                        panic!(
                            "Error requesting airdrop: to addr: {:?} amount: {} {:?}",
                            faucet_addr, airdrop_amount, result
                        )
                    }
                }
            }
            Err(err) => {
                panic!(
                    "Error requesting airdrop: {:?} to addr: {:?} amount: {}",
                    err, faucet_addr, airdrop_amount
                );
            }
        };

        // Re-read the balance; on query failure fall back to the starting
        // balance so the check below reports a failed airdrop.
        let current_balance = client
            .get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent())
            .unwrap_or_else(|e| {
                info!("airdrop error {}", e);
                starting_balance
            });
        info!("current balance {}...", current_balance);

        metrics_submit_lamport_balance(current_balance);
        if current_balance - starting_balance != airdrop_amount {
            info!(
                "Airdrop failed! {} {} {}",
                id.pubkey(),
                current_balance,
                starting_balance
            );
            return Err(BenchTpsError::AirdropFailure);
        }
    }
    Ok(())
}
|
|
|
|
|
2019-03-22 11:39:25 -07:00
|
|
|
fn compute_and_report_stats(
|
2019-05-01 15:58:35 -07:00
|
|
|
maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
|
2018-12-11 19:47:32 -08:00
|
|
|
sample_period: u64,
|
|
|
|
tx_send_elapsed: &Duration,
|
|
|
|
total_tx_send_count: usize,
|
|
|
|
) {
|
|
|
|
// Compute/report stats
|
|
|
|
let mut max_of_maxes = 0.0;
|
|
|
|
let mut max_tx_count = 0;
|
|
|
|
let mut nodes_with_zero_tps = 0;
|
|
|
|
let mut total_maxes = 0.0;
|
2019-10-04 01:16:07 -07:00
|
|
|
info!(" Node address | Max TPS | Total Transactions");
|
|
|
|
info!("---------------------+---------------+--------------------");
|
2018-12-11 19:47:32 -08:00
|
|
|
|
|
|
|
for (sock, stats) in maxes.read().unwrap().iter() {
|
2019-05-01 15:58:35 -07:00
|
|
|
let maybe_flag = match stats.txs {
|
2018-12-11 19:47:32 -08:00
|
|
|
0 => "!!!!!",
|
|
|
|
_ => "",
|
|
|
|
};
|
|
|
|
|
2019-10-04 01:16:07 -07:00
|
|
|
info!(
|
2018-12-11 19:47:32 -08:00
|
|
|
"{:20} | {:13.2} | {} {}",
|
2019-05-01 15:58:35 -07:00
|
|
|
sock, stats.tps, stats.txs, maybe_flag
|
2018-12-11 19:47:32 -08:00
|
|
|
);
|
|
|
|
|
|
|
|
if stats.tps == 0.0 {
|
|
|
|
nodes_with_zero_tps += 1;
|
|
|
|
}
|
|
|
|
total_maxes += stats.tps;
|
|
|
|
|
|
|
|
if stats.tps > max_of_maxes {
|
|
|
|
max_of_maxes = stats.tps;
|
|
|
|
}
|
2019-05-01 15:58:35 -07:00
|
|
|
if stats.txs > max_tx_count {
|
|
|
|
max_tx_count = stats.txs;
|
2018-12-11 19:47:32 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if total_maxes > 0.0 {
|
|
|
|
let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
|
2019-05-01 15:58:35 -07:00
|
|
|
let average_max = total_maxes / num_nodes_with_tps as f32;
|
2019-10-04 01:16:07 -07:00
|
|
|
info!(
|
2018-12-11 19:47:32 -08:00
|
|
|
"\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
|
|
|
|
average_max, nodes_with_zero_tps
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-05-01 15:58:35 -07:00
|
|
|
let total_tx_send_count = total_tx_send_count as u64;
|
|
|
|
let drop_rate = if total_tx_send_count > max_tx_count {
|
|
|
|
(total_tx_send_count - max_tx_count) as f64 / total_tx_send_count as f64
|
|
|
|
} else {
|
|
|
|
0.0
|
|
|
|
};
|
2019-10-04 01:16:07 -07:00
|
|
|
info!(
|
2018-12-11 19:47:32 -08:00
|
|
|
"\nHighest TPS: {:.2} sampling period {}s max transactions: {} clients: {} drop rate: {:.2}",
|
|
|
|
max_of_maxes,
|
|
|
|
sample_period,
|
|
|
|
max_tx_count,
|
|
|
|
maxes.read().unwrap().len(),
|
2019-05-01 15:58:35 -07:00
|
|
|
drop_rate,
|
2018-12-11 19:47:32 -08:00
|
|
|
);
|
2019-10-04 01:16:07 -07:00
|
|
|
info!(
|
2018-12-11 19:47:32 -08:00
|
|
|
"\tAverage TPS: {}",
|
|
|
|
max_tx_count as f32 / duration_as_s(tx_send_elapsed)
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-12-18 20:50:17 -08:00
|
|
|
// Transfer direction schedule: the first 2/3 of the lamports flow to the
// destination accounts, after which 1/3 ping-pongs back the other way on a
// fixed interval, leaving a 1/3-lamport buffer in each account.
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
    // No switching during the initial 2/3 funding phase.
    let startup_iterations = keypair_chunks * (2 * num_lamports_per_account) / 3;
    // Afterwards, reverse direction once per 1/3-sized interval.
    let switch_interval = keypair_chunks * num_lamports_per_account / 3;
    i >= startup_iterations && i % switch_interval == 0
}
|
|
|
|
|
2019-07-31 11:15:14 -07:00
|
|
|
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
|
2019-04-19 14:04:36 -07:00
|
|
|
let mut seed = [0u8; 32];
|
2019-05-01 13:21:45 -07:00
|
|
|
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
|
2019-04-19 14:04:36 -07:00
|
|
|
let mut rnd = GenKeys::new(seed);
|
|
|
|
|
2019-07-01 17:32:03 -07:00
|
|
|
let mut total_keys = 0;
|
|
|
|
let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
|
|
|
|
let mut delta = 1;
|
2019-06-14 11:11:52 -07:00
|
|
|
while total_keys < count {
|
2019-07-01 17:32:03 -07:00
|
|
|
extra += delta;
|
|
|
|
delta *= MAX_SPENDS_PER_TX;
|
|
|
|
total_keys += delta;
|
2019-04-19 14:04:36 -07:00
|
|
|
}
|
2019-07-31 11:15:14 -07:00
|
|
|
(rnd.gen_n_keypairs(total_keys), extra)
|
2019-07-27 15:28:00 -07:00
|
|
|
}
|
|
|
|
|
2019-09-27 12:19:06 -07:00
|
|
|
/// Create and fund the Move-side account hierarchy used by the Move variant
/// of the benchmark: a single mint-funded libra funding account, a fan-out of
/// `NUM_FUNDING_KEYS` intermediate funding accounts, and finally one Move
/// account per entry in `keypairs`, each funded with `total / keypairs.len()`.
///
/// Panics (via `unwrap`/`assert`) if any funding transaction fails; waits
/// with bounded retries for balances to become visible before proceeding.
#[cfg(feature = "move")]
fn fund_move_keys<T: Client>(
    client: &T,
    funding_key: &Keypair,
    keypairs: &[Keypair],
    total: u64,
    libra_pay_program_id: &Pubkey,
    libra_mint_program_id: &Pubkey,
    libra_genesis_key: &Keypair,
) {
    let (mut blockhash, _fee_calculator) = get_recent_blockhash(client);

    info!("creating the libra funding account..");
    let libra_funding_key = Keypair::new();
    let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
    client
        .send_message(&[funding_key, &libra_funding_key], tx.message)
        .unwrap();

    // Mint the entire benchmark allotment into the libra funding account.
    info!("minting to funding keypair");
    let tx = librapay_transaction::mint_tokens(
        &libra_mint_program_id,
        funding_key,
        libra_genesis_key,
        &libra_funding_key.pubkey(),
        total,
        blockhash,
    );
    client
        .send_message(&[funding_key, libra_genesis_key], tx.message)
        .unwrap();

    info!("creating {} move accounts...", keypairs.len());
    let total_len = keypairs.len();
    let create_len = 5;
    let mut funding_time = Measure::start("funding_time");
    for (i, keys) in keypairs.chunks(create_len).enumerate() {
        if client
            .get_balance_with_commitment(&keys[0].pubkey(), CommitmentConfig::recent())
            .unwrap_or(0)
            > 0
        {
            // already created these accounts.
            break;
        }

        let keypairs: Vec<_> = keys.iter().collect();
        let tx = librapay_transaction::create_accounts(funding_key, &keypairs, 1, blockhash);
        let ser_size = bincode::serialized_size(&tx).unwrap();
        // Every account in the chunk co-signs its own creation.
        let mut keys = vec![funding_key];
        keys.extend(&keypairs);
        client.send_message(&keys, tx.message).unwrap();

        if i % 10 == 0 {
            info!(
                "created {} accounts of {} (size {})",
                i,
                total_len / create_len,
                ser_size,
            );
        }
    }

    // Split the solana-side lamports evenly across intermediate funding keys
    // so the per-account transfers below can be issued in parallel batches.
    const NUM_FUNDING_KEYS: usize = 10;
    let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
    let pubkey_amounts: Vec<_> = funding_keys
        .iter()
        .map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
        .collect();
    let tx = Transaction::new_signed_instructions(
        &[funding_key],
        system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts),
        blockhash,
    );
    client.send_message(&[funding_key], tx.message).unwrap();
    // Poll (up to ~2s) until the transfer is visible before depending on it.
    let mut balance = 0;
    for _ in 0..20 {
        if let Ok(balance_) = client
            .get_balance_with_commitment(&funding_keys[0].pubkey(), CommitmentConfig::recent())
        {
            if balance_ > 0 {
                balance = balance_;
                break;
            }
        }
        sleep(Duration::from_millis(100));
    }
    assert!(balance > 0);
    info!(
        "funded multiple funding accounts with {:?} lamports",
        balance
    );

    // Mirror the intermediate funding keys on the Move side and seed each
    // with its share of the minted tokens.
    let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
    for (i, key) in libra_funding_keys.iter().enumerate() {
        let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
        client
            .send_message(&[&funding_keys[i], &key], tx.message)
            .unwrap();

        let tx = librapay_transaction::transfer(
            libra_pay_program_id,
            &libra_genesis_key.pubkey(),
            &funding_keys[i],
            &libra_funding_key,
            &key.pubkey(),
            total / NUM_FUNDING_KEYS as u64,
            blockhash,
        );
        client
            .send_message(&[&funding_keys[i], &libra_funding_key], tx.message)
            .unwrap();

        info!("funded libra funding key {}", i);
    }

    // Fan the tokens out to the benchmark keypairs, NUM_FUNDING_KEYS at a
    // time, then wait for each group's balances before moving on.
    let keypair_count = keypairs.len();
    let amount = total / (keypair_count as u64);
    for (i, keys) in keypairs[..keypair_count]
        .chunks(NUM_FUNDING_KEYS)
        .enumerate()
    {
        for (j, key) in keys.iter().enumerate() {
            let tx = librapay_transaction::transfer(
                libra_pay_program_id,
                &libra_genesis_key.pubkey(),
                &funding_keys[j],
                &libra_funding_keys[j],
                &key.pubkey(),
                amount,
                blockhash,
            );

            let _sig = client
                .async_send_transaction(tx.clone())
                .expect("create_account in generate_and_fund_keypairs");
        }

        // Bounded wait (~2s per key) for the async transfers to land.
        for (j, key) in keys.iter().enumerate() {
            let mut times = 0;
            loop {
                let balance =
                    librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
                if balance >= amount {
                    break;
                } else if times > 20 {
                    info!("timed out.. {} key: {} balance: {}", i, j, balance);
                    break;
                } else {
                    times += 1;
                    sleep(Duration::from_millis(100));
                }
            }
        }

        info!(
            "funded group {} of {}",
            i + 1,
            keypairs.len() / NUM_FUNDING_KEYS
        );
        // Refresh the blockhash so later groups' transactions don't expire.
        blockhash = get_recent_blockhash(client).0;
    }

    funding_time.stop();
    info!("done funding keys, took {} ms", funding_time.as_ms());
}
|
|
|
|
|
2019-05-01 13:21:45 -07:00
|
|
|
/// Derive `keypair_count` keypairs from `funding_key` and ensure each holds
/// at least `lamports_per_account`, requesting a faucet airdrop to
/// `funding_key` when its balance cannot cover the total.
///
/// When `use_move` is set (and the crate is built with the "move" feature),
/// a parallel set of Move keypairs is generated and funded as well.
///
/// Returns the funded keypairs, the optional Move key material, and the
/// balance observed on the last derived keypair before funding.
///
/// # Errors
/// Propagates `airdrop_lamports` failures. Panics on `faucet_addr.unwrap()`
/// if an airdrop is needed but no faucet address was provided.
pub fn generate_and_fund_keypairs<T: Client>(
    client: &T,
    faucet_addr: Option<SocketAddr>,
    funding_key: &Keypair,
    keypair_count: usize,
    lamports_per_account: u64,
    use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
    info!("Creating {} keypairs...", keypair_count);
    let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
    info!("Get lamports...");

    // Sample the first keypair, see if it has lamports, if so then resume.
    // This logic is to prevent lamport loss on repeated solana-bench-tps executions
    let last_keypair_balance = client
        .get_balance(&keypairs[keypair_count - 1].pubkey())
        .unwrap_or(0);

    // With the "move" feature the binding is mutated below, without it the
    // value stays None; the cfg split avoids an unused-mut warning.
    #[cfg(feature = "move")]
    let mut move_keypairs_ret = None;

    #[cfg(not(feature = "move"))]
    let move_keypairs_ret = None;

    // Only fund if the sampled keypair is short of the target balance.
    if lamports_per_account > last_keypair_balance {
        let (_blockhash, fee_calculator) = get_recent_blockhash(client);
        // Top-up per account, padded by one signature fee.
        let account_desired_balance =
            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
        // Fees for the `extra` intermediate keys used by the funding fan-out.
        let extra_fees = extra * fee_calculator.max_lamports_per_signature;
        let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
        if use_move {
            // Triple the budget: 1/3 solana keys, 1/3 move keys, 1/3 fees.
            total *= 3;
        }

        info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
              last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
              account_desired_balance, total
        );

        if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
            airdrop_lamports(client, &faucet_addr.unwrap(), funding_key, total)?;
        }

        #[cfg(feature = "move")]
        {
            if use_move {
                let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
                let libra_mint_program_id = upload_mint_script(&funding_key, client);
                let libra_pay_program_id = upload_payment_script(&funding_key, client);

                // Generate another set of keypairs for move accounts.
                // Still fund the solana ones which will be used for fees.
                let seed = [0u8; 32];
                let mut rnd = GenKeys::new(seed);
                let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
                fund_move_keys(
                    client,
                    funding_key,
                    &move_keypairs,
                    total / 3,
                    &libra_pay_program_id,
                    &libra_mint_program_id,
                    &libra_genesis_keypair,
                );
                move_keypairs_ret = Some((
                    libra_genesis_keypair,
                    libra_pay_program_id,
                    libra_mint_program_id,
                    move_keypairs,
                ));

                // Give solana keys 1/3 and move keys 1/3 the lamports. Keep 1/3 for fees.
                total /= 3;
            }
        }

        fund_keys(
            client,
            funding_key,
            &keypairs,
            total,
            fee_calculator.max_lamports_per_signature,
            extra,
        );
    }

    // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
    keypairs.truncate(keypair_count);

    Ok((keypairs, move_keypairs_ret, last_keypair_balance))
}
|
|
|
|
|
2018-12-11 19:47:32 -08:00
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use solana_runtime::{bank::Bank, bank_client::BankClient};
    use solana_sdk::{
        client::SyncClient, fee_calculator::FeeCalculator, genesis_config::create_genesis_config,
    };

    // Direction switches start only after the 2/3 startup phase, then repeat
    // once per 1/3-sized interval (both scaled by the chunk count).
    #[test]
    fn test_switch_directions() {
        // One chunk: threshold 20, interval 10.
        assert_eq!(should_switch_directions(30, 1, 0), false);
        assert_eq!(should_switch_directions(30, 1, 1), false);
        assert_eq!(should_switch_directions(30, 1, 20), true);
        assert_eq!(should_switch_directions(30, 1, 21), false);
        assert_eq!(should_switch_directions(30, 1, 30), true);
        assert_eq!(should_switch_directions(30, 1, 90), true);
        assert_eq!(should_switch_directions(30, 1, 91), false);

        // Two chunks: threshold 40, interval 20.
        assert_eq!(should_switch_directions(30, 2, 0), false);
        assert_eq!(should_switch_directions(30, 2, 1), false);
        assert_eq!(should_switch_directions(30, 2, 20), false);
        assert_eq!(should_switch_directions(30, 2, 40), true);
        assert_eq!(should_switch_directions(30, 2, 90), false);
        assert_eq!(should_switch_directions(30, 2, 100), true);
        assert_eq!(should_switch_directions(30, 2, 101), false);
    }

    // End-to-end smoke test: fund keypairs and run the bench loop against an
    // in-process bank.
    #[test]
    fn test_bench_tps_bank_client() {
        let (genesis_config, id) = create_genesis_config(10_000);
        let clients = vec![BankClient::new(Bank::new(&genesis_config))];

        let mut cfg = Config::default();
        cfg.id = id;
        cfg.tx_count = 10;
        cfg.duration = Duration::from_secs(5);

        let keypair_count = cfg.tx_count * cfg.keypair_multiplier;
        let (keypairs, _move_keypairs, _keypair_balance) =
            generate_and_fund_keypairs(&clients[0], None, &cfg.id, keypair_count, 20, false)
                .unwrap();

        do_bench_tps(clients, cfg, keypairs, 0, None);
    }

    // With zero fees, every generated keypair ends up holding exactly the
    // requested lamports.
    #[test]
    fn test_bench_tps_fund_keys() {
        let (genesis_config, id) = create_genesis_config(10_000);
        let bank_client = BankClient::new(Bank::new(&genesis_config));
        let keypair_count = 20;
        let lamports = 20;

        let (keypairs, _move_keypairs, _keypair_balance) =
            generate_and_fund_keypairs(&bank_client, None, &id, keypair_count, lamports, false)
                .unwrap();

        for kp in &keypairs {
            assert_eq!(
                bank_client
                    .get_balance_with_commitment(&kp.pubkey(), CommitmentConfig::recent())
                    .unwrap(),
                lamports
            );
        }
    }

    // With a non-zero fee schedule, each keypair is over-funded by one
    // signature fee on top of the requested lamports.
    #[test]
    fn test_bench_tps_fund_keys_with_fees() {
        let (mut genesis_config, id) = create_genesis_config(10_000);
        genesis_config.fee_calculator = FeeCalculator::new(11, 0);
        let bank_client = BankClient::new(Bank::new(&genesis_config));
        let keypair_count = 20;
        let lamports = 20;

        let (keypairs, _move_keypairs, _keypair_balance) =
            generate_and_fund_keypairs(&bank_client, None, &id, keypair_count, lamports, false)
                .unwrap();

        let max_fee = bank_client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .unwrap()
            .1
            .max_lamports_per_signature;
        for kp in &keypairs {
            assert_eq!(
                bank_client.get_balance(&kp.pubkey()).unwrap(),
                lamports + max_fee
            );
        }
    }
}
|