2018-07-02 21:13:47 -07:00
|
|
|
extern crate bincode;
|
2018-08-06 20:51:12 -07:00
|
|
|
#[macro_use]
|
2018-07-02 21:43:44 -07:00
|
|
|
extern crate clap;
|
2018-07-25 18:46:18 -07:00
|
|
|
extern crate influx_db_client;
|
2018-03-28 15:51:18 -07:00
|
|
|
extern crate rayon;
|
2018-03-05 14:34:15 -08:00
|
|
|
extern crate serde_json;
|
2018-03-27 15:24:05 -07:00
|
|
|
extern crate solana;
|
2018-02-28 09:07:54 -08:00
|
|
|
|
2018-07-02 21:43:44 -07:00
|
|
|
use clap::{App, Arg};
|
2018-07-25 18:46:18 -07:00
|
|
|
use influx_db_client as influxdb;
|
2018-04-02 20:15:21 -07:00
|
|
|
use rayon::prelude::*;
|
2018-07-31 22:07:53 -07:00
|
|
|
use solana::client::mk_client;
|
2018-07-11 00:18:48 -07:00
|
|
|
use solana::crdt::{Crdt, NodeInfo};
|
2018-07-31 22:07:53 -07:00
|
|
|
use solana::drone::DRONE_PORT;
|
2018-07-05 12:01:40 -07:00
|
|
|
use solana::fullnode::Config;
|
2018-06-14 16:42:27 -07:00
|
|
|
use solana::hash::Hash;
|
2018-07-27 21:37:53 -07:00
|
|
|
use solana::logger;
|
2018-07-25 18:46:18 -07:00
|
|
|
use solana::metrics;
|
2018-07-19 11:31:56 -07:00
|
|
|
use solana::nat::{udp_public_bind, udp_random_bind, UdpSocketPair};
|
2018-06-07 15:06:32 -07:00
|
|
|
use solana::ncp::Ncp;
|
2018-07-03 21:14:08 -07:00
|
|
|
use solana::service::Service;
|
2018-07-12 14:42:01 -07:00
|
|
|
use solana::signature::{read_keypair, GenKeys, KeyPair, KeyPairUtil};
|
2018-05-25 15:54:03 -07:00
|
|
|
use solana::streamer::default_window;
|
2018-05-08 17:59:01 -07:00
|
|
|
use solana::thin_client::ThinClient;
|
2018-06-14 16:42:27 -07:00
|
|
|
use solana::timing::{duration_as_ms, duration_as_s};
|
2018-03-27 15:24:05 -07:00
|
|
|
use solana::transaction::Transaction;
|
2018-07-31 22:07:53 -07:00
|
|
|
use solana::wallet::request_airdrop;
|
2018-07-23 14:26:16 -07:00
|
|
|
use std::collections::VecDeque;
|
2018-05-25 15:54:03 -07:00
|
|
|
use std::fs::File;
|
2018-07-31 22:07:53 -07:00
|
|
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
2018-04-19 07:06:19 -07:00
|
|
|
use std::process::exit;
|
2018-07-25 09:00:55 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
|
2018-05-25 15:54:03 -07:00
|
|
|
use std::sync::{Arc, RwLock};
|
2018-05-04 11:11:39 -07:00
|
|
|
use std::thread::sleep;
|
2018-06-14 16:42:27 -07:00
|
|
|
use std::thread::Builder;
|
2018-05-29 20:20:28 -07:00
|
|
|
use std::thread::JoinHandle;
|
2018-05-04 11:11:39 -07:00
|
|
|
use std::time::Duration;
|
2018-04-26 12:17:36 -07:00
|
|
|
use std::time::Instant;
|
2018-03-04 00:21:40 -08:00
|
|
|
|
2018-07-19 20:09:57 -07:00
|
|
|
/// Per-node sampling results, produced by `sample_tx_count` and aggregated
/// by `compute_and_report_stats`.
pub struct NodeStats {
    /// Maximum TPS reported by this node
    pub tps: f64,
    /// Total transactions reported by this node
    pub tx: u64,
}
|
|
|
|
|
2018-07-25 18:46:18 -07:00
|
|
|
fn metrics_submit_token_balance(token_balance: i64) {
|
|
|
|
println!("Token balance: {}", token_balance);
|
|
|
|
metrics::submit(
|
|
|
|
influxdb::Point::new("bench-tps")
|
|
|
|
.add_tag("op", influxdb::Value::String("token_balance".to_string()))
|
|
|
|
.add_field("balance", influxdb::Value::Integer(token_balance as i64))
|
|
|
|
.to_owned(),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2018-06-14 16:42:27 -07:00
|
|
|
/// Samples validator `v`'s transaction count once per `sample_period` seconds,
/// printing the observed TPS for each interval.  When `exit_signal` is raised,
/// pushes the peak TPS and total transaction count (relative to
/// `first_tx_count`) onto `maxes` and returns.
///
/// Intended to run on its own thread, one per validator.
fn sample_tx_count(
    exit_signal: &Arc<AtomicBool>,
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    first_tx_count: u64,
    v: &NodeInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
    let mut total;

    // Prefix every log line with this node's TPU address so interleaved
    // thread output stays attributable.
    let log_prefix = format!("{:21}:", v.contact_info.tpu.to_string());

    loop {
        let tx_count = client.transaction_count();
        // The node-reported count is cumulative; a decrease indicates a bug
        // or a node restart, so fail loudly.
        assert!(
            tx_count >= initial_tx_count,
            "expected tx_count({}) >= initial_tx_count({})",
            tx_count,
            initial_tx_count
        );
        let duration = now.elapsed();
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;

        // TPS for this interval: transactions in the sample divided by the
        // elapsed time in nanoseconds.
        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        // Exclude transactions counted before this benchmark run started.
        if tx_count > first_tx_count {
            total = tx_count - first_tx_count;
        } else {
            total = 0;
        }
        println!(
            "{} {:9.2} TPS, Transactions: {:6}, Total transactions: {}",
            log_prefix, tps, sample, total
        );
        sleep(Duration::new(sample_period, 0));

        if exit_signal.load(Ordering::Relaxed) {
            println!("{} Exiting validator thread", log_prefix);
            let stats = NodeStats {
                tps: max_tps,
                tx: total,
            };
            maxes.write().unwrap().push((v.contact_info.tpu, stats));
            break;
        }
    }
}
|
|
|
|
|
2018-08-04 20:25:23 -07:00
|
|
|
/// Send loopback payment of 0 tokens and confirm the network processed it
|
|
|
|
fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, id: &KeyPair) {
|
|
|
|
let transfer_start = Instant::now();
|
|
|
|
|
|
|
|
let mut poll_count = 0;
|
|
|
|
loop {
|
|
|
|
if poll_count > 0 && poll_count % 8 == 0 {
|
|
|
|
println!(
|
|
|
|
"polling for barrier transaction confirmation, attempt {}",
|
|
|
|
poll_count
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
*last_id = barrier_client.get_last_id();
|
|
|
|
let sig = barrier_client
|
|
|
|
.transfer(0, &id, id.pubkey(), last_id)
|
|
|
|
.expect("Unable to send barrier transaction");
|
|
|
|
|
|
|
|
let confirmatiom = barrier_client.poll_for_signature(&sig);
|
|
|
|
let duration_ms = duration_as_ms(&transfer_start.elapsed());
|
|
|
|
if confirmatiom.is_ok() {
|
|
|
|
println!("barrier transaction confirmed in {}ms", duration_ms);
|
|
|
|
|
|
|
|
metrics::submit(
|
|
|
|
influxdb::Point::new("bench-tps")
|
|
|
|
.add_tag(
|
|
|
|
"op",
|
|
|
|
influxdb::Value::String("send_barrier_transaction".to_string()),
|
|
|
|
)
|
|
|
|
.add_field("poll_count", influxdb::Value::Integer(poll_count))
|
|
|
|
.add_field("duration", influxdb::Value::Integer(duration_ms as i64))
|
|
|
|
.to_owned(),
|
|
|
|
);
|
|
|
|
|
|
|
|
// Sanity check that the client balance is still 1
|
|
|
|
let balance = barrier_client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
|
|
|
|
if balance != 1 {
|
|
|
|
panic!("Expected an account balance of 1 (balance: {}", balance);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Timeout after 3 minutes. When running a CPU-only leader+validator+drone+bench-tps on a dev
|
|
|
|
// machine, some batches of transactions can take upwards of 1 minute...
|
|
|
|
if duration_ms > 1000 * 60 * 3 {
|
|
|
|
println!("Error: Couldn't confirm barrier transaction!");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
let new_last_id = barrier_client.get_last_id();
|
|
|
|
if new_last_id == *last_id {
|
|
|
|
if poll_count > 0 && poll_count % 8 == 0 {
|
|
|
|
println!("last_id is not advancing, still at {:?}", *last_id);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
*last_id = new_last_id;
|
|
|
|
}
|
|
|
|
|
|
|
|
poll_count += 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-23 14:26:16 -07:00
|
|
|
fn generate_txs(
|
|
|
|
shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
|
2018-07-12 14:42:01 -07:00
|
|
|
id: &KeyPair,
|
2018-07-11 12:32:54 -07:00
|
|
|
keypairs: &[KeyPair],
|
2018-08-04 20:25:23 -07:00
|
|
|
last_id: &Hash,
|
2018-06-14 16:42:27 -07:00
|
|
|
threads: usize,
|
2018-07-11 22:21:51 -07:00
|
|
|
reclaim: bool,
|
2018-06-14 16:42:27 -07:00
|
|
|
) {
|
2018-08-06 10:52:16 -07:00
|
|
|
let tx_count = keypairs.len();
|
|
|
|
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
|
2018-06-14 16:42:27 -07:00
|
|
|
let signing_start = Instant::now();
|
2018-07-02 21:13:47 -07:00
|
|
|
|
2018-08-05 22:41:19 -07:00
|
|
|
let transactions: Vec<_> = keypairs
|
|
|
|
.par_iter()
|
|
|
|
.map(|keypair| {
|
|
|
|
if !reclaim {
|
|
|
|
Transaction::new(&id, keypair.pubkey(), 1, *last_id)
|
|
|
|
} else {
|
|
|
|
Transaction::new(keypair, id.pubkey(), 1, *last_id)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect();
|
2018-06-14 16:42:27 -07:00
|
|
|
|
|
|
|
let duration = signing_start.elapsed();
|
|
|
|
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
2018-08-06 10:52:16 -07:00
|
|
|
let bsps = (tx_count) as f64 / ns as f64;
|
|
|
|
let nsps = ns as f64 / (tx_count) as f64;
|
2018-06-14 16:42:27 -07:00
|
|
|
println!(
|
2018-06-15 10:12:38 -07:00
|
|
|
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
|
2018-06-14 16:42:27 -07:00
|
|
|
bsps * 1_000_000_f64,
|
2018-06-15 10:12:38 -07:00
|
|
|
nsps / 1_000_f64,
|
|
|
|
duration_as_ms(&duration),
|
2018-06-14 16:42:27 -07:00
|
|
|
);
|
2018-07-25 18:46:18 -07:00
|
|
|
metrics::submit(
|
|
|
|
influxdb::Point::new("bench-tps")
|
|
|
|
.add_tag("op", influxdb::Value::String("generate_txs".to_string()))
|
|
|
|
.add_field(
|
|
|
|
"duration",
|
|
|
|
influxdb::Value::Integer(duration_as_ms(&duration) as i64),
|
|
|
|
)
|
|
|
|
.to_owned(),
|
|
|
|
);
|
2018-06-14 16:42:27 -07:00
|
|
|
|
|
|
|
let sz = transactions.len() / threads;
|
|
|
|
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
2018-07-23 14:26:16 -07:00
|
|
|
{
|
|
|
|
let mut shared_txs_wl = shared_txs.write().unwrap();
|
|
|
|
for chunk in chunks {
|
|
|
|
shared_txs_wl.push_back(chunk.to_vec());
|
|
|
|
}
|
|
|
|
}
|
2018-06-14 16:42:27 -07:00
|
|
|
}
|
|
|
|
|
2018-07-25 09:00:55 -07:00
|
|
|
fn do_tx_transfers(
|
|
|
|
exit_signal: &Arc<AtomicBool>,
|
|
|
|
shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
|
|
|
|
leader: &NodeInfo,
|
|
|
|
shared_tx_thread_count: &Arc<AtomicIsize>,
|
|
|
|
) {
|
|
|
|
let client = mk_client(&leader);
|
|
|
|
loop {
|
|
|
|
let txs;
|
|
|
|
{
|
|
|
|
let mut shared_txs_wl = shared_txs.write().unwrap();
|
|
|
|
txs = shared_txs_wl.pop_front();
|
|
|
|
}
|
|
|
|
if let Some(txs0) = txs {
|
|
|
|
shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
|
|
|
|
println!(
|
|
|
|
"Transferring 1 unit {} times... to {}",
|
|
|
|
txs0.len(),
|
|
|
|
leader.contact_info.tpu
|
|
|
|
);
|
|
|
|
let tx_len = txs0.len();
|
|
|
|
let transfer_start = Instant::now();
|
|
|
|
for tx in txs0 {
|
|
|
|
client.transfer_signed(&tx).unwrap();
|
|
|
|
}
|
|
|
|
shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
|
|
|
|
println!(
|
|
|
|
"Tx send done. {} ms {} tps",
|
|
|
|
duration_as_ms(&transfer_start.elapsed()),
|
|
|
|
tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
|
|
|
|
);
|
2018-07-25 18:46:18 -07:00
|
|
|
metrics::submit(
|
|
|
|
influxdb::Point::new("bench-tps")
|
|
|
|
.add_tag("op", influxdb::Value::String("do_tx_transfers".to_string()))
|
|
|
|
.add_field(
|
|
|
|
"duration",
|
|
|
|
influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
|
|
|
|
)
|
|
|
|
.add_field("count", influxdb::Value::Integer(tx_len as i64))
|
|
|
|
.to_owned(),
|
|
|
|
);
|
2018-07-25 09:00:55 -07:00
|
|
|
}
|
|
|
|
if exit_signal.load(Ordering::Relaxed) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-25 09:27:03 -07:00
|
|
|
/// Tops up `id`'s balance to at least `tx_count` tokens by requesting an
/// airdrop from the leader's drone, then polls until the balance changes.
/// Exits the process if the airdropped amount doesn't arrive in full.
fn airdrop_tokens(client: &mut ThinClient, leader: &NodeInfo, id: &KeyPair, tx_count: i64) {
    // The drone listens on the leader's host at the well-known drone port.
    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(DRONE_PORT);

    let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
    metrics_submit_token_balance(starting_balance);

    // Only request the difference needed to reach `tx_count`.
    if starting_balance < tx_count {
        let airdrop_amount = tx_count - starting_balance;
        println!(
            "Airdropping {:?} tokens from {}",
            airdrop_amount, drone_addr
        );

        let previous_balance = starting_balance;
        request_airdrop(&drone_addr, &id.pubkey(), airdrop_amount as u64).unwrap();

        // TODO: return airdrop Result from Drone instead of polling the
        // network
        let mut current_balance = previous_balance;
        // Poll up to 20 times (~10s) for the balance to reflect the airdrop.
        for _ in 0..20 {
            sleep(Duration::from_millis(500));
            current_balance = client.poll_get_balance(&id.pubkey()).unwrap();
            if starting_balance != current_balance {
                break;
            }
            println!(".");
        }
        metrics_submit_token_balance(current_balance);
        // A partial credit also counts as failure here; the benchmark needs
        // the full amount to fund its transactions.
        if current_balance - starting_balance != airdrop_amount {
            println!("Airdrop failed!");
            exit(1);
        }
    }
}
|
|
|
|
|
|
|
|
fn compute_and_report_stats(
|
|
|
|
maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
|
|
|
|
sample_period: u64,
|
|
|
|
tx_send_elapsed: &Duration,
|
|
|
|
) {
|
|
|
|
// Compute/report stats
|
|
|
|
let mut max_of_maxes = 0.0;
|
|
|
|
let mut total_txs = 0;
|
|
|
|
let mut nodes_with_zero_tps = 0;
|
|
|
|
let mut total_maxes = 0.0;
|
|
|
|
println!(" Node address | Max TPS | Total Transactions");
|
|
|
|
println!("---------------------+---------------+--------------------");
|
|
|
|
|
|
|
|
for (sock, stats) in maxes.read().unwrap().iter() {
|
|
|
|
let maybe_flag = match stats.tx {
|
|
|
|
0 => "!!!!!",
|
|
|
|
_ => "",
|
|
|
|
};
|
|
|
|
|
|
|
|
println!(
|
|
|
|
"{:20} | {:13.2} | {} {}",
|
|
|
|
(*sock).to_string(),
|
|
|
|
stats.tps,
|
|
|
|
stats.tx,
|
|
|
|
maybe_flag
|
|
|
|
);
|
|
|
|
|
|
|
|
if stats.tps == 0.0 {
|
|
|
|
nodes_with_zero_tps += 1;
|
|
|
|
}
|
|
|
|
total_maxes += stats.tps;
|
|
|
|
|
|
|
|
if stats.tps > max_of_maxes {
|
|
|
|
max_of_maxes = stats.tps;
|
|
|
|
}
|
|
|
|
total_txs += stats.tx;
|
|
|
|
}
|
|
|
|
|
|
|
|
if total_maxes > 0.0 {
|
|
|
|
let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
|
|
|
|
let average_max = total_maxes / num_nodes_with_tps as f64;
|
|
|
|
println!(
|
|
|
|
"\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
|
|
|
|
average_max, nodes_with_zero_tps
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
println!(
|
|
|
|
"\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
|
|
|
|
max_of_maxes,
|
|
|
|
sample_period,
|
|
|
|
total_txs,
|
|
|
|
maxes.read().unwrap().len()
|
|
|
|
);
|
|
|
|
println!(
|
|
|
|
"\tAverage TPS: {}",
|
|
|
|
total_txs as f32 / duration_as_s(tx_send_elapsed)
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2018-02-28 09:07:54 -08:00
|
|
|
/// Entry point for the bench-tps client: parses CLI flags, converges on the
/// gossip network, funds the benchmark accounts via airdrop, then spawns
/// sampling threads (one per validator) and sender threads that push batches
/// of signed transactions at the leader for the requested duration, finally
/// joining all threads and reporting aggregate TPS statistics.
fn main() {
    logger::setup();
    metrics::set_panic_hook("bench-tps");
    // CLI defaults; overridden below by the matching flags.
    let mut threads = 4usize;
    let mut num_nodes = 1usize;
    let mut time_sec = 90;
    let mut addr = None;
    let mut sustained = false;
    let mut tx_count = 500_000;

    let matches = App::new("solana-bench-tps")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .default_value("~/.config/solana/id.json")
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("num_nodes")
                .short("n")
                .long("nodes")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of nodes to converge to"),
        )
        .arg(
            Arg::with_name("threads")
                .short("t")
                .long("threads")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of threads"),
        )
        .arg(
            Arg::with_name("seconds")
                .short("s")
                .long("sec")
                .value_name("NUMBER")
                .takes_value(true)
                .help("send transactions for this many seconds"),
        )
        .arg(
            Arg::with_name("converge_only")
                .short("c")
                .help("exit immediately after converging"),
        )
        .arg(
            Arg::with_name("addr")
                .short("a")
                .long("addr")
                .value_name("PATH")
                .takes_value(true)
                .help("address to advertise to the network"),
        )
        .arg(
            Arg::with_name("sustained")
                .long("sustained")
                .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
        )
        .arg(
            Arg::with_name("tx_count")
                .long("tx_count")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of transactions to send in a single batch")
        )
        .get_matches();

    // Leader comes from a config file when given, else a placeholder bound
    // to 0.0.0.0:8000.
    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    };

    let id = read_keypair(matches.value_of("keypair").unwrap()).expect("client keypair");

    if let Some(t) = matches.value_of("threads") {
        threads = t.to_string().parse().expect("integer");
    }

    if let Some(n) = matches.value_of("num_nodes") {
        num_nodes = n.to_string().parse().expect("integer");
    }

    if let Some(s) = matches.value_of("seconds") {
        time_sec = s.to_string().parse().expect("integer");
    }

    if let Some(s) = matches.value_of("addr") {
        addr = Some(s.to_string());
    }

    if let Some(s) = matches.value_of("tx_count") {
        tx_count = s.to_string().parse().expect("integer");
    }

    if matches.is_present("sustained") {
        sustained = true;
    }

    // Discover validators over gossip; `c_threads` collects the gossip
    // service's thread handles for joining at shutdown.
    let exit_signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(&leader, &exit_signal, num_nodes, &mut c_threads, addr);

    println!(" Node address | Node identifier");
    println!("----------------------+------------------");
    for node in &validators {
        println!(
            " {:20} | {:16x}",
            node.contact_info.tpu.to_string(),
            node.debug_id()
        );
    }
    println!("Nodes: {}", validators.len());

    if validators.len() < num_nodes {
        println!(
            "Error: Insufficient nodes discovered. Expecting {} or more",
            num_nodes
        );
        exit(1);
    }

    if matches.is_present("converge_only") {
        return;
    }

    // Separate clients: one for the benchmark traffic, one for the barrier
    // transactions so they don't contend on the same connection.
    let mut client = mk_client(&leader);
    let mut barrier_client = mk_client(&leader);

    // Seed the keypair generator from the client's public key so repeated
    // runs with the same identity regenerate the same destination accounts.
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&id.public_key_bytes()[..32]);
    let mut rnd = GenKeys::new(seed);

    println!("Creating {} keypairs...", tx_count / 2);
    let keypairs = rnd.gen_n_keypairs(tx_count / 2);
    let barrier_id = rnd.gen_n_keypairs(1).pop().unwrap();

    println!("Get tokens...");
    airdrop_tokens(&mut client, &leader, &id, tx_count);
    // The barrier account only ever needs a single token.
    airdrop_tokens(&mut barrier_client, &leader, &barrier_id, 1);

    println!("Get last ID...");
    let mut last_id = client.get_last_id();
    println!("Got last ID {:?}", last_id);

    let first_tx_count = client.transaction_count();
    println!("Initial transaction count {}", first_tx_count);

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    println!("Sampling TPS every {} second...", sample_period);
    let v_threads: Vec<_> = validators
        .into_iter()
        .map(|v| {
            let exit_signal = exit_signal.clone();
            let maxes = maxes.clone();
            Builder::new()
                .name("solana-client-sample".to_string())
                .spawn(move || {
                    sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
                })
                .unwrap()
        })
        .collect();

    // Work queue shared between the generator (this thread) and the sender
    // threads.
    let shared_txs: Arc<RwLock<VecDeque<Vec<Transaction>>>> =
        Arc::new(RwLock::new(VecDeque::new()));

    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));

    let s_threads: Vec<_> = (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let leader = leader.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &leader,
                        &shared_tx_active_thread_count,
                    );
                })
                .unwrap()
        })
        .collect();

    // generate and send transactions for the specified duration
    let time = Duration::new(time_sec, 0);
    let now = Instant::now();
    let mut reclaim_tokens_back_to_source_account = false;
    // The loop also runs one extra reclaim pass after time expires so the
    // source account recovers its tokens.
    while now.elapsed() < time || reclaim_tokens_back_to_source_account {
        let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
        metrics_submit_token_balance(balance);

        // ping-pong between source and destination accounts for each loop iteration
        // this seems to be faster than trying to determine the balance of individual
        // accounts
        generate_txs(
            &shared_txs,
            &id,
            &keypairs,
            &last_id,
            threads,
            reclaim_tokens_back_to_source_account,
        );
        reclaim_tokens_back_to_source_account = !reclaim_tokens_back_to_source_account;

        // In sustained mode overlap the transfers with generation
        // this has higher average performance but lower peak performance
        // in tested environments.
        if !sustained {
            while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
                sleep(Duration::from_millis(100));
            }
        }
        // It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
        // transactions sent by `generate_txs()` so instead send and confirm a single transaction
        // to validate the network is still functional.
        send_barrier_transaction(&mut barrier_client, &mut last_id, &barrier_id);
    }

    // Stop the sampling threads so it will collect the stats
    exit_signal.store(true, Ordering::Relaxed);

    println!("Waiting for validator threads...");
    for t in v_threads {
        if let Err(err) = t.join() {
            println!("  join() failed with: {:?}", err);
        }
    }

    // join the tx send threads
    println!("Waiting for transmit threads...");
    for t in s_threads {
        if let Err(err) = t.join() {
            println!("  join() failed with: {:?}", err);
        }
    }

    let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
    metrics_submit_token_balance(balance);

    compute_and_report_stats(&maxes, sample_period, &now.elapsed());

    // join the crdt client threads
    for t in c_threads {
        t.join().unwrap();
    }
}
|
|
|
|
|
2018-07-19 11:31:56 -07:00
|
|
|
fn spy_node(addr: Option<String>) -> (NodeInfo, UdpSocket) {
|
|
|
|
let gossip_socket_pair;
|
|
|
|
if let Some(a) = addr {
|
2018-07-30 11:10:11 -07:00
|
|
|
let gossip_socket = udp_random_bind(8000, 10000, 5).unwrap();
|
|
|
|
let gossip_addr = SocketAddr::new(
|
|
|
|
a.parse().unwrap(),
|
|
|
|
gossip_socket.local_addr().unwrap().port(),
|
|
|
|
);
|
2018-07-19 11:31:56 -07:00
|
|
|
gossip_socket_pair = UdpSocketPair {
|
2018-07-30 11:10:11 -07:00
|
|
|
addr: gossip_addr,
|
|
|
|
receiver: gossip_socket.try_clone().unwrap(),
|
|
|
|
sender: gossip_socket,
|
2018-07-19 11:31:56 -07:00
|
|
|
};
|
|
|
|
} else {
|
|
|
|
gossip_socket_pair = udp_public_bind("gossip", 8000, 10000);
|
|
|
|
}
|
|
|
|
|
2018-05-25 15:54:03 -07:00
|
|
|
let pubkey = KeyPair::new().pubkey();
|
2018-06-29 14:12:26 -07:00
|
|
|
let daddr = "0.0.0.0:0".parse().unwrap();
|
2018-07-17 11:23:32 -07:00
|
|
|
assert!(!gossip_socket_pair.addr.ip().is_unspecified());
|
|
|
|
assert!(!gossip_socket_pair.addr.ip().is_multicast());
|
2018-07-11 00:18:48 -07:00
|
|
|
let node = NodeInfo::new(
|
2018-06-02 08:32:51 -07:00
|
|
|
pubkey,
|
2018-06-29 14:12:26 -07:00
|
|
|
//gossip.local_addr().unwrap(),
|
|
|
|
gossip_socket_pair.addr,
|
2018-06-02 08:32:51 -07:00
|
|
|
daddr,
|
|
|
|
daddr,
|
|
|
|
daddr,
|
|
|
|
daddr,
|
|
|
|
);
|
2018-06-29 14:12:26 -07:00
|
|
|
(node, gossip_socket_pair.receiver)
|
2018-05-25 15:54:03 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Joins the gossip network via a spy node and waits (up to ~30 seconds) for
/// at least `num_nodes` validators with valid RPU addresses to appear.
/// The gossip service's thread handles are appended to `threads` so the
/// caller can join them at shutdown.  Returns whichever nodes were
/// discovered, which may be fewer than `num_nodes` on timeout.
fn converge(
    leader: &NodeInfo,
    exit_signal: &Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
    addr: Option<String>,
) -> Vec<NodeInfo> {
    //lets spy on the network
    let (spy, spy_gossip) = spy_node(addr);
    let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
    // Seed the table with the leader so gossip has somewhere to start.
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let window = default_window();
    let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
    let ncp = Ncp::new(
        &spy_ref,
        window.clone(),
        spy_gossip,
        gossip_send_socket,
        exit_signal.clone(),
    ).expect("DataReplicator::new");
    let mut v: Vec<NodeInfo> = vec![];
    //wait for the network to converge, 30 seconds should be plenty
    for _ in 0..30 {
        // Snapshot the gossip table, keeping only nodes advertising a valid
        // RPU address (i.e. actual validators, not spies).
        v = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .into_iter()
            .filter(|x| Crdt::is_valid_address(x.contact_info.rpu))
            .cloned()
            .collect();
        if v.len() >= num_nodes {
            println!("CONVERGED!");
            break;
        } else {
            println!(
                "{} node(s) discovered (looking for {} or more)",
                v.len(),
                num_nodes
            );
        }
        sleep(Duration::new(1, 0));
    }
    // Hand the gossip service threads to the caller for joining later.
    threads.extend(ncp.thread_hdls().into_iter());
    v
}
|
|
|
|
|
2018-07-11 12:32:54 -07:00
|
|
|
fn read_leader(path: &str) -> Config {
|
|
|
|
let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
|
|
|
|
serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
|
2018-02-28 09:07:54 -08:00
|
|
|
}
|