Add logs and TPS counting

This commit is contained in:
Anatoly Yakovenko 2018-05-24 04:45:40 +00:00 committed by Greg Fitzgerald
parent e12e154877
commit 2128c58fbe
5 changed files with 29 additions and 19 deletions

View File

@ -1,5 +1,6 @@
#!/bin/bash #!/bin/bash
cd /home/ubuntu/solana cd /home/ubuntu/solana
git pull git pull
export RUST_LOG=solana::crdt=trace export RUST_LOG=solana::crdt=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d | grep INFO
#cat genesis.log | cargo run --release --bin solana-testnode -- -s leader.json -b 8000 -d

View File

@ -3,5 +3,6 @@ cd /home/ubuntu/solana
git pull git pull
scp ubuntu@18.206.1.146:~/solana/leader.json . scp ubuntu@18.206.1.146:~/solana/leader.json .
scp ubuntu@18.206.1.146:~/solana/genesis.log . scp ubuntu@18.206.1.146:~/solana/genesis.log .
export RUST_LOG=solana::crdt=trace scp ubuntu@18.206.1.146:~/solana/libcuda_verify_ed25519.a .
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -r leader.json -b 9000 -d export RUST_LOG=solana=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -v leader.json -b 9000 -d | grep INFO

View File

@ -139,11 +139,12 @@ fn main() {
nsps / 1_000_f64 nsps / 1_000_f64
); );
let initial_tx_count = client.transaction_count(); let first_count = client.transaction_count();
let mut initial_tx_count = first_count;
println!("initial count {}", initial_tx_count); println!("initial count {}", initial_tx_count);
println!("Transfering {} transactions in {} batches", txs, threads); println!("Transfering {} transactions in {} batches", txs, threads);
let now = Instant::now(); let mut now = Instant::now();
let sz = transactions.len() / threads; let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect(); let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| { chunks.into_par_iter().for_each(|trs| {
@ -154,11 +155,13 @@ fn main() {
} }
}); });
println!("Waiting for transactions to complete...",); println!("Sampling tps every second...",);
for _ in 0..10 { for _ in 0..20 {
let mut tx_count = client.transaction_count(); let tx_count = client.transaction_count();
duration = now.elapsed(); duration = now.elapsed();
now = Instant::now();
let txs = tx_count - initial_tx_count; let txs = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!("Transactions processed {}", txs); println!("Transactions processed {}", txs);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64; let tps = (txs * 1_000_000_000) as f64 / ns as f64;
@ -171,13 +174,12 @@ fn main() {
for val in validators { for val in validators {
println!("Checking balance on {} ...", val.events_addr); println!("Checking balance on {} ...", val.events_addr);
let mut client = mk_client(&client_addr, &val); let mut client = mk_client(&client_addr, &val);
let mut tx_count = client.transaction_count(); let tx_count = client.transaction_count();
duration = now.elapsed(); let txs = tx_count - first_count;
let txs = tx_count - initial_tx_count; println!(
println!("Transactions processed {} on {}", txs, val.events_addr); "Total Transactions processed {} on {}",
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); txs, val.events_addr
let tps = (txs * 1_000_000_000) as f64 / ns as f64; );
println!("{} tps on {}", tps, val.events_addr);
} }
signal.store(true, Ordering::Relaxed); signal.store(true, Ordering::Relaxed);
for t in c_threads { for t in c_threads {

View File

@ -348,7 +348,7 @@ impl Crdt {
fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> { fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect(); let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
if options.len() < 1 { if options.len() < 1 {
trace!("crdt too small for gossip"); info!("crdt too small for gossip");
return Err(Error::CrdtTooSmall); return Err(Error::CrdtTooSmall);
} }
let n = (Self::random() as usize) % options.len(); let n = (Self::random() as usize) % options.len();
@ -497,7 +497,13 @@ impl Crdt {
sock.set_read_timeout(Some(Duration::new(2, 0))) sock.set_read_timeout(Some(Duration::new(2, 0)))
.expect("'sock.set_read_timeout' in crdt.rs"); .expect("'sock.set_read_timeout' in crdt.rs");
spawn(move || loop { spawn(move || loop {
let _ = Self::run_listen(&obj, &window, &sock); let e = Self::run_listen(&obj, &window, &sock);
if e.is_err() {
info!(
"run_listen timeout, table size: {}",
obj.read().unwrap().table.len()
);
}
if exit.load(Ordering::Relaxed) { if exit.load(Ordering::Relaxed) {
return; return;
} }

View File

@ -342,7 +342,7 @@ fn broadcast(
let mut blobs = dq.into_iter().collect(); let mut blobs = dq.into_iter().collect();
/// appends codes to the list of blobs allowing us to reconstruct the stream /// appends codes to the list of blobs allowing us to reconstruct the stream
#[cfg(feature = "erasure")] #[cfg(feature = "erasure")]
erasure::generate_codes(blobs); erasure::generate_coding(re, blobs, consumed);
Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?; Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
// keep the cache of blobs that are broadcast // keep the cache of blobs that are broadcast
{ {