Add benches for shredding and poh (#6307)

* Add benches for shredding and poh

* ignore poh bench

* Factor out Poh bench as separate function
This commit is contained in:
carllin 2019-10-10 14:00:24 -07:00 committed by GitHub
parent 1b775044f7
commit 1960ea8ed7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 69 additions and 29 deletions

View File

@ -3,17 +3,33 @@
extern crate test;
use solana_core::entry::create_ticks;
use solana_core::entry::Entry;
use solana_core::shred::{
max_ticks_per_n_shreds, Shredder, RECOMMENDED_FEC_RATE, SIZE_OF_DATA_SHRED_HEADER,
max_entries_per_n_shred, max_ticks_per_n_shreds, Shredder, RECOMMENDED_FEC_RATE,
SIZE_OF_DATA_SHRED_HEADER,
};
use solana_core::test_tx;
use solana_sdk::hash::Hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::Arc;
use test::Bencher;
/// Build one `Entry` with a fixed hash count, a default hash, and
/// `txs_per_entry` copies of the canned test transaction.
fn make_test_entry(txs_per_entry: u64) -> Entry {
    let transactions = vec![test_tx::test_tx(); txs_per_entry as usize];
    Entry {
        num_hashes: 100_000,
        hash: Hash::default(),
        transactions,
    }
}
/// Produce `num_entries` independent (unchained) entries, each carrying
/// `txs_per_entry` test transactions.
fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Entry> {
    let mut entries = Vec::with_capacity(num_entries as usize);
    for _ in 0..num_entries {
        entries.push(make_test_entry(txs_per_entry));
    }
    entries
}
#[bench]
fn bench_shredder(bencher: &mut Bencher) {
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = PACKET_DATA_SIZE - *SIZE_OF_DATA_SHRED_HEADER;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
@ -26,6 +42,21 @@ fn bench_shredder(bencher: &mut Bencher) {
})
}
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
    let keypair = Arc::new(Keypair::new());
    let shred_size = PACKET_DATA_SIZE - *SIZE_OF_DATA_SHRED_HEADER;
    // Enough shreds to cover roughly 1 MB of payload, rounding up.
    let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
    let txs_per_entry = 128;
    // Size the entry batch so it fills exactly `num_shreds` shreds.
    let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64);
    let entries = make_large_unchained_entries(txs_per_entry, num_entries);
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, keypair.clone()).unwrap();
        shredder.entries_to_shreds(&entries, true, 0);
    })
}
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());

View File

@ -1,5 +1,7 @@
//! The `Poh` module provides an object for generating a Proof of History.
use solana_sdk::hash::{hash, hashv, Hash};
use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant};
pub struct Poh {
pub hash: Hash,
@ -80,6 +82,37 @@ impl Poh {
}
}
/// Estimate how many hashes fit into `duration` by timing
/// `hashes_sample_size` sequential hashes on every CPU simultaneously
/// (i.e. with the system under maximum load), then scaling the averaged
/// per-thread elapsed time up to the target duration.
///
/// Returns the estimated hash count; panics if the CPU count cannot be
/// queried or a sampling thread fails to spawn or join.
pub fn compute_hashes_per_tick(duration: Duration, hashes_sample_size: u64) -> u64 {
    let num_cpu = sys_info::cpu_num().unwrap();
    // calculate hash rate with the system under maximum load
    info!(
        "Running {} hashes in parallel on all threads...",
        hashes_sample_size
    );
    let threads: Vec<JoinHandle<u64>> = (0..num_cpu)
        .map(|_| {
            Builder::new()
                .name("solana-poh".to_string())
                .spawn(move || {
                    // Chain each hash into the next so the loop cannot be
                    // optimized away; report wall-clock millis for the run.
                    let mut v = Hash::default();
                    let start = Instant::now();
                    for _ in 0..hashes_sample_size {
                        v = hash(&v.as_ref());
                    }
                    start.elapsed().as_millis() as u64
                })
                .unwrap()
        })
        .collect();
    // Average the per-thread elapsed times. Clamp to at least 1 ms so the
    // scaling below cannot divide by zero when the sample completes in
    // under a millisecond (fast machine or small sample size).
    let avg_elapsed = ((threads
        .into_iter()
        .map(|elapsed| elapsed.join().unwrap())
        .sum::<u64>())
        / u64::from(num_cpu))
    .max(1);
    duration.as_millis() as u64 * hashes_sample_size / avg_elapsed
}
#[cfg(test)]
mod tests {
use crate::poh::{Poh, PohEntry};

View File

@ -2,8 +2,8 @@
use base64;
use clap::{crate_description, crate_name, crate_version, value_t_or_exit, App, Arg};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use solana_core::blocktree::create_new_ledger;
use solana_core::poh::compute_hashes_per_tick;
use solana_genesis::PrimordialAccountDetails;
use solana_sdk::{
account::Account,
@ -11,7 +11,6 @@ use solana_sdk::{
epoch_schedule::EpochSchedule,
fee_calculator::FeeCalculator,
genesis_block::Builder,
hash::{hash, Hash},
poh_config::PohConfig,
pubkey::Pubkey,
rent_calculator::RentCalculator,
@ -21,15 +20,7 @@ use solana_sdk::{
use solana_stake_api::stake_state;
use solana_storage_api::storage_contract;
use solana_vote_api::vote_state;
use std::{
collections::HashMap,
error,
fs::File,
io,
path::PathBuf,
str::FromStr,
time::{Duration, Instant},
};
use std::{collections::HashMap, error, fs::File, io, path::PathBuf, str::FromStr, time::Duration};
pub const BOOTSTRAP_LEADER_LAMPORTS: u64 = 42;
@ -377,23 +368,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
match matches.value_of("hashes_per_tick").unwrap() {
"auto" => {
let v = Hash::default();
// calculate hash rate with the system under maximum load
println!("Running 1 million hashes in parallel on all threads...");
let start = Instant::now();
(0..sys_info::cpu_num().unwrap())
.into_par_iter()
.for_each_with(v, |v, _| {
for _ in 0..1_000_000 {
*v = hash(&v.as_ref());
}
});
let end = Instant::now();
let elapsed = end.duration_since(start).as_millis();
let hashes_per_tick =
(poh_config.target_tick_duration.as_millis() * 1_000_000 / elapsed) as u64;
compute_hashes_per_tick(poh_config.target_tick_duration, 1_000_000);
println!("Hashes per tick: {}", hashes_per_tick);
poh_config.hashes_per_tick = Some(hashes_per_tick);
}