From 942256a6478fcdb28635d56012bbdf51cd18148f Mon Sep 17 00:00:00 2001
From: carllin
Date: Fri, 23 Nov 2018 06:12:43 -0800
Subject: [PATCH] Add db_ledger benchmarks (#1875)

* Add db_ledger benchmarks

* ignore benches in CI, due to timeouts
---
 Cargo.toml           |   3 +
 benches/db_ledger.rs | 205 +++++++++++++++++++++++++++++++++++++++++++
 src/ledger.rs        |  27 ++++--
 tests/multinode.rs   |   4 +-
 4 files changed, 231 insertions(+), 8 deletions(-)
 create mode 100644 benches/db_ledger.rs

diff --git a/Cargo.toml b/Cargo.toml
index e77e0b948f..8e36f49d0c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,6 +120,9 @@ name = "bank"
 [[bench]]
 name = "banking_stage"
 
+[[bench]]
+name = "db_ledger"
+
 [[bench]]
 name = "ledger"
 
diff --git a/benches/db_ledger.rs b/benches/db_ledger.rs
new file mode 100644
index 0000000000..c8ac35e4bf
--- /dev/null
+++ b/benches/db_ledger.rs
@@ -0,0 +1,205 @@
+#![feature(test)]
+extern crate rand;
+extern crate rocksdb;
+extern crate solana;
+extern crate test;
+
+use rand::distributions::{Distribution, Range};
+use rand::{thread_rng, Rng};
+use rocksdb::{Options, DB};
+use solana::db_ledger::{DataCf, DbLedger, LedgerColumnFamilyRaw};
+use solana::ledger::{get_tmp_ledger_path, make_large_test_entries, make_tiny_test_entries, Block};
+use solana::packet::{Blob, BLOB_HEADER_SIZE};
+use test::Bencher;
+
+// Given some blobs and a ledger at ledger_path, benchmark writing the blobs to the ledger
+fn bench_write_blobs(bench: &mut Bencher, blobs: &mut [&mut Blob], ledger_path: &str) {
+    let db_ledger =
+        DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
+    let slot = 0;
+    let num_blobs = blobs.len();
+    bench.iter(move || {
+        for blob in blobs.iter_mut() {
+            let index = blob.index().unwrap();
+            let key = DataCf::key(slot, index);
+            let size = blob.size().unwrap();
+            db_ledger
+                .data_cf
+                .put(&db_ledger.db, &key, &blob.data[..BLOB_HEADER_SIZE + size])
+                .unwrap();
+            blob.set_index(index + num_blobs as u64).unwrap();
+        }
+    });
+
+    DB::destroy(&Options::default(), &ledger_path)
+        .expect("Expected successful database destruction");
+}
+
+// Insert some blobs into the ledger in preparation for read benchmarks
+fn setup_read_bench(
+    db_ledger: &mut DbLedger,
+    num_small_blobs: u64,
+    num_large_blobs: u64,
+    slot: u64,
+) {
+    // Make some big and small entries
+    let mut entries = make_large_test_entries(num_large_blobs as usize);
+    entries.extend(make_tiny_test_entries(num_small_blobs as usize));
+
+    // Convert the entries to blobs, write the blobs to the ledger
+    let shared_blobs = entries.to_blobs();
+    db_ledger
+        .write_shared_blobs(slot, &shared_blobs)
+        .expect("Expected successful insertion of blobs into ledger");
+}
+
+// Write small blobs to the ledger
+#[bench]
+#[ignore]
+fn bench_write_small(bench: &mut Bencher) {
+    let ledger_path = get_tmp_ledger_path("bench_write_small");
+    let num_entries = 32 * 1024;
+    let entries = make_tiny_test_entries(num_entries);
+    let shared_blobs = entries.to_blobs();
+    let mut blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.write().unwrap()).collect();
+    let mut blobs: Vec<&mut Blob> = blob_locks.iter_mut().map(|b| &mut **b).collect();
+    bench_write_blobs(bench, &mut blobs, &ledger_path);
+}
+
+// Write big blobs to the ledger
+#[bench]
+#[ignore]
+fn bench_write_big(bench: &mut Bencher) {
+    let ledger_path = get_tmp_ledger_path("bench_write_big");
+    let num_entries = 32 * 1024;
+    let entries = make_large_test_entries(num_entries);
+    let shared_blobs = entries.to_blobs();
+    let mut blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.write().unwrap()).collect();
+    let mut blobs: Vec<&mut Blob> = blob_locks.iter_mut().map(|b| &mut **b).collect();
+    bench_write_blobs(bench, &mut blobs, &ledger_path);
+}
+
+#[bench]
+#[ignore]
+fn bench_read_sequential(bench: &mut Bencher) {
+    let ledger_path = get_tmp_ledger_path("bench_read_sequential");
+    let mut db_ledger =
+        DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
+
+    // Insert some big and small blobs into the ledger
+    let num_small_blobs = 32 * 1024;
+    let num_large_blobs = 32 * 1024;
+    let total_blobs = num_small_blobs + num_large_blobs;
+    let slot = 0;
+    setup_read_bench(&mut db_ledger, num_small_blobs, num_large_blobs, slot);
+
+    let num_reads = total_blobs / 15;
+    // Make range [0, total_blobs - 1]
+    let range = Range::new(0, num_small_blobs + num_large_blobs);
+
+    let mut rng = rand::thread_rng();
+    bench.iter(move || {
+        // Generate random starting point in that range, read num_reads blobs sequentially
+        let start_index = range.sample(&mut rng);
+        for i in start_index..start_index + num_reads {
+            let _ =
+                db_ledger
+                    .data_cf
+                    .get_by_slot_index(&db_ledger.db, slot, i as u64 % total_blobs);
+        }
+    });
+
+    DB::destroy(&Options::default(), &ledger_path)
+        .expect("Expected successful database destruction");
+}
+
+#[bench]
+#[ignore]
+fn bench_read_random(bench: &mut Bencher) {
+    let ledger_path = get_tmp_ledger_path("bench_read_random");
+    let mut db_ledger =
+        DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
+
+    // Insert some big and small blobs into the ledger
+    let num_small_blobs = 32 * 1024;
+    let num_large_blobs = 32 * 1024;
+    let total_blobs = num_small_blobs + num_large_blobs;
+    let slot = 0;
+    setup_read_bench(&mut db_ledger, num_small_blobs, num_large_blobs, slot);
+
+    let num_reads = total_blobs / 15;
+
+    // Make range [0, total_blobs - 1]
+    let range = Range::new(0, total_blobs);
+
+    // Generate a num_reads sized random sample of indexes in range [0, total_blobs - 1],
+    // simulating random reads
+    let mut rng = rand::thread_rng();
+    let indexes: Vec<usize> = (0..num_reads)
+        .map(|_| range.sample(&mut rng) as usize)
+        .collect();
+    bench.iter(move || {
+        for i in indexes.iter() {
+            let _ = db_ledger
+                .data_cf
+                .get_by_slot_index(&db_ledger.db, slot, *i as u64);
+        }
+    });
+
+    DB::destroy(&Options::default(), &ledger_path)
+        .expect("Expected successful database destruction");
+}
+
+#[bench]
+#[ignore]
+fn bench_insert_data_blob_small(bench: &mut Bencher) {
+    let ledger_path = get_tmp_ledger_path("bench_insert_data_blob_small");
+    let db_ledger =
+        DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
+    let num_entries = 32 * 1024;
+    let entries = make_tiny_test_entries(num_entries);
+    let shared_blobs = entries.to_blobs();
+    let mut blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.write().unwrap()).collect();
+    let mut blobs: Vec<&mut Blob> = blob_locks.iter_mut().map(|b| &mut **b).collect();
+    thread_rng().shuffle(&mut blobs);
+    let slot = 0;
+
+    bench.iter(move || {
+        for blob in blobs.iter_mut() {
+            let index = blob.index().unwrap();
+            let key = DataCf::key(slot, index);
+            db_ledger.insert_data_blob(&key, blob).unwrap();
+            blob.set_index(index + num_entries as u64).unwrap();
+        }
+    });
+
+    DB::destroy(&Options::default(), &ledger_path)
+        .expect("Expected successful database destruction");
+}
+
+#[bench]
+#[ignore]
+fn bench_insert_data_blob_big(bench: &mut Bencher) {
+    let ledger_path = get_tmp_ledger_path("bench_insert_data_blob_big");
get_tmp_ledger_path("bench_insert_data_blob_big"); + let db_ledger = + DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger"); + let num_entries = 32 * 1024; + let entries = make_large_test_entries(num_entries); + let shared_blobs = entries.to_blobs(); + let mut blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.write().unwrap()).collect(); + let mut blobs: Vec<&mut Blob> = blob_locks.iter_mut().map(|b| &mut **b).collect(); + thread_rng().shuffle(&mut blobs); + let slot = 0; + + bench.iter(move || { + for blob in blobs.iter_mut() { + let index = blob.index().unwrap(); + let key = DataCf::key(slot, index); + db_ledger.insert_data_blob(&key, blob).unwrap(); + blob.set_index(index + num_entries as u64).unwrap(); + } + }); + + DB::destroy(&Options::default(), &ledger_path) + .expect("Expected successful database destruction"); +} diff --git a/src/ledger.rs b/src/ledger.rs index e37314dc2a..e4f4f0e37a 100644 --- a/src/ledger.rs +++ b/src/ledger.rs @@ -3,9 +3,7 @@ //! access read to a persistent file-based ledger. use bincode::{self, deserialize_from, serialize_into, serialized_size}; -#[cfg(test)] use budget_transaction::BudgetTransaction; -#[cfg(test)] use chrono::prelude::Utc; use entry::Entry; use log::Level::Trace; @@ -13,9 +11,7 @@ use mint::Mint; use packet::{SharedBlob, BLOB_DATA_SIZE}; use rayon::prelude::*; use signature::{Keypair, KeypairUtil}; -#[cfg(test)] -use solana_sdk::hash::hash; -use solana_sdk::hash::Hash; +use solana_sdk::hash::{hash, Hash}; use solana_sdk::pubkey::Pubkey; use std::fs::{create_dir_all, remove_dir_all, File, OpenOptions}; use std::io::prelude::*; @@ -642,7 +638,6 @@ pub fn create_tmp_sample_ledger( (mint, path, genesis) } -#[cfg(test)] pub fn make_tiny_test_entries(num: usize) -> Vec { let zero = Hash::default(); let one = hash(&zero.as_ref()); @@ -666,6 +661,26 @@ pub fn make_tiny_test_entries(num: usize) -> Vec { }).collect() } +pub fn make_large_test_entries(num_entries: usize) -> Vec { + let zero = Hash::default(); + let one = hash(&zero.as_ref()); + let keypair = Keypair::new(); + + let tx = Transaction::budget_new_timestamp( + &keypair, + keypair.pubkey(), + keypair.pubkey(), + Utc::now(), + one, + ); + + let serialized_size = serialized_size(&vec![&tx]).unwrap(); + let num_txs = BLOB_DATA_SIZE / serialized_size as usize; + let txs = vec![tx; num_txs]; + let entry = next_entries(&one, 1, txs)[0].clone(); + vec![entry; num_entries] +} + #[cfg(test)] mod tests { use super::*; diff --git a/tests/multinode.rs b/tests/multinode.rs index a3b4e62bc0..9a68508c17 100644 --- a/tests/multinode.rs +++ b/tests/multinode.rs @@ -922,7 +922,7 @@ fn test_leader_to_validator_transition() { // Leader could have executed transactions in bank but not recorded them, so // we only have an upper bound on the balance if let Ok(bal) = leader_client.poll_get_balance(&bob_pubkey) { - assert!(bal <= i - 1); + assert!(bal <= i); } // Check the ledger to make sure it's the right height, we should've @@ -1060,7 +1060,7 @@ fn test_leader_validator_basic() { // Leader could have executed transactions in bank but not recorded them, so // we only have an upper bound on the balance if let Ok(bal) = leader_client.poll_get_balance(&bob_pubkey) { - assert!(bal <= i - 1); + assert!(bal <= i); } // Shut down