// solana/benches/appendvec.rs

#![cfg_attr(feature = "unstable", feature(test))]
extern crate rand;
extern crate test;
use rand::{thread_rng, Rng};
use solana_runtime::appendvec::AppendVec;
use std::env;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::spawn;
use test::Bencher;

const START_SIZE: u64 = 4 * 1024 * 1024;
const INC_SIZE: u64 = 1 * 1024 * 1024;
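
/// Returns the path of a benchmark's backing file under `OUT_DIR` (or `target/` if unset).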
fn get_appendvec_bench_path(path: &str) -> PathBuf {
    let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
    let mut buf = PathBuf::new();
    buf.push(&format!("{}/{}", out_dir, path));
    buf
}
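
/// Appends elements one at a time, growing the backing file whenever an append fails.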
#[bench]
fn appendvec_atomic_append(bencher: &mut Bencher) {
    let path = get_appendvec_bench_path("bench_append");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    bencher.iter(|| {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    });
    std::fs::remove_file(path).unwrap();
}
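
/// Pre-fills the vector, then measures `get` at random element offsets.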
#[bench]
fn appendvec_atomic_random_access(bencher: &mut Bencher) {
    let path = get_appendvec_bench_path("bench_ra");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    let size = 1_000_000;
    for _ in 0..size {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    }
    bencher.iter(|| {
        let index = thread_rng().gen_range(0, size as u64);
        vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
    });
    std::fs::remove_file(path).unwrap();
}
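
/// Pre-fills the vector, then increments a random element in place and re-reads it to
/// check that the new value is visible.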
#[bench]
fn appendvec_atomic_random_change(bencher: &mut Bencher) {
    let path = get_appendvec_bench_path("bench_rax");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    let size = 1_000_000;
    for _ in 0..size {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    }
    bencher.iter(|| {
        let index = thread_rng().gen_range(0, size as u64);
        let atomic1 = vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
        let current1 = atomic1.load(Ordering::Relaxed);
        let next = current1 + 1;
        atomic1.store(next, Ordering::Relaxed);
        let atomic2 = vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
        let current2 = atomic2.load(Ordering::Relaxed);
        assert_eq!(current2, next);
    });
    std::fs::remove_file(path).unwrap();
}
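
/// Pre-fills the vector, then reads random elements and checks they are still zero.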
#[bench]
fn appendvec_atomic_random_read(bencher: &mut Bencher) {
    let path = get_appendvec_bench_path("bench_read");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    let size = 1_000_000;
    for _ in 0..size {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    }
    bencher.iter(|| {
        let index = thread_rng().gen_range(0, size);
        let atomic1 = vec.get((index * std::mem::size_of::<AtomicUsize>()) as u64);
        let current1 = atomic1.load(Ordering::Relaxed);
        assert_eq!(current1, 0);
    });
    std::fs::remove_file(path).unwrap();
}
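
/// A background thread fills the vector under the read lock and grows the backing file
/// under the write lock; the benchmark measures acquiring the read lock while that runs.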
#[bench]
fn appendvec_concurrent_lock_append(bencher: &mut Bencher) {
    let path = get_appendvec_bench_path("bench_lock_append");
    let vec = Arc::new(RwLock::new(AppendVec::<AtomicUsize>::new(
        &path, true, START_SIZE, INC_SIZE,
    )));
    let vec1 = vec.clone();
    let size = 1_000_000;
    let count = Arc::new(AtomicUsize::new(0));
    let count1 = count.clone();
    spawn(move || loop {
        let mut len = count.load(Ordering::Relaxed);
        {
            // Append under the read lock until the backing file is full.
            let rlock = vec1.read().unwrap();
            loop {
                if rlock.append(AtomicUsize::new(0)).is_none() {
                    break;
                }
                len = count.fetch_add(1, Ordering::Relaxed);
            }
            if len >= size {
                break;
            }
        }
        {
            // Take the write lock only to grow the file.
            let mut wlock = vec1.write().unwrap();
            if len >= size {
                break;
            }
            assert!(wlock.grow_file().is_ok());
        }
    });
    bencher.iter(|| {
        let _rlock = vec.read().unwrap();
        let len = count1.load(Ordering::Relaxed);
        assert!(len < size * 2);
    });
    std::fs::remove_file(path).unwrap();
}
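
/// Same background writer as above, while the benchmark performs random `get` reads
/// under the read lock.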
#[bench]
fn appendvec_concurrent_get_append(bencher: &mut Bencher) {
    let path = get_appendvec_bench_path("bench_get_append");
    let vec = Arc::new(RwLock::new(AppendVec::<AtomicUsize>::new(
        &path, true, START_SIZE, INC_SIZE,
    )));
    let vec1 = vec.clone();
    let size = 1_000_000;
    let count = Arc::new(AtomicUsize::new(0));
    let count1 = count.clone();
    spawn(move || loop {
        let mut len = count.load(Ordering::Relaxed);
        {
            // Append under the read lock until the backing file is full.
            let rlock = vec1.read().unwrap();
            loop {
                if rlock.append(AtomicUsize::new(0)).is_none() {
                    break;
                }
                len = count.fetch_add(1, Ordering::Relaxed);
            }
            if len >= size {
                break;
            }
        }
        {
            // Take the write lock only to grow the file.
            let mut wlock = vec1.write().unwrap();
            if len >= size {
                break;
            }
            assert!(wlock.grow_file().is_ok());
        }
    });
    bencher.iter(|| {
        let rlock = vec.read().unwrap();
        let len = count1.load(Ordering::Relaxed);
        if len > 0 {
            let index = thread_rng().gen_range(0, len);
            rlock.get((index * std::mem::size_of::<AtomicUsize>()) as u64);
        }
    });
    std::fs::remove_file(path).unwrap();
}