Merge branch 'master' into pow_validation

This commit is contained in:
NikVolf 2016-11-25 15:16:32 +03:00
commit 6f6b6be083
12 changed files with 382 additions and 7 deletions

View File

@ -26,6 +26,7 @@ addons:
- g++-4.8
script:
- ./tools/test.sh
- ./tools/bench.sh
after_success: |
[ false ] &&
[ $TRAVIS_BRANCH = master ] &&

14
Cargo.lock generated
View File

@ -64,6 +64,19 @@ name = "base58"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bencher"
version = "0.1.0"
dependencies = [
"chain 0.1.0",
"db 0.1.0",
"ethcore-devtools 1.3.0",
"primitives 0.1.0",
"test-data 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"verification 0.1.0",
]
[[package]]
name = "bit-vec"
version = "0.4.3"
@ -482,6 +495,7 @@ name = "pbtc"
version = "0.1.0"
dependencies = [
"app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bencher 0.1.0",
"chain 0.1.0",
"clap 2.18.0 (registry+https://github.com/rust-lang/crates.io-index)",
"db 0.1.0",

View File

@ -20,6 +20,7 @@ db = { path = "db" }
verification = { path = "verification" }
sync = { path = "sync" }
import = { path = "import" }
bencher = { path = "bencher" }
[[bin]]
path = "pbtc/main.rs"

19
bencher/Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
[package]
name = "bencher"
version = "0.1.0"
license = "GPL-3.0"
authors = ["Ethcore <admin@ethcore.io>"]
description = "Parity bitcoin client benchmarks."
[dependencies]
db = { path = "../db" }
verification = { path = "../verification" }
chain = { path = "../chain" }
primitives = { path = "../primitives" }
ethcore-devtools = { path = "../devtools" }
test-data = { path = "../test-data" }
time = "*"
[[bin]]
path = "src/main.rs"
name = "bencher"

226
bencher/src/database.rs Normal file
View File

@ -0,0 +1,226 @@
use devtools::RandomTempPath;
use db::{Storage, BlockStapler, BlockProvider, BlockRef, BlockInsertedChain};
use test_data;
use super::Benchmark;
/// Benchmarks fetching a block from storage by hash.
///
/// Builds a chain of `BLOCKS` coinbase-only blocks on top of genesis,
/// inserts them all, then times `BLOCKS` fetches.
pub fn fetch(benchmark: &mut Benchmark) {
	// params
	const BLOCKS: usize = 1000;
	benchmark.samples(BLOCKS);

	// test setup: fresh temp database seeded with genesis
	// (fixed: genesis was previously built and inserted twice — copy-paste duplication)
	let path = RandomTempPath::create_dir();
	let store = Storage::new(path.as_path()).unwrap();

	let genesis = test_data::genesis();
	store.insert_block(&genesis).unwrap();

	let mut rolling_hash = genesis.hash();
	let mut blocks = Vec::new();
	let mut hashes = Vec::new();

	for x in 0..BLOCKS {
		// one coinbase-only block per height; nonce keeps each header unique
		let next_block = test_data::block_builder()
			.transaction()
				.coinbase()
				.output().value(5000000000).build()
				.build()
			.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
			.build();
		rolling_hash = next_block.hash();
		blocks.push(next_block);
		hashes.push(rolling_hash.clone());
	}
	for block in blocks.iter() { store.insert_block(block).unwrap(); }

	// bench
	// NOTE(review): every iteration fetches the same block (`hashes[0]`), so this
	// measures repeated lookup of one key rather than lookups across the set —
	// presumably intentional, but confirm; `hashes[idx]` would avoid cache bias.
	benchmark.start();
	for _ in 0..BLOCKS {
		let block = store.block(BlockRef::Hash(hashes[0].clone())).unwrap();
		assert_eq!(&block.hash(), &hashes[0]);
	}
	benchmark.stop();
}
/// Benchmarks sequential insertion of a chain of blocks into storage.
///
/// All blocks are pre-built outside the measured interval so that only
/// `insert_block` time is sampled.
pub fn write(benchmark: &mut Benchmark) {
	// params
	const BLOCKS: usize = 1000;
	benchmark.samples(BLOCKS);

	// setup: temp database seeded with genesis
	let tmp = RandomTempPath::create_dir();
	let store = Storage::new(tmp.as_path()).unwrap();

	let genesis = test_data::genesis();
	store.insert_block(&genesis).unwrap();

	// pre-build BLOCKS chained coinbase-only blocks; nonce makes each header unique
	let mut parent_hash = genesis.hash();
	let blocks: Vec<_> = (0..BLOCKS)
		.map(|x| {
			let block = test_data::block_builder()
				.transaction()
					.coinbase()
					.output().value(5000000000).build()
					.build()
				.merkled_header().parent(parent_hash.clone()).nonce(x as u32).build()
				.build();
			parent_hash = block.hash();
			block
		})
		.collect();

	// bench: insertion only
	benchmark.start();
	for block in blocks.iter() {
		store.insert_block(block).unwrap();
	}
	benchmark.stop();
}
/// Benchmarks block insertion under constant short (single-block) chain
/// reorganizations.
///
/// For every iteration, four blocks are prepared and pushed in this order:
///   1. a main-chain block on the current tip        (nonce x*4)
///   2. a side-chain block at the same height        (nonce x*4 + 2)
///   3. a side-chain child, making the side chain win (nonce x*4 + 3)
///   4. a main-chain child, reclaiming the tip        (nonce x*4 + 1)
/// so inserting them in order forces a reorg on steps 3 and 4.
pub fn reorg_short(benchmark: &mut Benchmark) {
// params
const BLOCKS: usize = 1000;
benchmark.samples(BLOCKS);
// setup
let path = RandomTempPath::create_dir();
let store = Storage::new(path.as_path()).unwrap();
let genesis = test_data::genesis();
store.insert_block(&genesis).unwrap();
let mut rolling_hash = genesis.hash();
let mut blocks = Vec::new();
for x in 0..BLOCKS {
// `base` is the tip both competing branches fork from
let base = rolling_hash.clone();
// (1) main-chain block on the current tip
let next_block = test_data::block_builder()
.transaction()
.coinbase()
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32 * 4).build()
.build();
rolling_hash = next_block.hash();
blocks.push(next_block);
// (2) competing side-chain block at the same height, forked from `base`
let next_block_side = test_data::block_builder()
.transaction()
.coinbase()
.output().value(5000000000).build()
.build()
.merkled_header().parent(base).nonce(x as u32 * 4 + 2).build()
.build();
let next_base = next_block_side.hash();
blocks.push(next_block_side);
// (3) side-chain child — makes the side branch longer, triggering a reorg
let next_block_side_continue = test_data::block_builder()
.transaction()
.coinbase()
.output().value(5000000000).build()
.build()
.merkled_header().parent(next_base).nonce(x as u32 * 4 + 3).build()
.build();
blocks.push(next_block_side_continue);
// (4) main-chain child — reclaims the tip for the main branch (another reorg)
let next_block_continue = test_data::block_builder()
.transaction()
.coinbase()
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32 * 4 + 1).build()
.build();
rolling_hash = next_block_continue.hash();
blocks.push(next_block_continue);
}
let mut total: usize = 0;
let mut reorgs: usize = 0;
// bench
// NOTE(review): only the first BLOCKS entries of the ~4*BLOCKS prepared blocks
// are inserted here — presumably intentional (fixed sample count); confirm.
benchmark.start();
for idx in 0..BLOCKS {
total += 1;
if let BlockInsertedChain::Reorganized(_) = store.insert_block(&blocks[idx]).unwrap() {
reorgs += 1;
}
}
benchmark.stop();
// Reorgs occur twice per iteration, except the last one where there is only
// one; blocks are inserted at a rate of 4 per iteration, so reorgs = total/2 - 1.
assert_eq!(1000, total);
assert_eq!(499, reorgs);
}
// 1. write BLOCKS_INITIAL (12000) coinbase-only blocks
// 2. write BLOCKS (100) blocks, each with TRANSACTIONS (100) transactions
//    spending coinbase outputs of the initial chain (indices 0..BLOCKS*TRANSACTIONS)
/// Benchmarks insertion of transaction-heavy blocks on top of a large chain.
pub fn write_heavy(benchmark: &mut Benchmark) {
	// params
	const BLOCKS_INITIAL: usize = 12000;
	const BLOCKS: usize = 100;
	const TRANSACTIONS: usize = 100;
	benchmark.samples(BLOCKS);

	// test setup: fresh temp database seeded with genesis
	// (fixed: genesis was previously built and inserted twice — copy-paste duplication)
	let path = RandomTempPath::create_dir();
	let store = Storage::new(path.as_path()).unwrap();

	let genesis = test_data::genesis();
	store.insert_block(&genesis).unwrap();

	let mut rolling_hash = genesis.hash();
	let mut blocks = Vec::new();
	let mut hashes = Vec::new();

	// initial chain: BLOCKS_INITIAL coinbase-only blocks
	for x in 0..BLOCKS_INITIAL {
		let next_block = test_data::block_builder()
			.transaction()
				.coinbase()
				.output().value(5000000000).build()
				.build()
			.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
			.build();
		rolling_hash = next_block.hash();
		blocks.push(next_block);
		hashes.push(rolling_hash.clone());
	}

	// heavy blocks: block b spends the coinbase outputs of initial blocks
	// b*TRANSACTIONS .. (b+1)*TRANSACTIONS
	for b in 0..BLOCKS {
		let mut builder = test_data::block_builder()
			.transaction().coinbase().build();
		for t in 0..TRANSACTIONS {
			builder = builder.transaction()
				.input().hash(blocks[b*TRANSACTIONS+t].transactions()[0].hash()).build() // default index is 0 which is ok
				.output().value(1000).build()
				.build();
		}
		let next_block = builder.merkled_header().parent(rolling_hash).build().build();
		rolling_hash = next_block.hash();
		blocks.push(next_block);
		hashes.push(rolling_hash.clone());
	}

	// insert the initial chain outside the measured interval
	for block in blocks[..BLOCKS_INITIAL].iter() { store.insert_block(block).unwrap(); }

	// bench: insert all BLOCKS heavy blocks
	// Fixed off-by-one: this slice was `BLOCKS_INITIAL+1..`, which skipped the first
	// heavy block — leaving its child without a stored parent and inserting only
	// BLOCKS-1 blocks while samples(BLOCKS) was reported.
	benchmark.start();
	for block in blocks[BLOCKS_INITIAL..].iter() { store.insert_block(block).unwrap(); }
	benchmark.stop();
}

73
bencher/src/main.rs Normal file
View File

@ -0,0 +1,73 @@
extern crate db;
extern crate chain;
extern crate ethcore_devtools as devtools;
extern crate test_data;
extern crate time;
mod database;
use time::{PreciseTime, Duration};
use std::io::Write;
use std::str;
/// Wall-clock stopwatch for a single benchmark run, with an optional
/// sample count used to normalize the reported time to ns/sample.
#[derive(Default)]
pub struct Benchmark {
	start: Option<PreciseTime>,
	end: Option<PreciseTime>,
	samples: Option<usize>,
}

impl Benchmark {
	/// Marks the beginning of the measured interval.
	pub fn start(&mut self) {
		self.start = Some(PreciseTime::now());
	}

	/// Marks the end of the measured interval.
	pub fn stop(&mut self) {
		self.end = Some(PreciseTime::now());
	}

	/// Returns the elapsed time between `start()` and `stop()`.
	///
	/// Panics if `start()` or `stop()` was never called.
	pub fn evaluate(&self) -> Duration {
		// Fixed: the two expect messages were swapped ("never ended" was attached
		// to the missing start, "never started" to the missing end), and
		// "benchmarch" was a typo.
		self.start.expect("benchmark never started").to(self.end.expect("benchmark never ended"))
	}

	/// Records how many samples the measured loop performs.
	pub fn samples(&mut self, samples: usize) {
		self.samples = Some(samples);
	}
}
/// Inserts a comma every three digits, counting from the right:
/// "1234567" -> "1,234,567". Input is expected to be an ASCII digit string
/// (as produced by formatting an integer).
fn decimal_mark(s: String) -> String {
	let digits = s.as_bytes();
	let mut out = Vec::with_capacity(digits.len() + digits.len() / 3);
	for (i, &b) in digits.iter().enumerate() {
		// a separator goes before every group of three counted from the end
		if i != 0 && (digits.len() - i) % 3 == 0 {
			out.push(b',');
		}
		out.push(b);
	}
	String::from_utf8(out).unwrap()
}
/// Prints `name`, runs `f` with a fresh `Benchmark`, and reports the elapsed
/// time — per sample when the closure registered a sample count, total otherwise.
fn run_benchmark<F>(name: &str, f: F) where F: FnOnce(&mut Benchmark) {
	// print the name up front so long benchmarks show what is running
	print!("{}: ", name);
	::std::io::stdout().flush().unwrap();

	let mut benchmark = Benchmark::default();
	f(&mut benchmark);

	let total_ns = benchmark.evaluate().num_nanoseconds().unwrap();
	match benchmark.samples {
		Some(samples) => println!("{} ns/sample", decimal_mark(format!("{}", total_ns / samples as i64))),
		None => println!("{} ns", decimal_mark(format!("{}", total_ns))),
	}
}
// Runs a benchmark function, using the expression itself (e.g. `database::fetch`)
// as the printed name via `stringify!`.
macro_rules! benchmark {
($t:expr) => {
run_benchmark(stringify!($t), $t);
};
}
// Entry point: runs each database benchmark in sequence, printing its timing.
fn main() {
benchmark!(database::fetch);
benchmark!(database::write);
benchmark!(database::reorg_short);
benchmark!(database::write_heavy);
}

View File

@ -29,6 +29,12 @@ args:
- printtoconsole:
long: printtoconsole
help: Send trace/debug info to console instead of debug.log file
- data-dir:
short: d
long: data-dir
value_name: PATH
help: Specify the database & configuration directory PATH
takes_value: true
- db-cache:
long: db-cache
help: Sets db cache size

View File

@ -2,7 +2,7 @@ use std::net::SocketAddr;
use sync::create_sync_connection_factory;
use message::Services;
use util::{open_db, init_db, node_table_path};
use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM, USER_AGENT};
use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM};
pub fn start(cfg: config::Config) -> Result<(), String> {
let mut el = p2p::event_loop();
@ -10,6 +10,8 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
let db = open_db(&cfg);
try!(init_db(&cfg, &db));
let nodes_path = node_table_path(&cfg);
let p2p_cfg = p2p::Config {
threads: cfg.p2p_threads,
inbound_connections: cfg.inbound_connections,
@ -20,13 +22,13 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
magic: cfg.magic,
local_address: SocketAddr::new("127.0.0.1".parse().unwrap(), cfg.port),
services: Services::default().with_network(true),
user_agent: USER_AGENT.into(),
user_agent: cfg.user_agent,
start_height: 0,
relay: false,
},
peers: cfg.connect.map_or_else(|| vec![], |x| vec![x]),
seeds: cfg.seednode.map_or_else(|| vec![], |x| vec![x]),
node_table_path: node_table_path(),
node_table_path: nodes_path,
};
let sync_handle = el.handle();

View File

@ -1,6 +1,7 @@
use std::net;
use clap;
use message::Magic;
use {USER_AGENT, REGTEST_USER_AGENT};
pub struct Config {
pub magic: Magic,
@ -12,6 +13,8 @@ pub struct Config {
pub outbound_connections: u32,
pub p2p_threads: usize,
pub db_cache: usize,
pub data_dir: Option<String>,
pub user_agent: String,
}
pub const DEFAULT_DB_CACHE: usize = 512;
@ -35,6 +38,12 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
Magic::Regtest => 1,
};
// to skip idiotic 30 seconds delay in test-scripts
let user_agent = match magic {
Magic::Testnet | Magic::Mainnet => USER_AGENT,
Magic::Regtest => REGTEST_USER_AGENT,
};
let port = match matches.value_of("port") {
Some(port) => try!(port.parse().map_err(|_| "Invalid port".to_owned())),
None => magic.port(),
@ -60,6 +69,11 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
None => DEFAULT_DB_CACHE,
};
let data_dir = match matches.value_of("data-dir") {
Some(s) => Some(try!(s.parse().map_err(|_| "Invalid data-dir".to_owned()))),
None => None,
};
let config = Config {
print_to_console: print_to_console,
magic: magic,
@ -70,6 +84,8 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
outbound_connections: out_connections,
p2p_threads: p2p_threads,
db_cache: db_cache,
data_dir: data_dir,
user_agent: user_agent.to_string(),
};
Ok(config)

View File

@ -26,6 +26,7 @@ pub const APP_INFO: AppInfo = AppInfo { name: "pbtc", author: "Parity" };
pub const PROTOCOL_VERSION: u32 = 70_014;
pub const PROTOCOL_MINIMUM: u32 = 70_001;
pub const USER_AGENT: &'static str = "pbtc";
pub const REGTEST_USER_AGENT: &'static str = "/Satoshi:0.12.1/";
fn main() {
env_logger::init().unwrap();

View File

@ -1,17 +1,23 @@
use std::sync::Arc;
use std::path::PathBuf;
use std::fs::create_dir_all;
use app_dirs::{app_dir, AppDataType};
use chain::RepresentH256;
use {db, APP_INFO};
use config::Config;
pub fn open_db(cfg: &Config) -> db::SharedStore {
let db_path = app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir");
let db_path = match cfg.data_dir {
Some(ref data_dir) => custom_path(&data_dir, "db"),
None => app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir"),
};
Arc::new(db::Storage::with_cache(db_path, cfg.db_cache).expect("Failed to open database"))
}
pub fn node_table_path() -> PathBuf {
let mut node_table = app_dir(AppDataType::UserData, &APP_INFO, "p2p").expect("Failed to get app dir");
pub fn node_table_path(cfg: &Config) -> PathBuf {
let mut node_table = match cfg.data_dir {
Some(ref data_dir) => custom_path(&data_dir, "p2p"),
None => app_dir(AppDataType::UserData, &APP_INFO, "p2p").expect("Failed to get app dir"),
};
node_table.push("nodes.csv");
node_table
}
@ -28,3 +34,10 @@ pub fn init_db(cfg: &Config, db: &db::SharedStore) -> Result<(), String> {
}
}
}
fn custom_path(data_dir: &str, sub_dir: &str) -> PathBuf {
let mut path = PathBuf::from(data_dir);
path.push(sub_dir);
create_dir_all(&path).expect("Failed to get app dir");
path
}

3
tools/bench.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Build and run the `bencher` crate in release mode (invoked from CI; see .travis.yml).
cargo run --manifest-path ./bencher/Cargo.toml --release