block assembler in progress

This commit is contained in:
debris 2016-12-08 16:45:37 +01:00
parent 986baa8685
commit f5b1d95d2d
13 changed files with 524 additions and 21 deletions

2
Cargo.lock generated
View File

@ -348,6 +348,8 @@ dependencies = [
name = "miner" name = "miner"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bitcrypto 0.1.0",
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0", "chain 0.1.0",
"db 0.1.0", "db 0.1.0",
"heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -65,6 +65,13 @@ impl Deserializable for OutPoint {
} }
impl OutPoint { impl OutPoint {
/// Returns a "null" outpoint: zero hash and maximal index.
/// NOTE(review): presumably used as the previous-output reference of
/// coinbase inputs -- confirm against callers.
pub fn null() -> Self {
	OutPoint {
		hash: H256::default(),
		index: u32::max_value(),
	}
}
pub fn hash(&self) -> &H256 { pub fn hash(&self) -> &H256 {
&self.hash &self.hash
} }

View File

@ -0,0 +1,25 @@
use std::cmp;
use primitives::hash::H256;
use chain::Transaction;
/// A transaction paired with its hash.
#[derive(Debug)]
pub struct IndexedTransaction {
	/// The transaction itself
	pub transaction: Transaction,
	/// Transaction hash, stored alongside so it does not need to be recomputed
	pub hash: H256,
}
impl From<Transaction> for IndexedTransaction {
	/// Builds an indexed transaction, computing the hash up front.
	fn from(transaction: Transaction) -> Self {
		IndexedTransaction {
			hash: transaction.hash(),
			transaction: transaction,
		}
	}
}
impl cmp::PartialEq for IndexedTransaction {
	// two indexed transactions are considered equal when their hashes match
	fn eq(&self, other: &Self) -> bool {
		self.hash == other.hash
	}
}

View File

@ -28,6 +28,7 @@ mod transaction_meta_provider;
mod error; mod error;
mod update_context; mod update_context;
mod indexed_block; mod indexed_block;
mod indexed_transaction;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum BlockRef { pub enum BlockRef {
@ -72,6 +73,7 @@ pub use transaction_meta_provider::TransactionMetaProvider;
pub use block_stapler::{BlockStapler, BlockInsertedChain}; pub use block_stapler::{BlockStapler, BlockInsertedChain};
pub use block_provider::{BlockProvider, BlockHeaderProvider, AsBlockHeaderProvider}; pub use block_provider::{BlockProvider, BlockHeaderProvider, AsBlockHeaderProvider};
pub use indexed_block::{IndexedBlock, IndexedTransactions}; pub use indexed_block::{IndexedBlock, IndexedTransactions};
pub use indexed_transaction::{IndexedTransaction};
#[cfg(feature="dev")] #[cfg(feature="dev")]
pub use test_storage::TestStorage; pub use test_storage::TestStorage;

View File

@ -4,7 +4,9 @@ version = "0.1.0"
authors = ["Ethcore <admin@ethcore.io>"] authors = ["Ethcore <admin@ethcore.io>"]
[dependencies] [dependencies]
byteorder = "0.5"
heapsize = "0.3" heapsize = "0.3"
bitcrypto = { path = "../crypto" }
chain = { path = "../chain" } chain = { path = "../chain" }
db = { path = "../db" } db = { path = "../db" }
primitives = { path = "../primitives" } primitives = { path = "../primitives" }

View File

@ -0,0 +1,227 @@
use primitives::hash::H256;
use db::{SharedStore, IndexedTransaction};
use memory_pool::{MemoryPool, OrderingStrategy};
/// Block template as described in BIP0022
/// Minimal version
/// [BIP0022](https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki#block-template-request)
pub struct BlockTemplate {
	/// Version
	pub version: u32,
	/// The hash of previous block
	pub previous_header_hash: H256,
	/// The current time as seen by the server
	pub time: u32,
	/// The compressed difficulty
	pub nbits: u32,
	/// Block height
	pub height: u32,
	/// Block transactions (excluding coinbase)
	pub transactions: Vec<IndexedTransaction>,
	/// Total funds available for the coinbase (in Satoshis)
	/// NOTE(review): u32 overflows above ~42.9 BTC in satoshis (early block
	/// subsidy alone is 50 BTC) -- confirm whether this should be u64
	pub coinbase_value: u32,
}
/// Block size and number of signatures opcodes is limited
/// This structure should be used for storing these values.
struct SizePolicy {
	/// Current size
	current_size: u32,
	/// Max size
	max_size: u32,
	/// When current_size + size_buffer > max_size
	/// we need to start finishing the block
	size_buffer: u32,
	/// Number of transactions checked since finishing started
	finish_counter: u32,
	/// Number of transactions to check when finishing the block
	finish_limit: u32,
}
/// When appending transaction, opcode count and block size policies
/// must agree on appending the transaction to the block
#[derive(Debug, PartialEq, Copy, Clone)]
enum NextStep {
	/// Append the transaction, check the next one
	Append,
	/// Append the transaction, do not check the next one
	FinishAndAppend,
	/// Ignore transaction, check the next one
	Ignore,
	/// Ignore transaction, do not check the next one
	FinishAndIgnore,
}

impl NextStep {
	/// Combines two policy decisions into the most restrictive one:
	/// the transaction is appended only if both sides want to append,
	/// and the block is finished if either side wants to finish.
	fn and(self, other: NextStep) -> Self {
		let append = match (self, other) {
			(NextStep::Append, NextStep::Append) |
			(NextStep::Append, NextStep::FinishAndAppend) |
			(NextStep::FinishAndAppend, NextStep::Append) |
			(NextStep::FinishAndAppend, NextStep::FinishAndAppend) => true,
			_ => false,
		};
		let finish = match (self, other) {
			(NextStep::FinishAndAppend, _) |
			(NextStep::FinishAndIgnore, _) |
			(_, NextStep::FinishAndAppend) |
			(_, NextStep::FinishAndIgnore) => true,
			_ => false,
		};
		match (append, finish) {
			(true, true) => NextStep::FinishAndAppend,
			(true, false) => NextStep::Append,
			(false, true) => NextStep::FinishAndIgnore,
			(false, false) => NextStep::Ignore,
		}
	}
}
impl SizePolicy {
	/// Creates a new size policy.
	///
	/// `current_size` is the starting size, `max_size` the hard cap,
	/// `size_buffer` the slack that triggers the "finishing" phase, and
	/// `finish_limit` how many more items are examined once finishing started.
	fn new(current_size: u32, max_size: u32, size_buffer: u32, finish_limit: u32) -> Self {
		SizePolicy {
			current_size: current_size,
			max_size: max_size,
			size_buffer: size_buffer,
			finish_counter: 0,
			finish_limit: finish_limit,
		}
	}

	/// Decides whether an item of `size` fits, and whether the block
	/// should be finished after this item.
	fn decide(&mut self, size: u32) -> NextStep {
		// once the remaining space drops below the buffer, we are "finishing":
		// every subsequent call counts towards finish_limit
		let finishing = self.current_size + self.size_buffer > self.max_size;
		let fits = self.current_size + size <= self.max_size;
		// finish flag uses the counter value *before* this call's increment
		let finish = self.finish_counter + 1 >= self.finish_limit;

		if finishing {
			self.finish_counter += 1;
		}

		if fits {
			// NOTE(review): the size is charged here even though the caller
			// may still drop the item when the other policy disagrees
			// (see BlockAssembler::fill_transactions) -- confirm intent
			self.current_size += size;
		}

		match (fits, finish) {
			(true, true) => NextStep::FinishAndAppend,
			(true, false) => NextStep::Append,
			(false, true) => NextStep::FinishAndIgnore,
			(false, false) => NextStep::Ignore,
		}
	}
}
/// Block assembler
pub struct BlockAssembler;

impl BlockAssembler {
	/// Creates a new block template on top of the current best block.
	///
	/// Transactions are taken from the memory pool in two passes:
	/// first ordered by transaction score, then by package score.
	pub fn create_new_block(store: &SharedStore, mempool: &MemoryPool, time: u32) -> BlockTemplate {
		// get best block
		// take it's hash && height
		let best_block = store.best_block().expect("Cannot assemble new block without genesis block");
		let previous_header_hash = best_block.hash;
		let height = best_block.number + 1;
		// TODO: calculate nbits (retarget may be required)
		let nbits = 0;
		// TODO: calculate version
		let version = 0;

		// TODO: use constants and real values
		let mut block_size = SizePolicy::new(0, 1_000_000, 100_000, 50);
		// TODO: use constants and real values
		let mut sigops = SizePolicy::new(0, 2000, 8, 50);

		// TODO: calculate coinbase fee
		let mut coinbase_value = 0u32;
		let mut transactions = Vec::new();

		// add priority transactions
		BlockAssembler::fill_transactions(mempool, &mut block_size, &mut sigops, &mut coinbase_value, &mut transactions, OrderingStrategy::ByTransactionScore);
		// add package transactions
		BlockAssembler::fill_transactions(mempool, &mut block_size, &mut sigops, &mut coinbase_value, &mut transactions, OrderingStrategy::ByPackageScore);

		BlockTemplate {
			version: version,
			previous_header_hash: previous_header_hash,
			time: time,
			nbits: nbits,
			height: height,
			transactions: transactions,
			coinbase_value: coinbase_value,
		}
	}

	/// Appends memory pool transactions to `transactions`, respecting the
	/// block size and sigops policies. Fees of appended transactions are
	/// accumulated into `coinbase_value`.
	fn fill_transactions(
		mempool: &MemoryPool,
		block_size: &mut SizePolicy,
		sigops: &mut SizePolicy,
		coinbase_value: &mut u32,
		transactions: &mut Vec<IndexedTransaction>,
		strategy: OrderingStrategy
	) {
		for entry in mempool.iter(strategy) {
			// the transaction may already have been added by a previous pass
			// with a different ordering strategy; skip it and keep scanning
			// (`break` here would abort the whole pass on the first duplicate)
			if transactions.iter().any(|x| x.hash == entry.hash) {
				continue;
			}

			// TODO: calculate sigops
			let transaction_size = entry.size as u32;
			let transaction_sigops = 0;

			let size_step = block_size.decide(transaction_size);
			let sigops_step = sigops.decide(transaction_sigops);

			let transaction = IndexedTransaction {
				transaction: entry.transaction.clone(),
				hash: entry.hash.clone(),
			};

			match size_step.and(sigops_step) {
				NextStep::Append => {
					// miner_fee is i64, but we can safely cast it to u32
					// memory pool should restrict miner fee to be positive
					*coinbase_value += entry.miner_fee as u32;
					transactions.push(transaction);
				},
				NextStep::FinishAndAppend => {
					// the appended transaction's fee must be counted too
					*coinbase_value += entry.miner_fee as u32;
					transactions.push(transaction);
					break;
				},
				NextStep::Ignore => (),
				NextStep::FinishAndIgnore => {
					break;
				},
			}
		}
	}
}
#[cfg(test)]
mod tests {
	use super::{SizePolicy, NextStep};

	#[test]
	fn test_size_policy() {
		// max size 1000, finishing buffer 200, 3 checks once finishing started
		let mut size_policy = SizePolicy::new(0, 1000, 200, 3);
		assert_eq!(size_policy.decide(100), NextStep::Append);
		assert_eq!(size_policy.decide(500), NextStep::Append);
		// current size is 600; another 600 does not fit
		assert_eq!(size_policy.decide(600), NextStep::Ignore);
		assert_eq!(size_policy.decide(200), NextStep::Append);
		assert_eq!(size_policy.decide(300), NextStep::Ignore);
		assert_eq!(size_policy.decide(300), NextStep::Ignore);
		// this transaction will make counter + buffer > max size
		assert_eq!(size_policy.decide(1), NextStep::Append);
		// so now only 3 more transactions may be accepted / ignored
		assert_eq!(size_policy.decide(1), NextStep::Append);
		assert_eq!(size_policy.decide(1000), NextStep::Ignore);
		assert_eq!(size_policy.decide(1), NextStep::FinishAndAppend);
		// we should not call decide again after it returned finish...
		// but we can, let's check if result is ok
		assert_eq!(size_policy.decide(1000), NextStep::FinishAndIgnore);
	}

	#[test]
	fn test_next_step_and() {
		assert_eq!(NextStep::Append.and(NextStep::Append), NextStep::Append);
		assert_eq!(NextStep::Ignore.and(NextStep::Append), NextStep::Ignore);
		assert_eq!(NextStep::FinishAndIgnore.and(NextStep::Append), NextStep::FinishAndIgnore);
		assert_eq!(NextStep::Ignore.and(NextStep::FinishAndIgnore), NextStep::FinishAndIgnore);
		assert_eq!(NextStep::FinishAndAppend.and(NextStep::FinishAndIgnore), NextStep::FinishAndIgnore);
		assert_eq!(NextStep::FinishAndAppend.and(NextStep::Ignore), NextStep::FinishAndIgnore);
		assert_eq!(NextStep::FinishAndAppend.and(NextStep::Append), NextStep::FinishAndAppend);
	}
}

135
miner/src/cpu_miner.rs Normal file
View File

@ -0,0 +1,135 @@
use byteorder::{WriteBytesExt, LittleEndian};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use primitives::uint::U256;
use chain::{merkle_root, Transaction};
use crypto::dhash256;
use ser::Stream;
use block_assembler::BlockTemplate;
use pow::is_valid_proof_of_work_hash;
/// Instead of serializing `BlockHeader` from scratch over and over again,
/// let's keep it serialized in memory and replace needed bytes.
///
/// Serialized layout (see `new` below):
/// version (4 bytes) | previous header hash (32) | merkle root (32) |
/// time (4) | nbits (4) | nonce (4)
struct BlockHeaderBytes {
	// raw serialized header
	data: Bytes,
}
impl BlockHeaderBytes {
	/// Creates new instance of block header bytes.
	///
	/// Merkle root, time and nonce are serialized as zero placeholders and
	/// are expected to be patched later through the setters below.
	fn new(version: u32, previous_header_hash: H256, nbits: u32) -> Self {
		let merkle_root_hash = H256::default();
		let time = 0u32;
		let nonce = 0u32;

		let mut stream = Stream::default();
		stream
			.append(&version)
			.append(&previous_header_hash)
			.append(&merkle_root_hash)
			.append(&time)
			.append(&nbits)
			.append(&nonce);

		BlockHeaderBytes {
			data: stream.out(),
		}
	}

	/// Set merkle root hash
	fn set_merkle_root_hash(&mut self, hash: &H256) {
		// merkle root sits right after version (4) + previous hash (32)
		let mut merkle_bytes: &mut [u8] = &mut self.data[4 + 32..4 + 32 + 32];
		merkle_bytes.copy_from_slice(&**hash);
	}

	/// Set block header time
	fn set_time(&mut self, time: u32) {
		// time starts at offset 68; write_u32 only touches the first 4 bytes
		// of the slice
		let mut time_bytes: &mut [u8] = &mut self.data[4 + 32 + 32..];
		time_bytes.write_u32::<LittleEndian>(time).unwrap();
	}

	/// Set block header nonce
	fn set_nonce(&mut self, nonce: u32) {
		// nonce is the last header field, at offset 76
		let mut nonce_bytes: &mut [u8] = &mut self.data[4 + 32 + 32 + 4 + 4..];
		nonce_bytes.write_u32::<LittleEndian>(nonce).unwrap();
	}

	/// Returns block header hash (double sha256 of the serialized header)
	fn hash(&self) -> H256 {
		dhash256(&self.data)
	}
}
/// This trait should be implemented by coinbase transaction.
pub trait CoinbaseTransaction {
	/// Protocols like stratum limit number of extranonce bytes.
	/// This function informs miner about maximum size of extra nonce.
	fn max_extranonce(&self) -> U256;

	/// Should be used to increase number of hash possibilities for miner
	fn set_extranonce(&mut self, extranonce: &U256);

	/// Returns transaction hash
	fn hash(&self) -> H256;

	/// Consumes the wrapper and returns the underlying transaction
	fn drain(self) -> Transaction;
}
/// Cpu miner solution.
pub struct Solution {
	/// Block header nonce.
	pub nonce: u32,
	/// Coinbase transaction extra nonce (modifiable by miner).
	pub extranonce: U256,
	/// Block header time.
	pub time: u32,
	/// Coinbase transaction (extranonce is already set).
	pub coinbase_transaction: Transaction,
}
/// Simple bitcoin cpu miner.
/// First it tries to find solution by changing block header nonce.
/// Once all nonce values have been tried, it increases extranonce.
/// Once all of them have been tried (quite unlikely on cpu ;),
/// and solution still hasn't been found it returns None.
/// It's possible to also experiment with time, but I find it pointless
/// to implement on CPU.
pub fn find_solution<T>(block: BlockTemplate, mut coinbase_transaction: T) -> Option<Solution> where T: CoinbaseTransaction {
	let max_extranonce = coinbase_transaction.max_extranonce();
	let mut extranonce = U256::default();

	let mut header_bytes = BlockHeaderBytes::new(block.version, block.previous_header_hash, block.nbits);
	// update header with time
	header_bytes.set_time(block.time);

	while extranonce < max_extranonce {
		// update coinbase transaction with new extranonce
		coinbase_transaction.set_extranonce(&extranonce);

		// recalculate merkle root hash; coinbase is always the first leaf
		let mut merkle_tree = vec![coinbase_transaction.hash()];
		merkle_tree.extend(block.transactions.iter().map(|tx| tx.hash.clone()));
		let merkle_root_hash = merkle_root(&merkle_tree);

		// update header with new merkle root hash
		header_bytes.set_merkle_root_hash(&merkle_root_hash);

		// try the whole u32 nonce space; the range is computed in u64 so
		// that u32::max_value() itself is included
		for nonce in 0..(u32::max_value() as u64 + 1) {
			// update header with the next nonce
			header_bytes.set_nonce(nonce as u32);
			let hash = header_bytes.hash();
			if is_valid_proof_of_work_hash(block.nbits.into(), &hash) {
				let solution = Solution {
					nonce: nonce as u32,
					extranonce: extranonce,
					time: block.time,
					coinbase_transaction: coinbase_transaction.drain(),
				};

				return Some(solution);
			}
		}

		extranonce = extranonce + 1.into();
	}

	None
}

View File

@ -1,12 +1,18 @@
extern crate byteorder;
extern crate heapsize;
extern crate bitcrypto as crypto;
extern crate chain; extern crate chain;
extern crate db; extern crate db;
extern crate heapsize;
extern crate primitives; extern crate primitives;
extern crate serialization as ser; extern crate serialization as ser;
extern crate test_data; extern crate test_data;
mod block_assembler;
mod cpu_miner;
mod fee; mod fee;
mod memory_pool; mod memory_pool;
mod pow;
pub use fee::{transaction_fee, transaction_fee_rate}; pub use fee::{transaction_fee, transaction_fee_rate};
pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy}; pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy};

View File

@ -50,25 +50,25 @@ pub struct MemoryPool {
#[derive(Debug)] #[derive(Debug)]
pub struct Entry { pub struct Entry {
/// Transaction /// Transaction
transaction: Transaction, pub transaction: Transaction,
/// In-pool ancestors hashes for this transaction /// In-pool ancestors hashes for this transaction
ancestors: HashSet<H256>, pub ancestors: HashSet<H256>,
/// Transaction hash (stored for effeciency) /// Transaction hash (stored for effeciency)
hash: H256, pub hash: H256,
/// Transaction size (stored for effeciency) /// Transaction size (stored for effeciency)
size: usize, pub size: usize,
/// Throughout index of this transaction in memory pool (non persistent) /// Throughout index of this transaction in memory pool (non persistent)
storage_index: u64, pub storage_index: u64,
/// Transaction fee (stored for efficiency) /// Transaction fee (stored for efficiency)
miner_fee: i64, pub miner_fee: i64,
/// Virtual transaction fee (a way to prioritize/penalize transaction) /// Virtual transaction fee (a way to prioritize/penalize transaction)
miner_virtual_fee: i64, pub miner_virtual_fee: i64,
/// size + Sum(size) for all in-pool descendants /// size + Sum(size) for all in-pool descendants
package_size: usize, pub package_size: usize,
/// miner_fee + Sum(miner_fee) for all in-pool descendants /// miner_fee + Sum(miner_fee) for all in-pool descendants
package_miner_fee: i64, pub package_miner_fee: i64,
/// miner_virtual_fee + Sum(miner_virtual_fee) for all in-pool descendants /// miner_virtual_fee + Sum(miner_virtual_fee) for all in-pool descendants
package_miner_virtual_fee: i64, pub package_miner_virtual_fee: i64,
} }
/// Multi-index transactions storage /// Multi-index transactions storage
@ -624,7 +624,7 @@ impl MemoryPool {
/// Ancestors are always returned before descendant transactions. /// Ancestors are always returned before descendant transactions.
/// Use this function with care, only if really needed (heavy memory usage) /// Use this function with care, only if really needed (heavy memory usage)
pub fn read_n_with_strategy(&mut self, n: usize, strategy: OrderingStrategy) -> Vec<H256> { pub fn read_n_with_strategy(&mut self, n: usize, strategy: OrderingStrategy) -> Vec<H256> {
self.iter(strategy).take(n).collect() self.iter(strategy).map(|entry| entry.hash.clone()).take(n).collect()
} }
/// Removes the 'top' transaction from the `MemoryPool` using selected strategy. /// Removes the 'top' transaction from the `MemoryPool` using selected strategy.
@ -772,7 +772,7 @@ impl<'a> MemoryPoolIterator<'a> {
} }
impl<'a> Iterator for MemoryPoolIterator<'a> { impl<'a> Iterator for MemoryPoolIterator<'a> {
type Item = H256; type Item = &'a Entry;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
let top_hash = match self.strategy { let top_hash = match self.strategy {
@ -781,13 +781,12 @@ impl<'a> Iterator for MemoryPoolIterator<'a> {
OrderingStrategy::ByPackageScore => self.references.ordered.by_package_score.iter().map(|entry| entry.hash.clone()).nth(0), OrderingStrategy::ByPackageScore => self.references.ordered.by_package_score.iter().map(|entry| entry.hash.clone()).nth(0),
}; };
if let Some(ref top_hash) = top_hash { top_hash.map(|top_hash| {
let entry = self.memory_pool.storage.by_hash.get(top_hash).expect("missing hash is a sign of MemoryPool internal inconsistancy"); let entry = self.memory_pool.storage.by_hash.get(&top_hash).expect("missing hash is a sign of MemoryPool internal inconsistancy");
self.removed.insert(top_hash.clone()); self.removed.insert(top_hash.clone());
self.references.remove(Some(&self.removed), &self.memory_pool.storage.by_hash, entry); self.references.remove(Some(&self.removed), &self.memory_pool.storage.by_hash, entry);
} entry
})
top_hash
} }
} }

85
miner/src/pow.rs Normal file
View File

@ -0,0 +1,85 @@
use std::cmp;
use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::uint::U256;
// The measured timespan is clamped to within this factor of the target
const RETARGETING_FACTOR: u32 = 4;
// Target time between retargets: two weeks, in seconds
const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;

// The upper and lower bounds for retargeting timespan
const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR;
const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR;
/// Constrains `value` to the range `[min, max]`: first raises it to at
/// least `min`, then lowers the result to at most `max`.
fn range_constrain(value: i64, min: i64, max: i64) -> i64 {
	let lower_bounded = cmp::max(value, min);
	cmp::min(lower_bounded, max)
}
/// Returns true if hash is lower or equal than target represented by compact bits
pub fn is_valid_proof_of_work_hash(bits: Compact, hash: &H256) -> bool {
	// interpret the hash as a big number using reversed byte order
	let value = U256::from(&*hash.reversed() as &[u8]);
	match bits.to_u256() {
		Ok(target) => value <= target,
		// an overflowing or negative compact value can never be met
		Err(_) => false,
	}
}
/// Returns true if hash is lower or equal than target and target is lower or equal
/// than current network maximum
pub fn is_valid_proof_of_work(max_work_bits: Compact, bits: Compact, hash: &H256) -> bool {
	let (maximum, target) = match (max_work_bits.to_u256(), bits.to_u256()) {
		(Ok(maximum), Ok(target)) => (maximum, target),
		// an overflowing or negative compact value on either side => invalid
		_ => return false,
	};

	// interpret the hash as a big number using reversed byte order
	let value = U256::from(&*hash.reversed() as &[u8]);
	target <= maximum && value <= target
}
/// Returns constrained number of seconds since last retarget
pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
	// subtract unsigned 32 bit numbers in signed 64 bit space in
	// order to prevent underflow before applying the range constraint.
	let elapsed = i64::from(last_timestamp) - i64::from(retarget_timestamp);
	range_constrain(elapsed, i64::from(MIN_TIMESPAN), i64::from(MAX_TIMESPAN)) as u32
}
/// Algorithm used for retargeting work every 2 weeks
///
/// new_target = old_target * constrained_timespan / target_timespan,
/// capped at the network maximum target.
pub fn work_required_retarget(max_work_bits: Compact, retarget_timestamp: u32, last_timestamp: u32, last_bits: Compact) -> Compact {
	let mut retarget: U256 = last_bits.into();
	let maximum: U256 = max_work_bits.into();

	// multiply first, then divide, to keep precision
	// NOTE(review): the multiplication may overflow U256 for very easy
	// targets -- confirm U256 arithmetic semantics (wrap vs panic)
	retarget = retarget * retarget_timespan(retarget_timestamp, last_timestamp).into();
	retarget = retarget / TARGET_TIMESPAN_SECONDS.into();

	if retarget > maximum {
		max_work_bits
	} else {
		retarget.into()
	}
}
#[cfg(test)]
mod tests {
	use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work};
	// there is no root `hash` module in this crate; H256 lives in `primitives`
	use primitives::hash::H256;

	// checks both pow validators against the same (max, bits, hash) triple
	fn is_valid_pow(max: u32, bits: u32, hash: &'static str) -> bool {
		is_valid_proof_of_work_hash(bits.into(), &H256::from_reversed_str(hash)) &&
		is_valid_proof_of_work(max.into(), bits.into(), &H256::from_reversed_str(hash))
	}

	#[test]
	fn test_is_valid_proof_of_work() {
		// block 2
		assert!(is_valid_pow(0x1d00ffffu32, 486604799u32, "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"));
		// block 400_000
		assert!(is_valid_pow(0x1d00ffffu32, 403093919u32, "000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f"));
	}
}

View File

@ -15,13 +15,26 @@ impl From<Compact> for u32 {
} }
} }
impl From<U256> for Compact {
	/// Packs a 256-bit target into its compact representation.
	fn from(u: U256) -> Self {
		Compact::from_u256(u)
	}
}
impl From<Compact> for U256 {
	fn from(c: Compact) -> Self {
		// ignore overflows and negative values: `to_u256` reports them via
		// `Err` carrying a value, which is used here as-is
		c.to_u256().unwrap_or_else(|x| x)
	}
}
impl Compact { impl Compact {
pub fn new(u: u32) -> Self { pub fn new(u: u32) -> Self {
Compact(u) Compact(u)
} }
/// Computes the target [0, T] that a blockhash must land in to be valid /// Computes the target [0, T] that a blockhash must land in to be valid
/// Returns None, if there is an overflow or its negative value /// Returns value in error, if there is an overflow or its negative value
pub fn to_u256(&self) -> Result<U256, U256> { pub fn to_u256(&self) -> Result<U256, U256> {
let size = self.0 >> 24; let size = self.0 >> 24;
let mut word = self.0 & 0x007fffff; let mut word = self.0 & 0x007fffff;

View File

@ -4,6 +4,7 @@
extern crate rustc_serialize; extern crate rustc_serialize;
pub mod bytes; pub mod bytes;
pub mod compact;
pub mod hash; pub mod hash;
pub mod uint; pub mod uint;

View File

@ -21,11 +21,10 @@ extern crate ethcore_devtools as devtools;
extern crate test_data; extern crate test_data;
mod chain_verifier; mod chain_verifier;
mod compact;
mod utils; mod utils;
mod task; mod task;
pub use primitives::{uint, hash}; pub use primitives::{uint, hash, compact};
pub use chain_verifier::ChainVerifier; pub use chain_verifier::ChainVerifier;