merge and fix warnings

NikVolf 2019-04-09 18:45:51 +03:00
commit cfc90d3102
50 changed files with 1184 additions and 827 deletions

Cargo.lock generated
View File

@ -1881,6 +1881,7 @@ version = "0.1.0"
dependencies = [
"assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitcrypto 0.1.0",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitvec 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",

View File

@ -130,10 +130,10 @@ It is possible to import an existing `zcashd` database:
./target/release/pzec import "$ZCASH_DB/blocks"
```
By default import verifies imported the blocks. You can disable this, by adding `--verification-level=none` flag.
By default, import verifies the imported blocks. You can disable this by adding the `--verification-level=none` option.
```
./target/release/pzec import "$ZCASH_DB/blocks" --verification-level=none
./target/release/pzec --verification-level=none import "$ZCASH_DB/blocks"
```
## Command line interface

View File

@ -44,7 +44,7 @@ pub fn fetch(benchmark: &mut Benchmark) {
benchmark.start();
for _ in 0..BLOCKS {
let block = store.block(BlockRef::Hash(hashes[0].clone())).unwrap();
assert_eq!(&block.hash(), &hashes[0]);
assert_eq!(block.hash(), &hashes[0]);
}
benchmark.stop();
}

View File

@ -104,7 +104,7 @@ pub fn main(benchmark: &mut Benchmark) {
// bench
benchmark.start();
for block in verification_blocks.iter() {
chain_verifier.verify(VerificationLevel::Full, block).unwrap();
chain_verifier.verify(VerificationLevel::FULL, block).unwrap();
}
benchmark.stop();
}

View File

@ -153,7 +153,6 @@ Whenever a miner mines a block, it includes a special transaction called a coinb
### Block (block.rs)
A relatively straightforward implementation of the data structure described above. A `block` is a Rust `struct`. It implements the following traits:
* ```From<&'static str>```: this trait takes in a string and outputs a `block`. It is implemented via the `from` function, which deserializes the received string into a `block` data structure. Read more about serialization [here](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch06.asciidoc#transaction-serializationoutputs) (in the context of transactions).
* ```RepresentH256```: this trait takes a `block` data structure and hashes it, returning the hash.
The `block` struct has a few methods of its own, all of which are simple getters.
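A minimal sketch of the `From<&'static str>` conversion in use; the hex constant below is a hypothetical placeholder, and after this commit `hash()` is only available in tests or with the `test-helpers` feature:

```rust
use chain::Block;

// Hypothetical hex constant holding a serialized block; the `From`
// conversion panics if the string is not valid block hex.
const RAW_BLOCK: &'static str = "04000000...";

fn example() {
    // `From<&'static str>` hex-decodes and deserializes the string.
    let block: Block = RAW_BLOCK.into();
    // Double SHA-256 of the serialized header; after this commit the
    // method is gated behind tests / the `test-helpers` feature.
    let hash = block.hash();
    println!("block hash: {:?}", hash);
}
```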

View File

@ -1,9 +1,11 @@
use hex::FromHex;
use hash::H256;
use ser::{deserialize};
use merkle_root::merkle_root;
use {BlockHeader, Transaction};
use super::RepresentH256;
#[cfg(any(test, feature = "test-helpers"))]
use hash::H256;
#[cfg(any(test, feature = "test-helpers"))]
use merkle_root::merkle_root;
#[derive(Debug, PartialEq, Clone, Serializable, Deserializable)]
pub struct Block {
@ -17,16 +19,13 @@ impl From<&'static str> for Block {
}
}
impl RepresentH256 for Block {
fn h256(&self) -> H256 { self.hash() }
}
impl Block {
pub fn new(header: BlockHeader, transactions: Vec<Transaction>) -> Self {
Block { block_header: header, transactions: transactions }
}
/// Returns block's merkle root.
#[cfg(any(test, feature = "test-helpers"))]
pub fn merkle_root(&self) -> H256 {
let hashes = self.transactions.iter().map(Transaction::hash).collect::<Vec<H256>>();
merkle_root(&hashes)
@ -40,6 +39,7 @@ impl Block {
&self.block_header
}
#[cfg(any(test, feature = "test-helpers"))]
pub fn hash(&self) -> H256 {
self.block_header.hash()
}

View File

@ -21,8 +21,10 @@ pub struct BlockHeader {
}
impl BlockHeader {
/// Compute hash of the block header.
#[cfg(any(test, feature = "test-helpers"))]
pub fn hash(&self) -> H256 {
dhash256(&serialize(self))
block_header_hash(self)
}
pub fn equihash_input(&self) -> Bytes {
@ -58,6 +60,11 @@ impl From<&'static str> for BlockHeader {
}
}
/// Compute hash of the block header.
pub(crate) fn block_header_hash(block_header: &BlockHeader) -> H256 {
dhash256(&serialize(block_header))
}
#[cfg(test)]
mod tests {
use ser::{Reader, Error as ReaderError, Stream};
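The pattern above — a crate-private free function doing the actual hashing, with the public method gated behind `test-helpers` — lets internal callers such as `IndexedBlockHeader::from_raw` hash headers without exposing the method in production builds. A minimal sketch (not part of the diff) of the equivalence, assuming a `BlockHeader` value is in scope and the `test-helpers` feature is enabled:

```rust
fn hashes_agree(header: &BlockHeader) {
    // The cfg-gated method is a thin wrapper over the free function.
    assert_eq!(header.hash(), block_header_hash(header));
}
```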

View File

@ -1,7 +1,7 @@
use std::{io, cmp, fmt};
use hash::H256;
use ser::{Deserializable, Reader, Error as ReaderError};
use block_header::BlockHeader;
use block_header::{BlockHeader, block_header_hash};
use read_and_hash::ReadAndHash;
#[derive(Clone)]
@ -37,7 +37,7 @@ impl IndexedBlockHeader {
///
/// Hashes the contents of block header.
pub fn from_raw(header: BlockHeader) -> Self {
IndexedBlockHeader::new(header.hash(), header)
IndexedBlockHeader::new(block_header_hash(&header), header)
}
}

View File

@ -2,7 +2,7 @@ use std::{cmp, io, fmt};
use hash::H256;
use heapsize::HeapSizeOf;
use ser::{Deserializable, Reader, Error as ReaderError};
use transaction::Transaction;
use transaction::{Transaction, transaction_hash};
use read_and_hash::ReadAndHash;
#[derive(Default, Clone)]
@ -46,7 +46,7 @@ impl IndexedTransaction {
/// Hashes transaction contents.
pub fn from_raw<T>(transaction: T) -> Self where Transaction: From<T> {
let transaction = Transaction::from(transaction);
Self::new(transaction.hash(), transaction)
Self::new(transaction_hash(&transaction), transaction)
}
}

View File

@ -22,10 +22,6 @@ mod indexed_block;
mod indexed_header;
mod indexed_transaction;
pub trait RepresentH256 {
fn h256(&self) -> hash::H256;
}
pub use primitives::{hash, bytes, bigint, compact};
pub use transaction::{BTC_TX_VERSION, SPROUT_TX_VERSION, OVERWINTER_TX_VERSION, SAPLING_TX_VERSION};

View File

@ -129,8 +129,9 @@ impl Transaction {
version
}
#[cfg(any(test, feature = "test-helpers"))]
pub fn hash(&self) -> H256 {
dhash256(&serialize(self))
transaction_hash(self)
}
pub fn inputs(&self) -> &[TransactionInput] {
@ -328,6 +329,10 @@ impl Deserializable for Transaction {
}
}
pub(crate) fn transaction_hash(transaction: &Transaction) -> H256 {
dhash256(&serialize(transaction))
}
#[cfg(test)]
mod tests {
use hex::ToHex;

View File

@ -5,7 +5,7 @@ use parking_lot::RwLock;
use hash::H256;
use bytes::Bytes;
use chain::{
IndexedBlock, IndexedBlockHeader, IndexedTransaction, BlockHeader, Block, Transaction,
IndexedBlock, IndexedBlockHeader, IndexedTransaction,
OutPoint, TransactionOutput,
};
use ser::{
@ -22,7 +22,7 @@ use kv::{
COL_SPROUT_BLOCK_ROOTS, COL_TREE_STATES,
};
use storage::{
BlockRef, Error, BlockHeaderProvider, BlockProvider, BlockOrigin, TransactionMeta, IndexedBlockProvider,
BlockRef, Error, BlockHeaderProvider, BlockProvider, BlockOrigin, TransactionMeta,
TransactionMetaProvider, TransactionProvider, TransactionOutputProvider, BlockChain, Store,
SideChainOrigin, ForkChain, Forkable, CanonStore, BestBlock, NullifierTracker,
EpochTag, EpochRef, SproutTreeState, SaplingTreeState, TreeStateProvider,
@ -233,7 +233,7 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
sidechain_route.push(next_hash.clone());
next_hash = self.block_header(next_hash.into())
.expect("not to find orphaned side chain in database; qed")
.previous_header_hash;
.raw.previous_header_hash;
}
}
}
@ -308,12 +308,10 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
/// Rolls back the single best block.
fn rollback_best(&self) -> Result<H256, Error> {
let decanonized = match self.block(self.best_block.read().hash.clone().into()) {
Some(block) => block,
None => return Ok(H256::default()),
};
let best_block_hash = self.best_block.read().hash.clone();
let tx_to_decanonize = self.block_transaction_hashes(best_block_hash.into());
let decanonized_hash = self.decanonize()?;
debug_assert_eq!(decanonized.hash(), decanonized_hash);
debug_assert_eq!(best_block_hash, decanonized_hash);
// and now remove decanonized block from database
// all code currently works on the assumption that the origin of every block is one of:
@ -321,8 +319,8 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
let mut update = DBTransaction::new();
update.delete(Key::BlockHeader(decanonized_hash.clone()));
update.delete(Key::BlockTransactions(decanonized_hash.clone()));
for tx in decanonized.transactions.into_iter() {
update.delete(Key::Transaction(tx.hash()));
for tx_hash in tx_to_decanonize {
update.delete(Key::Transaction(tx_hash));
}
self.db.write(update).map_err(Error::DatabaseError)?;
@ -336,7 +334,7 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
/// Updates metadata.
pub fn canonize(&self, hash: &H256) -> Result<(), Error> {
let mut best_block = self.best_block.write();
let block = match self.indexed_block(hash.clone().into()) {
let block = match self.block(hash.clone().into()) {
Some(block) => block,
None => return Err(Error::CannotCanonize),
};
@ -431,7 +429,7 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
pub fn decanonize(&self) -> Result<H256, Error> {
let mut best_block = self.best_block.write();
let block = match self.indexed_block(best_block.hash.clone().into()) {
let block = match self.block(best_block.hash.clone().into()) {
Some(block) => block,
None => return Err(Error::CannotCanonize),
};
@ -533,13 +531,16 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
impl<T> BlockHeaderProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
self.block_header(block_ref).map(|header| serialize(&header))
self.block_header(block_ref).map(|header| serialize(&header.raw))
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.resolve_hash(block_ref)
.and_then(|hash| self.get(Key::BlockHeader(hash)))
.and_then(Value::as_block_header)
.and_then(|block_hash| {
self.get(Key::BlockHeader(block_hash.clone()))
.and_then(Value::as_block_header)
.map(|header| IndexedBlockHeader::new(block_hash, header))
})
}
}
@ -554,13 +555,13 @@ impl<T> BlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
.and_then(Value::as_block_hash)
}
fn block(&self, block_ref: BlockRef) -> Option<Block> {
fn block(&self, block_ref: BlockRef) -> Option<IndexedBlock> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.block_header(block_hash.clone().into())
.map(|header| {
let transactions = self.block_transactions(block_hash.into());
Block::new(header, transactions)
IndexedBlock::new(header, transactions)
})
})
}
@ -579,44 +580,12 @@ impl<T> BlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
.unwrap_or_default()
}
fn block_transactions(&self, block_ref: BlockRef) -> Vec<Transaction> {
fn block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction> {
self.block_transaction_hashes(block_ref)
.into_iter()
.filter_map(|hash| self.get(Key::Transaction(hash)))
.filter_map(Value::as_transaction)
.collect()
}
}
impl<T> IndexedBlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn indexed_block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.get(Key::BlockHeader(block_hash.clone()))
.and_then(Value::as_block_header)
.map(|header| IndexedBlockHeader::new(block_hash, header))
})
}
fn indexed_block(&self, block_ref: BlockRef) -> Option<IndexedBlock> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.indexed_block_header(block_hash.clone().into())
.map(|header| {
let transactions = self.indexed_block_transactions(block_hash.into());
IndexedBlock::new(header, transactions)
})
})
}
fn indexed_block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction> {
self.block_transaction_hashes(block_ref)
.into_iter()
.filter_map(|hash| {
self.get(Key::Transaction(hash.clone()))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(hash, tx))
})
.filter_map(|hash| self.get(Key::Transaction(hash))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(hash, tx)))
.collect()
}
}
@ -630,12 +599,13 @@ impl<T> TransactionMetaProvider for BlockChainDatabase<T> where T: KeyValueDatab
impl<T> TransactionProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
self.transaction(hash).map(|tx| serialize(&tx))
self.transaction(hash).map(|tx| serialize(&tx.raw))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction> {
self.get(Key::Transaction(hash.clone()))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(*hash, tx))
}
}
@ -644,7 +614,7 @@ impl<T> TransactionOutputProvider for BlockChainDatabase<T> where T: KeyValueDat
// return previous transaction outputs only for canon chain transactions
self.transaction_meta(&prevout.hash)
.and_then(|_| self.transaction(&prevout.hash))
.and_then(|tx| tx.outputs.into_iter().nth(prevout.index as usize))
.and_then(|tx| tx.raw.outputs.into_iter().nth(prevout.index as usize))
}
fn is_spent(&self, prevout: &OutPoint) -> bool {
@ -675,7 +645,7 @@ impl<T> TreeStateProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn sapling_block_root(&self, block_hash: &H256) -> Option<H256> {
self.block_header(BlockRef::Hash(*block_hash))
.map(|header| header.final_sapling_root)
.map(|header| header.raw.final_sapling_root)
}
}
@ -729,7 +699,7 @@ impl<T> Store for BlockChainDatabase<T> where T: KeyValueDatabase {
}
/// get best header
fn best_header(&self) -> BlockHeader {
fn best_header(&self) -> IndexedBlockHeader {
self.block_header(self.best_block().hash.into()).expect("best block header should be in db; qed")
}
}

View File

@ -811,8 +811,8 @@ impl TransactionProvider for MemoryPool {
self.get(hash).map(|t| serialize(t))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
self.get(hash).cloned()
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction> {
self.get(hash).cloned().map(|tx| IndexedTransaction::new(*hash, tx))
}
}

View File

@ -2,7 +2,7 @@
//! https://www.anintegratedworld.com/unravelling-the-mysterious-block-chain-magic-number/
use compact::Compact;
use chain::Block;
use chain::IndexedBlock;
use primitives::hash::H256;
use primitives::bigint::U256;
@ -69,11 +69,12 @@ impl Network {
}
}
pub fn genesis_block(&self) -> Block {
pub fn genesis_block(&self) -> IndexedBlock {
match *self {
Network::Mainnet | Network::Other(_) => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Testnet => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac40000000000000000000000000000000000000000000000000000000000000000a11e1358ffff07200600000000000000000000000000000000000000000000000000000000000000fd400500a6a51259c3f6732481e2d035197218b7a69504461d04335503cd69759b2d02bd2b53a9653f42cb33c608511c953673fa9da76170958115fe92157ad3bb5720d927f18e09459bf5c6072973e143e20f9bdf0584058c96b7c2234c7565f100d5eea083ba5d3dbaff9f0681799a113e7beff4a611d2b49590563109962baa149b628aae869af791f2f70bb041bd7ebfa658570917f6654a142b05e7ec0289a4f46470be7be5f693b90173eaaa6e84907170f32602204f1f4e1c04b1830116ffd0c54f0b1caa9a5698357bd8aa1f5ac8fc93b405265d824ba0e49f69dab5446653927298e6b7bdc61ee86ff31c07bde86331b4e500d42e4e50417e285502684b7966184505b885b42819a88469d1e9cf55072d7f3510f85580db689302eab377e4e11b14a91fdd0df7627efc048934f0aff8e7eb77eb17b3a95de13678004f2512293891d8baf8dde0ef69be520a58bbd6038ce899c9594cf3e30b8c3d9c7ecc832d4c19a6212747b50724e6f70f6451f78fd27b58ce43ca33b1641304a916186cfbe7dbca224f55d08530ba851e4df22baf7ab7078e9cbea46c0798b35a750f54103b0cdd08c81a6505c4932f6bfbd492a9fced31d54e98b6370d4c96600552fcf5b37780ed18c8787d03200963600db297a8f05dfa551321d17b9917edadcda51e274830749d133ad226f8bb6b94f13b4f77e67b35b71f52112ce9ba5da706ad9573584a2570a4ff25d29ab9761a06bdcf2c33638bf9baf2054825037881c14adf3816ba0cbd0fca689aad3ce16f2fe362c98f48134a9221765d939f0b49677d1c2447e56b46859f1810e2cf23e82a53e0d44f34dae932581b3b7f49eaec59af872cf9de757a964f7b33d143a36c270189508fcafe19398e4d2966948164d40556b05b7ff532f66f5d1edc41334ef742f78221dfe0c7ae2275bb3f24c89ae35f00afeea4e6ed187b866b209dc6e83b660593fce7c40e143beb07ac86c56f39e895385924667efe3a3f031938753c7764a2dbeb0a643fd359c46e614873fd0424e435fa7fac083b9a41a9d6bf7e284eee537ea7c50dd239f359941a43dc982745184bf3ee31a8dc850316aa9c6b66d6985acee814373be3458550659e1a06287c3b3b76a185c5cb93e38c1eebcf34ff072894b6430aed8d34122dafd925c46a515cca79b0269c92b301890ca6b0dc8b679cdac0f23318c105de73d7a46d16d2dad988d49c22e9963c117960bdc70ef0db6b091cf09445a516176b7f6d58ec29539166cc8a38bbff387acefffab2ea5faad0e8bb70625716ef0edf61940733c25993ea3de9f0be23d36e7cb8da10505f9dc426cd0e6e5b173ab4fff8c37e1f1fb56d1ea372013d075e0934c6919393cfc21395eea20718fad03542a4162a9ded66c814ad8320b2d7c2da3ecaf206da34c502db2096d1c46699a91dd1c432f019ad434e2c1ce507f91104f66f491fed37b225b8e0b2888c37276cfa0468fc13b8d593fd9a2675f0f5b20b8a15f8fa7558176a530d6865738ddb25d3426dab905221681cf9da0e0200eea5b2eba3ad3a5237d2a391f9074bf1779a2005cee43eec2b058511532635e0fea61664f531ac2b356f40db5c5d275a4cf5c82d468976455af4e3362cc8f71aa95e71d394aff3ead6f7101279f95bcd8a0fedce1d21cb3c9f6dd3b182fce0db5d6712981b651f29178a24119968b14783cafa713bc5f2a65205a42e4ce9dc7ba462bdb1f3e4553afc15f5f39998fdb53e7e231e3e520a46943734a007c2daa1eda9f495791657eefcac5c32833936e568d06187857ed04d7b97167ae207c5c5ae54e528c36016a984235e9c5b2f0718d7b3aa93c7822ccc772580b6599671b3c02ece8a21399abd33cfd3028790133167d0a97e7de53dc8ff0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Regtest | Network::Unitest => "TODO".into(),
Network::Mainnet | Network::Regtest | Network::Unitest | Network::Other(_) =>
IndexedBlock::from_raw("040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into()),
Network::Testnet =>
IndexedBlock::from_raw("040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac40000000000000000000000000000000000000000000000000000000000000000a11e1358ffff07200600000000000000000000000000000000000000000000000000000000000000fd400500a6a51259c3f6732481e2d035197218b7a69504461d04335503cd69759b2d02bd2b53a9653f42cb33c608511c953673fa9da76170958115fe92157ad3bb5720d927f18e09459bf5c6072973e143e20f9bdf0584058c96b7c2234c7565f100d5eea083ba5d3dbaff9f0681799a113e7beff4a611d2b49590563109962baa149b628aae869af791f2f70bb041bd7ebfa658570917f6654a142b05e7ec0289a4f46470be7be5f693b90173eaaa6e84907170f32602204f1f4e1c04b1830116ffd0c54f0b1caa9a5698357bd8aa1f5ac8fc93b405265d824ba0e49f69dab5446653927298e6b7bdc61ee86ff31c07bde86331b4e500d42e4e50417e285502684b7966184505b885b42819a88469d1e9cf55072d7f3510f85580db689302eab377e4e11b14a91fdd0df7627efc048934f0aff8e7eb77eb17b3a95de13678004f2512293891d8baf8dde0ef69be520a58bbd6038ce899c9594cf3e30b8c3d9c7ecc832d4c19a6212747b50724e6f70f6451f78fd27b58ce43ca33b1641304a916186cfbe7dbca224f55d08530ba851e4df22baf7ab7078e9cbea46c0798b35a750f54103b0cdd08c81a6505c4932f6bfbd492a9fced31d54e98b6370d4c96600552fcf5b37780ed18c8787d03200963600db297a8f05dfa551321d17b9917edadcda51e274830749d133ad226f8bb6b94f13b4f77e67b35b71f52112ce9ba5da706ad9573584a2570a4ff25d29ab9761a06bdcf2c33638bf9baf2054825037881c14adf3816ba0cbd0fca689aad3ce16f2fe362c98f48134a9221765d939f0b49677d1c2447e56b46859f1810e2cf23e82a53e0d44f34dae932581b3b7f49eaec59af872cf9de757a964f7b33d143a36c270189508fcafe19398e4d2966948164d40556b05b7ff532f66f5d1edc41334ef742f78221dfe0c7ae2275bb3f24c89ae35f00afeea4e6ed187b866b209dc6e83b660593fce7c40e143beb07ac86c56f39e895385924667efe3a3f031938753c7764a2dbeb0a643fd359c46e614873fd0424e435fa7fac083b9a41a9d6bf7e284eee537ea7c50dd239f359941a43dc982745184bf3ee31a8dc850316aa9c6b66d6985acee814373be3458550659e1a06287c3b3b76a185c5cb93e38c1eebcf34ff072894b6430aed8d34122dafd925c46a515cca79b0269c92b301890ca6b0dc8b679cdac0f23318c105de73d7a46d16d2dad988d49c22e9963c117960bdc70ef0db6b091cf09445a516176b7f6d58ec29539166cc8a38bbff387acefffab2ea5faad0e8bb70625716ef0edf61940733c25993ea3de9f0be23d36e7cb8da10505f9dc426cd0e6e5b173ab4fff8c37e1f1fb56d1ea372013d075e0934c6919393cfc21395eea20718fad03542a4162a9ded66c814ad8320b2d7c2da3ecaf206da34c502db2096d1c46699a91dd1c432f019ad434e2c1ce507f91104f66f491fed37b225b8e0b2888c37276cfa0468fc13b8d593fd9a2675f0f5b20b8a15f8fa7558176a530d6865738ddb25d3426dab905221681cf9da0e0200eea5b2eba3ad3a5237d2a391f9074bf1779a2005cee43eec2b058511532635e0fea61664f531ac2b356f40db5c5d275a4cf5c82d468976455af4e3362cc8f71aa95e71d394aff3ead6f7101279f95bcd8a0fedce1d21cb3c9f6dd3b182fce0db5d6712981b651f29178a24119968b14783cafa713bc5f2a65205a42e4ce9dc7ba462bdb1f3e4553afc15f5f39998fdb53e7e231e3e520a46943734a007c2daa1eda9f495791657eefcac5c32833936e568d06187857ed04d7b97167ae207c5c5ae54e528c36016a984235e9c5b2f0718d7b3aa93c7822ccc772580b6599671b3c02ece8a21399abd33cfd3028790133167d0a97e7de53dc8ff0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into()),
}
}
@ -81,7 +82,7 @@ impl Network {
match *self {
// block #410100, best checkpoint of zcashd as of 12.03.2019
Network::Mainnet => H256::from_reversed_str("0000000002c565958f783a24a4ac17cde898ff525e75ed9baf66861b0b9fcada"),
_ => self.genesis_block().hash(),
_ => self.genesis_block().hash().clone(),
}
}
}

View File

@ -4,7 +4,7 @@ use std::net::SocketAddr;
use parking_lot::RwLock;
use futures::{Future, finished, failed};
use futures::stream::Stream;
use futures_cpupool::CpuPool;
use futures_cpupool::{CpuPool, Builder as CpuPoolBuilder};
use tokio_io::IoFuture;
use tokio_core::net::{TcpListener, TcpStream};
use tokio_core::reactor::{Handle, Remote, Timeout, Interval};
@ -429,7 +429,10 @@ impl Drop for P2P {
impl P2P {
pub fn new(config: Config, local_sync_node: LocalSyncNodeRef, handle: Handle) -> Result<Self, Box<error::Error>> {
let pool = CpuPool::new(config.threads);
let pool = CpuPoolBuilder::new()
.name_prefix("I/O thread")
.pool_size(config.threads)
.create();
let context = try!(Context::new(local_sync_node, pool.clone(), handle.remote().clone(), config.clone()));

View File

@ -23,7 +23,7 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
}
}
Err(Error::TooManyOrphanBlocks) => return Err("Too many orphan (unordered) blocks".into()),
Err(_) => return Err("Cannot append block".into()),
Err(error) => return Err(format!("Cannot append block: {:?}", error)),
}
}

View File

@ -17,8 +17,8 @@ pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
BlockRef::Number(block_ref.parse().map_err(|e| format!("Invalid block hash: {}", e))?)
};
let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash();
let genesis_hash = cfg.network.genesis_block().hash();
let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash;
let genesis_hash = *cfg.network.genesis_block().hash();
let mut best_block_hash = cfg.db.best_block().hash;
debug_assert!(best_block_hash != H256::default()); // genesis inserted in init_db

View File

@ -126,15 +126,15 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let services = Services::default().with_network(true);
let verification_level = match matches.value_of("verification-level") {
Some(s) if s == "full" => VerificationLevel::Full,
Some(s) if s == "header" => VerificationLevel::Header,
Some(s) if s == "none" => VerificationLevel::NoVerification,
Some(s) if s == "full" => VerificationLevel::FULL,
Some(s) if s == "header" => VerificationLevel::HEADER,
Some(s) if s == "none" => VerificationLevel::NO_VERIFICATION,
Some(s) => return Err(format!("Invalid verification level: {}", s)),
None => VerificationLevel::Full,
None => VerificationLevel::FULL,
};
let verification_edge = match matches.value_of("verification-edge") {
Some(s) if verification_level != VerificationLevel::Full => {
Some(s) if verification_level != VerificationLevel::FULL => {
let edge: H256 = s.parse().map_err(|_| "Invalid verification edge".to_owned())?;
edge.reversed()
},
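The rename from `VerificationLevel::Full` to `VerificationLevel::FULL` — together with the `bitflags 1.0.4` entry added to Cargo.lock above — suggests the level is now a bitflags set rather than a plain enum. A plausible sketch of its shape; the exact bit values are an assumption, not taken from this diff:

```rust
#[macro_use]
extern crate bitflags;

bitflags! {
    pub struct VerificationLevel: u32 {
        /// Verify the block header only.
        const HEADER = 0b01;
        /// Verify the header plus full block contents.
        const FULL = 0b11;
        /// Skip verification entirely.
        const NO_VERIFICATION = 0b00;
    }
}
```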

View File

@ -5,7 +5,6 @@ use app_dirs::{app_dir, AppDataType};
use {storage, APP_INFO};
use db;
use config::Config;
use chain::IndexedBlock;
pub fn open_db(data_dir: &Option<String>, db_cache: usize) -> storage::SharedStore {
let db_path = match *data_dir {
@ -26,7 +25,7 @@ pub fn node_table_path(cfg: &Config) -> PathBuf {
pub fn init_db(cfg: &Config) -> Result<(), String> {
// insert genesis block if db is empty
let genesis_block = IndexedBlock::from_raw(cfg.network.genesis_block());
let genesis_block = cfg.network.genesis_block();
match cfg.db.block_hash(0) {
Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()),
Some(_) => Ok(()),

View File

@ -73,12 +73,12 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
fn raw_block(&self, hash: GlobalH256) -> Option<RawBlock> {
self.storage.block(hash.into())
.map(|block| {
serialize(&block).into()
serialize(&block.to_raw_block()).into()
})
}
fn verbose_block(&self, hash: GlobalH256) -> Option<VerboseBlock> {
self.storage.indexed_block(hash.into())
self.storage.block(hash.into())
.map(|block| {
let height = self.storage.block_number(block.hash());
let confirmations = match height {
@ -113,7 +113,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
None => return Err(transaction_not_found(prev_out.hash)),
};
if prev_out.index >= transaction.outputs.len() as u32 {
if prev_out.index >= transaction.raw.outputs.len() as u32 {
return Err(transaction_output_not_found(prev_out));
}
@ -135,15 +135,15 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
return Err(transaction_not_found(prev_out.hash));
}
let ref script_bytes = transaction.outputs[prev_out.index as usize].script_pubkey;
let ref script_bytes = transaction.raw.outputs[prev_out.index as usize].script_pubkey;
let script: Script = script_bytes.clone().into();
let script_asm = format!("{}", script);
let script_addresses = script.extract_destinations().unwrap_or(vec![]);
Ok(GetTxOutResponse {
bestblock: block_header.hash().into(),
bestblock: block_header.hash.into(),
confirmations: best_block.number - meta.height() + 1,
value: 0.00000001f64 * (transaction.outputs[prev_out.index as usize].value as f64),
value: 0.00000001f64 * (transaction.raw.outputs[prev_out.index as usize].value as f64),
script: TransactionOutputScript {
asm: script_asm,
hex: script_bytes.clone().into(),
@ -160,8 +160,8 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
kind: a.kind,
}).collect(),
},
version: transaction.version,
coinbase: transaction.is_coinbase(),
version: transaction.raw.version,
coinbase: transaction.raw.is_coinbase(),
})
}
}

View File

@ -1,4 +1,4 @@
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use {BlockRef, BlockHeaderProvider};
pub struct BlockAncestors<'a> {
@ -16,12 +16,12 @@ impl<'a> BlockAncestors<'a> {
}
impl<'a> Iterator for BlockAncestors<'a> {
type Item = BlockHeader;
type Item = IndexedBlockHeader;
fn next(&mut self) -> Option<Self::Item> {
let result = self.block.take().and_then(|block| self.headers.block_header(block));
self.block = match result {
Some(ref header) => Some(BlockRef::Hash(header.previous_header_hash.clone())),
Some(ref header) => Some(BlockRef::Hash(header.raw.previous_header_hash.clone())),
None => None,
};
result

View File

@ -1,4 +1,4 @@
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use {BlockRef, BlockHeaderProvider};
pub struct BlockIterator<'a> {
@ -18,7 +18,7 @@ impl<'a> BlockIterator<'a> {
}
impl<'a> Iterator for BlockIterator<'a> {
type Item = (u32, BlockHeader);
type Item = (u32, IndexedBlockHeader);
fn next(&mut self) -> Option<Self::Item> {
let result = self.headers.block_header(BlockRef::Number(self.block));

View File

@ -1,6 +1,6 @@
use hash::H256;
use bytes::Bytes;
use chain::{BlockHeader, Transaction, Block, IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use chain::{IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use {BlockRef};
pub trait BlockHeaderProvider {
@ -8,11 +8,10 @@ pub trait BlockHeaderProvider {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes>;
/// resolves block header by block reference (number/hash)
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader>;
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader>;
}
pub trait BlockProvider: BlockHeaderProvider {
/// resolves number by block hash
fn block_number(&self, hash: &H256) -> Option<u32>;
@ -20,7 +19,7 @@ pub trait BlockProvider: BlockHeaderProvider {
fn block_hash(&self, number: u32) -> Option<H256>;
/// resolves deserialized block body by block reference (number/hash)
fn block(&self, block_ref: BlockRef) -> Option<Block>;
fn block(&self, block_ref: BlockRef) -> Option<IndexedBlock>;
/// returns true if store contains given block
fn contains_block(&self, block_ref: BlockRef) -> bool {
@ -31,13 +30,5 @@ pub trait BlockProvider: BlockHeaderProvider {
fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256>;
/// returns all transactions in the block by block reference (number/hash)
fn block_transactions(&self, block_ref: BlockRef) -> Vec<Transaction>;
}
pub trait IndexedBlockProvider: BlockProvider {
fn indexed_block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader>;
fn indexed_block(&self, block_ref: BlockRef) -> Option<IndexedBlock>;
fn indexed_block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction>;
fn block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction>;
}

View File

@ -36,13 +36,15 @@ pub use block_ancestors::BlockAncestors;
pub use block_chain::{BlockChain, ForkChain, Forkable};
pub use block_iterator::BlockIterator;
pub use block_origin::{BlockOrigin, SideChainOrigin};
pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider};
pub use block_provider::{BlockHeaderProvider, BlockProvider};
pub use block_ref::BlockRef;
pub use duplex_store::{DuplexTransactionOutputProvider, NoopStore};
pub use error::Error;
pub use store::{AsSubstore, Store, SharedStore, CanonStore};
pub use transaction_meta::TransactionMeta;
pub use transaction_provider::{TransactionProvider, TransactionOutputProvider, TransactionMetaProvider};
pub use transaction_provider::{
TransactionProvider, TransactionOutputProvider, TransactionMetaProvider, CachedTransactionOutputProvider,
};
pub use nullifier_tracker::NullifierTracker;
pub use tree_state::{TreeState, H32 as H32TreeDim, Dim as TreeDim, SproutTreeState, SaplingTreeState};
pub use tree_state_provider::TreeStateProvider;

View File

@ -1,9 +1,8 @@
use std::sync::Arc;
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use {
BestBlock, BlockProvider, BlockHeaderProvider, TransactionProvider, TransactionMetaProvider,
TransactionOutputProvider, BlockChain, IndexedBlockProvider, Forkable, NullifierTracker,
TreeStateProvider,
TransactionOutputProvider, BlockChain, Forkable, NullifierTracker, TreeStateProvider,
};
pub trait CanonStore: Store + Forkable {
@ -16,13 +15,13 @@ pub trait Store: AsSubstore {
fn best_block(&self) -> BestBlock;
/// get best header
fn best_header(&self) -> BlockHeader;
fn best_header(&self) -> IndexedBlockHeader;
}
/// Allows casting Arc<Store> to reference to any substore type
pub trait AsSubstore:
BlockChain +
IndexedBlockProvider +
BlockProvider +
TransactionProvider +
TransactionMetaProvider +
TransactionOutputProvider +
@ -45,7 +44,7 @@ pub trait AsSubstore:
impl<T> AsSubstore for T
where T: BlockChain +
IndexedBlockProvider +
BlockProvider +
TransactionProvider +
TransactionMetaProvider +
TransactionOutputProvider +

View File

@ -1,6 +1,8 @@
use std::collections::HashMap;
use parking_lot::RwLock;
use hash::H256;
use bytes::Bytes;
use chain::{Transaction, OutPoint, TransactionOutput};
use chain::{IndexedTransaction, OutPoint, TransactionOutput};
use {TransactionMeta};
/// Should be used to obtain all transactions from canon chain and forks.
@ -14,7 +16,7 @@ pub trait TransactionProvider {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes>;
/// Resolves deserialized transaction by transaction hash.
fn transaction(&self, hash: &H256) -> Option<Transaction>;
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction>;
}
/// Should be used to get canon chain transaction outputs.
@ -32,3 +34,40 @@ pub trait TransactionMetaProvider: Send + Sync {
/// Otherwise returns transaction meta object
fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta>;
}
/// Transaction output provider that caches all read outputs.
///
/// Not intended to be long-lived, because it never clears its internal
/// cache. The backing storage is considered read-only for the lifetime of the cache.
pub struct CachedTransactionOutputProvider<'a> {
backend: &'a TransactionOutputProvider,
cached_outputs: RwLock<HashMap<OutPoint, Option<TransactionOutput>>>,
}
impl<'a> CachedTransactionOutputProvider<'a> {
/// Creates a new cached transaction output provider backed by the passed provider.
pub fn new(backend: &'a TransactionOutputProvider) -> Self {
CachedTransactionOutputProvider {
backend,
cached_outputs: RwLock::new(HashMap::new()),
}
}
}
impl<'a> TransactionOutputProvider for CachedTransactionOutputProvider<'a> {
fn transaction_output(&self, outpoint: &OutPoint, transaction_index: usize) -> Option<TransactionOutput> {
let cached_value = self.cached_outputs.read().get(outpoint).cloned();
match cached_value {
Some(cached_value) => cached_value,
None => {
let value_from_backend = self.backend.transaction_output(outpoint, transaction_index);
self.cached_outputs.write().insert(outpoint.clone(), value_from_backend.clone());
value_from_backend
},
}
}
fn is_spent(&self, outpoint: &OutPoint) -> bool {
self.backend.is_spent(outpoint)
}
}
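A minimal usage sketch for the new cache; the free function and its arguments are hypothetical, and trait-object references follow the crate's pre-2018 style:

```rust
fn spend_check(store: &TransactionOutputProvider, outpoint: &OutPoint) -> bool {
    let cached = CachedTransactionOutputProvider::new(store);
    // The first lookup hits the backend and memoizes the result...
    let first = cached.transaction_output(outpoint, 0);
    // ...repeated lookups of the same outpoint are served from the cache.
    let second = cached.transaction_output(outpoint, 0);
    debug_assert_eq!(first, second);
    // `is_spent` is always delegated to the backend, uncached.
    cached.is_spent(outpoint)
}
```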

View File

@ -6,9 +6,12 @@ use storage;
use network::ConsensusParams;
use primitives::hash::H256;
use super::Error;
use synchronization_verifier::{Verifier, SyncVerifier, VerificationTask,
VerificationSink, BlockVerificationSink, TransactionVerificationSink};
use types::StorageRef;
use synchronization_verifier::{
Verifier, SyncVerifier, VerificationTask, HeadersVerificationSink,
VerificationSink, BlockVerificationSink, TransactionVerificationSink,
PartiallyVerifiedBlock,
};
use types::{PeerIndex, StorageRef};
use utils::OrphanBlocksPool;
use VerificationParameters;
@ -76,7 +79,7 @@ impl BlocksWriter {
let mut verification_queue: VecDeque<chain::IndexedBlock> = self.orphaned_blocks_pool.remove_blocks_for_parent(block.hash());
verification_queue.push_front(block);
while let Some(block) = verification_queue.pop_front() {
self.verifier.verify_block(block);
self.verifier.verify_block(PartiallyVerifiedBlock::NotVerified(block));
if let Some(err) = self.sink.error() {
return Err(err);
}
@ -141,6 +144,16 @@ impl TransactionVerificationSink for BlocksWriterSink {
}
}
impl HeadersVerificationSink for BlocksWriterSink {
fn on_headers_verification_success(&self, _headers: Vec<chain::IndexedBlockHeader>) {
unreachable!("not intended to verify headers")
}
fn on_headers_verification_error(&self, _peer: PeerIndex, _err: String, _hash: H256, _headers: Vec<chain::IndexedBlockHeader>) {
unreachable!("not intended to verify headers")
}
}
#[cfg(test)]
mod tests {
extern crate test_data;
@ -155,7 +168,7 @@ mod tests {
fn default_verification_params() -> VerificationParameters {
VerificationParameters {
verification_level: VerificationLevel::Full,
verification_level: VerificationLevel::FULL,
verification_edge: 0u8.into(),
}
}

View File

@ -1,4 +1,4 @@
use chain::{IndexedTransaction, IndexedBlock};
use chain::{IndexedTransaction, IndexedBlock, IndexedBlockHeader};
use message::types;
use p2p::{InboundSyncConnection, InboundSyncConnectionRef, InboundSyncConnectionStateRef};
use types::{PeersRef, LocalNodeRef, PeerIndex, RequestId};
@ -103,7 +103,8 @@ impl InboundSyncConnection for InboundConnection {
return;
}
self.node.on_headers(self.peer_index, message);
let headers = message.headers.into_iter().map(IndexedBlockHeader::from_raw).collect();
self.node.on_headers(self.peer_index, headers);
}
fn on_mempool(&self, message: types::MemPool) {

View File

@ -58,7 +58,7 @@ pub enum Error {
Verification(String),
}
#[derive(Debug)]
#[derive(Debug, Clone)]
/// Verification parameters.
pub struct VerificationParameters {
/// Blocks verification level.
@ -111,13 +111,29 @@ pub fn create_local_sync_node(consensus: ConsensusParams, db: storage::SharedSto
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone()));
let sync_chain = SyncChain::new(db.clone(), memory_pool.clone());
let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), consensus.clone()));
let light_chain_verifier = Arc::new(ChainVerifier::new(db.clone(), consensus.clone()));
let heavy_chain_verifier = Arc::new(ChainVerifier::new(db.clone(), consensus.clone()));
let sync_executor = SyncExecutor::new(peers.clone());
let sync_server = Arc::new(ServerImpl::new(peers.clone(), db.clone(), memory_pool.clone(), sync_executor.clone()));
let sync_client_core = SynchronizationClientCore::new(sync_client_config, sync_state.clone(), peers.clone(), sync_executor.clone(), sync_chain, chain_verifier.clone());
let sync_client_core = SynchronizationClientCore::new(sync_client_config, sync_state.clone(), peers.clone(), sync_executor.clone(), sync_chain);
let verifier_sink = Arc::new(CoreVerificationSink::new(sync_client_core.clone()));
let verifier = AsyncVerifier::new(chain_verifier, db.clone(), memory_pool.clone(), verifier_sink, verification_params);
let sync_client = SynchronizationClient::new(sync_state.clone(), sync_client_core, verifier);
let light_verifier = AsyncVerifier::new(
"Light verification".into(),
light_chain_verifier,
db.clone(),
memory_pool.clone(),
verifier_sink.clone(),
verification_params.clone(),
);
let heavy_verifier = AsyncVerifier::new(
"Heavy verification".into(),
heavy_chain_verifier,
db.clone(),
memory_pool.clone(),
verifier_sink,
verification_params,
);
let sync_client = SynchronizationClient::new(sync_state.clone(), sync_client_core, light_verifier, heavy_verifier);
Arc::new(SyncNode::new(consensus, db, memory_pool, peers, sync_state, sync_client, sync_server))
}

View File

@ -2,7 +2,7 @@ use std::sync::Arc;
use parking_lot::{Mutex, Condvar};
use time;
use futures::{lazy, finished};
use chain::{IndexedTransaction, IndexedBlock};
use chain::{IndexedTransaction, IndexedBlock, IndexedBlockHeader};
use keys::Address;
use message::types;
use miner::BlockAssembler;
@ -94,9 +94,9 @@ impl<U, V> LocalNode<U, V> where U: Server, V: Client {
}
/// When headers message is received
pub fn on_headers(&self, peer_index: PeerIndex, message: types::Headers) {
trace!(target: "sync", "Got `headers` message from peer#{}. Headers len: {}", peer_index, message.headers.len());
self.client.on_headers(peer_index, message);
pub fn on_headers(&self, peer_index: PeerIndex, headers: Vec<IndexedBlockHeader>) {
trace!(target: "sync", "Got `headers` message from peer#{}. Headers len: {}", peer_index, headers.len());
self.client.on_headers(peer_index, headers);
}
/// When transaction is received
@ -282,7 +282,6 @@ pub mod tests {
use synchronization_server::tests::DummyServer;
use synchronization_verifier::tests::DummyVerifier;
use primitives::bytes::Bytes;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use std::iter::repeat;
use synchronization_peers::PeersImpl;
use utils::SynchronizationState;
@ -312,14 +311,15 @@ pub mod tests {
let executor = DummyTaskExecutor::new();
let server = Arc::new(DummyServer::new());
let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Mainnet)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier);
let mut verifier = match verifier {
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain);
let mut light_verifier = DummyVerifier::default();
light_verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
let mut heavy_verifier = match verifier {
Some(verifier) => verifier,
None => DummyVerifier::default(),
};
verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
let client = SynchronizationClient::new(sync_state.clone(), client_core, verifier);
heavy_verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
let client = SynchronizationClient::new(sync_state.clone(), client_core, light_verifier, heavy_verifier);
let local_node = LocalNode::new(ConsensusParams::new(Network::Mainnet), storage, memory_pool, sync_peers, sync_state, client, server.clone());
(executor, server, local_node)
}

View File

@ -1,7 +1,7 @@
use std::collections::{VecDeque, HashSet};
use std::fmt;
use linked_hash_map::LinkedHashMap;
use chain::{BlockHeader, Transaction, IndexedBlockHeader, IndexedBlock, IndexedTransaction, OutPoint, TransactionOutput};
use chain::{IndexedBlockHeader, IndexedBlock, IndexedTransaction, OutPoint, TransactionOutput};
use storage;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation, FeeCalculator};
use primitives::bytes::Bytes;
@ -51,6 +51,8 @@ impl BlockInsertionResult {
pub enum BlockState {
/// Block is unknown
Unknown,
/// Block header is currently being verified
VerifyingHeader,
/// Scheduled for requesting
Scheduled,
/// Requested from peers
@ -78,6 +80,8 @@ pub enum TransactionState {
/// Synchronization chain information
pub struct Information {
/// Number of blocks that are currently in the header verification state
pub headers_verifying: BlockHeight,
/// Number of block hashes currently scheduled for requesting
pub scheduled: BlockHeight,
/// Number of block hashes currently requested from peers
@ -104,6 +108,8 @@ pub struct Chain {
best_storage_block: storage::BestBlock,
/// Local blocks storage
storage: StorageRef,
/// The set of headers we are currently verifying.
verifying_headers: HashSet<H256>,
/// In-memory queue of block hashes
hash_chain: HashQueueChain,
/// In-memory queue of block headers
@ -149,6 +155,7 @@ impl Chain {
genesis_block_hash: genesis_block_hash,
best_storage_block: best_storage_block,
storage: storage,
verifying_headers: HashSet::new(),
hash_chain: HashQueueChain::with_number_of_queues(NUMBER_OF_QUEUES),
headers_chain: BestHeadersChain::new(best_storage_block_hash),
verifying_transactions: LinkedHashMap::new(),
@ -160,6 +167,7 @@ impl Chain {
/// Get information on current blockchain state
pub fn information(&self) -> Information {
Information {
headers_verifying: self.verifying_headers.len() as BlockHeight,
scheduled: self.hash_chain.len_of(SCHEDULED_QUEUE),
requested: self.hash_chain.len_of(REQUESTED_QUEUE),
verifying: self.hash_chain.len_of(VERIFYING_QUEUE),
@ -183,6 +191,7 @@ impl Chain {
pub fn length_of_blocks_state(&self, state: BlockState) -> BlockHeight {
match state {
BlockState::Stored => self.best_storage_block.number + 1,
BlockState::VerifyingHeader => self.verifying_headers.len() as BlockHeight,
_ => self.hash_chain.len_of(state.to_queue_index()),
}
}
@ -246,7 +255,7 @@ impl Chain {
/// Get block header by number
pub fn block_header_by_number(&self, number: BlockHeight) -> Option<IndexedBlockHeader> {
if number <= self.best_storage_block.number {
self.storage.indexed_block_header(storage::BlockRef::Number(number))
self.storage.block_header(storage::BlockRef::Number(number))
} else {
self.headers_chain.at(number - self.best_storage_block.number)
}
@ -254,7 +263,7 @@ impl Chain {
/// Get block header by hash
pub fn block_header_by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
if let Some(header) = self.storage.indexed_block_header(storage::BlockRef::Hash(hash.clone())) {
if let Some(header) = self.storage.block_header(storage::BlockRef::Hash(hash.clone())) {
return Some(header);
}
self.headers_chain.by_hash(hash)
@ -262,6 +271,10 @@ impl Chain {
/// Get block state
pub fn block_state(&self, hash: &H256) -> BlockState {
if self.verifying_headers.contains(hash) {
return BlockState::VerifyingHeader;
}
match self.hash_chain.contains_in(hash) {
Some(queue_index) => BlockState::from_queue_index(queue_index),
None => if self.storage.contains_block(storage::BlockRef::Hash(hash.clone())) {
@ -292,6 +305,18 @@ impl Chain {
block_locator_hashes
}
/// Add headers to verifying queue
pub fn verify_headers(&mut self, headers: &[IndexedBlockHeader]) {
self.verifying_headers.extend(headers.iter().map(|h| h.hash))
}
/// Remove headers from verifying queue
pub fn headers_verified(&mut self, headers: &[IndexedBlockHeader]) {
for header in headers {
self.verifying_headers.remove(&header.hash);
}
}
/// Schedule blocks hashes for requesting
pub fn schedule_blocks_headers(&mut self, headers: Vec<IndexedBlockHeader>) {
self.hash_chain.push_back_n_at(SCHEDULED_QUEUE, headers.iter().map(|h| h.hash.clone()).collect());
@ -305,18 +330,15 @@ impl Chain {
scheduled
}
/// Add block to verifying queue
pub fn verify_block(&mut self, header: IndexedBlockHeader) {
/// Add block to verifying queue.
///
/// Returns true if the header was already in the headers chain. Being in the
/// chain guarantees that the header has already been pre-verified. The converse
/// does not hold: a header that is not in the chain may still (in rare cases) have been pre-verified.
pub fn verify_block(&mut self, header: IndexedBlockHeader) -> bool {
// insert the header into the in-memory chain if it is not already there (non-headers-first sync)
self.hash_chain.push_back_at(VERIFYING_QUEUE, header.hash.clone());
self.headers_chain.insert(header);
}
/// Add blocks to verifying queue
pub fn verify_blocks(&mut self, blocks: Vec<IndexedBlockHeader>) {
for block in blocks {
self.verify_block(block);
}
self.headers_chain.insert(header)
}
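A hedged sketch of how a caller might consume the returned flag; the call site below is an assumption for illustration, not taken from this diff:

```rust
// Skip scheduling header verification for a header that was already in
// the in-memory chain, since such headers are known to be pre-verified.
let already_pre_verified = chain.verify_block(header.clone());
if !already_pre_verified {
    light_verifier.verify_headers(peer_index, vec![header]);
}
```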
/// Moves n blocks from requested queue to verifying queue
@ -406,7 +428,7 @@ impl Chain {
// re-verify all transactions from the old main branch's blocks
let old_main_blocks_transactions = origin.decanonized_route.into_iter()
.flat_map(|block_hash| self.storage.indexed_block_transactions(block_hash.into()))
.flat_map(|block_hash| self.storage.block_transactions(block_hash.into()))
.collect::<Vec<_>>();
trace!(target: "sync", "insert_best_block, old_main_blocks_transactions: {:?}",
@ -661,7 +683,7 @@ impl storage::TransactionProvider for Chain {
.or_else(|| self.storage.transaction_bytes(hash))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction> {
self.memory_pool.read().transaction(hash)
.or_else(|| self.storage.transaction(hash))
}
@ -682,20 +704,27 @@ impl storage::TransactionOutputProvider for Chain {
impl storage::BlockHeaderProvider for Chain {
fn block_header_bytes(&self, block_ref: storage::BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
self.block_header(block_ref).map(|h| serialize(&h.raw))
}
fn block_header(&self, block_ref: storage::BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: storage::BlockRef) -> Option<IndexedBlockHeader> {
match block_ref {
storage::BlockRef::Hash(hash) => self.block_header_by_hash(&hash).map(|h| h.raw),
storage::BlockRef::Number(n) => self.block_header_by_number(n).map(|h| h.raw),
storage::BlockRef::Hash(hash) => self.block_header_by_hash(&hash),
storage::BlockRef::Number(n) => self.block_header_by_number(n),
}
}
}
impl fmt::Debug for Information {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[sch:{} -> req:{} -> vfy:{} -> stored: {}]", self.scheduled, self.requested, self.verifying, self.stored)
write!(
f,
"[sch:{} > req:{} > vfy:{} > db:{}]",
self.scheduled,
self.requested,
self.verifying,
self.stored,
)
}
}

View File

@ -1,6 +1,6 @@
use std::sync::Arc;
use parking_lot::Mutex;
use chain::{IndexedTransaction, IndexedBlock};
use chain::{IndexedTransaction, IndexedBlock, IndexedBlockHeader};
use message::types;
use synchronization_executor::TaskExecutor;
use synchronization_verifier::{Verifier, TransactionVerificationSink};
@ -124,7 +124,7 @@ pub trait Client : Send + Sync + 'static {
fn on_connect(&self, peer_index: PeerIndex);
fn on_disconnect(&self, peer_index: PeerIndex);
fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv);
fn on_headers(&self, peer_index: PeerIndex, message: types::Headers);
fn on_headers(&self, peer_index: PeerIndex, headers: Vec<IndexedBlockHeader>);
fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock);
fn on_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction);
fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound);
@ -135,14 +135,16 @@ pub trait Client : Send + Sync + 'static {
/// Synchronization client facade
pub struct SynchronizationClient<T: TaskExecutor, U: Verifier> {
/// Verification mutex
verification_lock: Mutex<()>,
/// Shared client state
shared_state: SynchronizationStateRef,
/// Client core
core: ClientCoreRef<SynchronizationClientCore<T>>,
/// Verifier
verifier: U,
/// Verification mutex
heavy_verification_lock: Mutex<()>,
/// Verifier that performs heavy verifications (blocks during sync + transactions).
heavy_verifier: U,
/// Verifier that performs lightweight verifications (headers during sync).
light_verifier: U,
}
impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Verifier {
@ -158,8 +160,11 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
self.core.lock().on_inventory(peer_index, message);
}
fn on_headers(&self, peer_index: PeerIndex, message: types::Headers) {
self.core.lock().on_headers(peer_index, message);
fn on_headers(&self, peer_index: PeerIndex, headers: Vec<IndexedBlockHeader>) {
let headers_to_verify = self.core.lock().on_headers(peer_index, headers);
if let Some(headers_to_verify) = headers_to_verify {
self.light_verifier.verify_headers(peer_index, headers_to_verify);
}
}
fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
@ -169,13 +174,13 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
{
// verification tasks must be scheduled in the same order as they were built in on_block
// => here we use heavy_verification_lock for this
let _verification_lock = self.verification_lock.lock();
let _verification_lock = self.heavy_verification_lock.lock();
let blocks_to_verify = self.core.lock().on_block(peer_index, block);
// verify blocks
if let Some(mut blocks_to_verify) = blocks_to_verify {
while let Some(block) = blocks_to_verify.pop_front() {
self.verifier.verify_block(block);
self.heavy_verifier.verify_block(block);
}
}
}
@ -201,7 +206,7 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
// => we should verify blocks we mine
let next_block_height = self.shared_state.best_storage_block_height() + 1;
while let Some(tx) = transactions_to_verify.pop_front() {
self.verifier.verify_transaction(next_block_height, tx);
self.heavy_verifier.verify_transaction(next_block_height, tx);
}
}
}
@ -219,7 +224,7 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
let next_block_height = self.shared_state.best_storage_block_height() + 1;
while let Some(tx) = transactions_to_verify.pop_front() {
self.verifier.verify_transaction(next_block_height, tx);
self.heavy_verifier.verify_transaction(next_block_height, tx);
}
Ok(())
}
@ -231,12 +236,18 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
impl<T, U> SynchronizationClient<T, U> where T: TaskExecutor, U: Verifier {
/// Create new synchronization client
pub fn new(shared_state: SynchronizationStateRef, core: ClientCoreRef<SynchronizationClientCore<T>>, verifier: U) -> Arc<Self> {
pub fn new(
shared_state: SynchronizationStateRef,
core: ClientCoreRef<SynchronizationClientCore<T>>,
light_verifier: U,
heavy_verifier: U,
) -> Arc<Self> {
Arc::new(SynchronizationClient {
verification_lock: Mutex::new(()),
shared_state: shared_state,
core: core,
verifier: verifier,
light_verifier: light_verifier,
heavy_verification_lock: Mutex::new(()),
heavy_verifier: heavy_verifier,
})
}
}

View File

@ -4,29 +4,32 @@ use std::collections::hash_map::Entry;
use std::sync::Arc;
use futures::Future;
use parking_lot::Mutex;
use rand::{thread_rng, Rng};
use time::precise_time_s;
use chain::{IndexedBlockHeader, IndexedTransaction, IndexedBlock};
use message::types;
use message::common::{InventoryType, InventoryVector};
use miner::transaction_fee_rate;
use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use synchronization_chain::{Chain, BlockState, TransactionState, BlockInsertionResult};
use synchronization_executor::{Task, TaskExecutor};
use synchronization_manager::ManagementWorker;
use synchronization_peers_tasks::PeersTasks;
use synchronization_verifier::{VerificationSink, BlockVerificationSink, TransactionVerificationSink, VerificationTask};
use synchronization_verifier::{
VerificationSink, HeadersVerificationSink, BlockVerificationSink,
TransactionVerificationSink, VerificationTask, PartiallyVerifiedBlock,
};
use types::{BlockHeight, ClientCoreRef, PeersRef, PeerIndex, SynchronizationStateRef, EmptyBoxFuture, SyncListenerRef};
use utils::{AverageSpeedMeter, MessageBlockHeadersProvider, OrphanBlocksPool, OrphanTransactionsPool, HashPosition};
use utils::{AverageSpeedMeter, OrphanBlocksPool, OrphanTransactionsPool, HashPosition};
#[cfg(test)] use synchronization_peers_tasks::{Information as PeersTasksInformation};
#[cfg(test)] use synchronization_chain::{Information as ChainInformation};
/// Approximate maximal number of blocks hashes in scheduled queue.
const MAX_SCHEDULED_HASHES: BlockHeight = 4 * 1024;
/// Approximate maximal number of blocks hashes in requested queue.
const MAX_REQUESTED_BLOCKS: BlockHeight = 256;
const MAX_REQUESTED_BLOCKS: BlockHeight = 512;
/// Approximate maximal number of blocks in verifying queue.
const MAX_VERIFYING_BLOCKS: BlockHeight = 256;
const MAX_VERIFYING_BLOCKS: BlockHeight = 512;
/// Minimum number of blocks to request from peer
const MIN_BLOCKS_IN_REQUEST: BlockHeight = 32;
/// Maximum number of blocks to request from peer
@ -41,6 +44,8 @@ const SYNC_SPEED_BLOCKS_TO_INSPECT: usize = 512;
const BLOCKS_SPEED_BLOCKS_TO_INSPECT: usize = 512;
/// Minimal time between duplicated blocks requests.
const MIN_BLOCK_DUPLICATION_INTERVAL_S: f64 = 10_f64;
/// Time to wait before duplicating a headers request.
const MIN_HEADERS_DUPLICATION_INTERVAL_S: f64 = 2_f64;
/// Maximal number of blocks in duplicate requests.
const MAX_BLOCKS_IN_DUPLICATE_REQUEST: BlockHeight = 4;
/// Minimal number of blocks in duplicate requests.
@ -67,8 +72,8 @@ pub trait ClientCore {
fn on_connect(&mut self, peer_index: PeerIndex);
fn on_disconnect(&mut self, peer_index: PeerIndex);
fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv);
fn on_headers(&mut self, peer_index: PeerIndex, message: types::Headers);
fn on_block(&mut self, peer_index: PeerIndex, block: IndexedBlock) -> Option<VecDeque<IndexedBlock>>;
fn on_headers(&mut self, peer_index: PeerIndex, headers: Vec<IndexedBlockHeader>) -> Option<Vec<IndexedBlockHeader>>;
fn on_block(&mut self, peer_index: PeerIndex, block: IndexedBlock) -> Option<VecDeque<PartiallyVerifiedBlock>>;
fn on_transaction(&mut self, peer_index: PeerIndex, transaction: IndexedTransaction) -> Option<VecDeque<IndexedTransaction>>;
fn on_notfound(&mut self, peer_index: PeerIndex, message: types::NotFound);
fn after_peer_nearly_blocks_verified(&mut self, peer_index: PeerIndex, future: EmptyBoxFuture);
@ -105,10 +110,6 @@ pub struct SynchronizationClientCore<T: TaskExecutor> {
orphaned_blocks_pool: OrphanBlocksPool,
/// Orphaned transactions pool.
orphaned_transactions_pool: OrphanTransactionsPool,
/// Chain verifier
chain_verifier: Arc<ChainVerifier>,
/// Verify block headers?
verify_headers: bool,
/// Verifying blocks by peer
verifying_blocks_by_peer: HashMap<H256, PeerIndex>,
/// Verifying blocks futures
@ -127,6 +128,10 @@ pub struct SynchronizationClientCore<T: TaskExecutor> {
listener: Option<SyncListenerRef>,
/// Time of last duplicated blocks request.
last_dup_time: f64,
/// Timestamp of the last receipt of NEW headers.
new_headers_receival_timestamp: f64,
/// Best block number when the last headers request was sent.
last_headers_request_best_number: u32,
}
/// Verification sink for synchronization client core
@ -166,16 +171,6 @@ enum AppendTransactionError {
Orphan(HashSet<H256>),
}
/// Blocks headers verification result
enum BlocksHeadersVerificationResult {
/// Skip these blocks headers
Skip,
/// Error during verification of header with given index
Error(usize),
/// Successful verification
Success,
}
impl State {
pub fn is_saturated(&self) -> bool {
match *self {
@ -259,11 +254,8 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
/// Try to queue synchronization of unknown blocks when blocks headers are received.
fn on_headers(&mut self, peer_index: PeerIndex, message: types::Headers) {
assert!(!message.headers.is_empty(), "This must be checked in incoming connection");
// transform to indexed headers
let mut headers: Vec<_> = message.headers.into_iter().map(IndexedBlockHeader::from_raw).collect();
fn on_headers(&mut self, peer_index: PeerIndex, headers: Vec<IndexedBlockHeader>) -> Option<Vec<IndexedBlockHeader>> {
assert!(!headers.is_empty(), "This must be checked in incoming connection");
// update peers to select next tasks
self.peers_tasks.on_headers_received(peer_index);
@ -287,82 +279,101 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
self.peers.misbehaving(peer_index, "Too many failures.");
}
return;
return None;
}
// find first unknown header position
// optimization: normally, the first header will be unknown
let num_headers = headers.len();
let first_unknown_index = match self.chain.block_state(&header0.hash) {
BlockState::Unknown => 0,
_ => {
// optimization: if last header is known, then all headers are also known
let header_last = &headers[num_headers - 1];
match self.chain.block_state(&header_last.hash) {
BlockState::Unknown => 1 + headers.iter().skip(1)
.position(|header| self.chain.block_state(&header.hash) == BlockState::Unknown)
.expect("last header has UnknownState; we are searching for first unknown header; qed"),
// else all headers are known
_ => {
trace!(target: "sync", "Ignoring {} known headers from peer#{}", headers.len(), peer_index);
// but this peer is still useful for synchronization
self.peers_tasks.useful_peer(peer_index);
return;
},
}
}
};
let headers_in_message = headers.len();
let headers = self.find_unknown_headers(headers);
if headers.is_empty() {
trace!(target: "sync", "Ignoring {} known headers from peer#{}", headers_in_message, peer_index);
// but this peer is still useful for synchronization
self.peers_tasks.useful_peer(peer_index);
return None;
}
// validate blocks headers before scheduling
let last_known_hash = if first_unknown_index > 0 { headers[first_unknown_index - 1].hash.clone() } else { header0.raw.previous_header_hash.clone() };
let mut last_known_hash = headers[0].raw.previous_header_hash;
if self.config.close_connection_on_bad_block && self.chain.block_state(&last_known_hash) == BlockState::DeadEnd {
self.peers.misbehaving(peer_index, &format!("Provided after dead-end block {}", last_known_hash.to_reversed_str()));
return;
return None;
}
match self.verify_headers(peer_index, last_known_hash, &headers[first_unknown_index..num_headers]) {
BlocksHeadersVerificationResult::Error(error_index) => self.chain.mark_dead_end_block(&headers[first_unknown_index + error_index].hash),
BlocksHeadersVerificationResult::Skip => (),
BlocksHeadersVerificationResult::Success => {
// report progress
let num_new_headers = num_headers - first_unknown_index;
trace!(target: "sync", "New {} headers from peer#{}. First {:?}, last: {:?}",
num_new_headers,
for (header_index, header) in headers.iter().enumerate() {
// check that this header is direct child of previous header
if header.raw.previous_header_hash != last_known_hash {
self.peers.misbehaving(
peer_index,
headers[first_unknown_index].hash.to_reversed_str(),
headers[num_headers - 1].hash.to_reversed_str()
&format!(
"Neighbour headers in `headers` message are unlinked: Prev: {}, PrevLink: {}, Curr: {}",
last_known_hash.to_reversed_str(),
header.raw.previous_header_hash.to_reversed_str(),
header.hash.to_reversed_str(),
),
);
return None;
}
// prepare new headers array
let new_headers = headers.split_off(first_unknown_index);
self.chain.schedule_blocks_headers(new_headers);
// check that we do not already know this header
// if some header is known => there has been a verification error => all headers should be ignored
// see when_previous_block_verification_failed_fork_is_not_requested for details
match self.chain.block_state(&header.hash) {
BlockState::Unknown => (),
BlockState::DeadEnd if self.config.close_connection_on_bad_block => {
self.peers.misbehaving(
peer_index,
&format!(
"Provided dead-end block {:?}",
header.hash.to_reversed_str(),
),
);
return None;
},
block_state => {
trace!(
target: "sync",
"Ignoring {} headers from peer#{} - known ({:?}) header {} at the {}/{} ({}...{})",
headers.len(), peer_index, block_state, header.hash.to_reversed_str(), header_index,
headers.len(), headers[0].hash.to_reversed_str(),
headers[headers.len() - 1].hash.to_reversed_str());
self.peers_tasks.useful_peer(peer_index);
return None;
},
}
// switch to synchronization state
if !self.state.is_synchronizing() {
if self.chain.length_of_blocks_state(BlockState::Scheduled) +
self.chain.length_of_blocks_state(BlockState::Requested) == 1 {
self.switch_to_nearly_saturated_state();
} else {
self.switch_to_synchronization_state();
}
}
// these peers have supplied us with new headers => useful indeed
self.peers_tasks.useful_peer(peer_index);
// and execute tasks
self.execute_synchronization_tasks(None, None);
},
last_known_hash = header.hash;
}
// report progress
trace!(target: "sync", "New {} headers from peer#{}. First {:?}, last: {:?}",
headers.len(),
peer_index,
headers[0].hash.to_reversed_str(),
headers[headers.len() - 1].hash.to_reversed_str()
);
// peer has supplied us with new headers => useful indeed
self.peers_tasks.useful_peer(peer_index);
// remember the moment we last saw new headers
self.new_headers_receival_timestamp = precise_time_s();
// remember headers that we're verifying
self.chain.verify_headers(&headers);
Some(headers)
}
fn on_block(&mut self, peer_index: PeerIndex, block: IndexedBlock) -> Option<VecDeque<IndexedBlock>> {
fn on_block(&mut self, peer_index: PeerIndex, block: IndexedBlock) -> Option<VecDeque<PartiallyVerifiedBlock>> {
// update peers to select next tasks
self.peers_tasks.on_block_received(peer_index, &block.header.hash);
// prepare list of blocks to verify + make all required changes to the chain
let mut result: Option<VecDeque<IndexedBlock>> = None;
let mut result = None;
let block_state = self.chain.block_state(&block.header.hash);
match block_state {
BlockState::Verifying | BlockState::Stored => {
BlockState::VerifyingHeader | BlockState::Verifying | BlockState::Stored => {
// remember peer as useful
// and do nothing else, because we have already processed this block before
self.peers_tasks.useful_peer(peer_index);
@ -414,32 +425,34 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
}
},
BlockState::Verifying | BlockState::Stored => {
BlockState::VerifyingHeader | BlockState::Verifying | BlockState::Stored => {
// update synchronization speed
self.sync_speed_meter.checkpoint();
// remember peer as useful
self.peers_tasks.useful_peer(peer_index);
// schedule verification
let mut blocks_to_verify: VecDeque<IndexedBlock> = VecDeque::new();
blocks_to_verify.extend(self.orphaned_blocks_pool.remove_blocks_for_parent(&block.header.hash));
blocks_to_verify.push_front(block);
// forget blocks we are going to process
let blocks_hashes_to_forget: Vec<_> = blocks_to_verify.iter().map(|b| b.hash().clone()).collect();
self.chain.forget_blocks_leave_header(&blocks_hashes_to_forget);
let mut blocks_to_verify_hashes = vec![*block.hash()];
let orphaned_blocks = self.orphaned_blocks_pool.remove_blocks_for_parent(&block.header.hash);
blocks_to_verify_hashes.extend(orphaned_blocks.iter().map(IndexedBlock::hash).cloned());
self.chain.forget_blocks_leave_header(&blocks_to_verify_hashes);
// remember that we are verifying these blocks
let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|b| b.header.clone()).collect();
self.chain.verify_blocks(blocks_headers_to_verify);
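// wrap blocks depending on whether their headers have already been verified:
// if the chain reports the header as pre-verified, the heavy verifier may skip
// header checks (see HINT_HEADER_PRE_VERIFIED)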
let blocks_to_verify = ::std::iter::once(block).chain(orphaned_blocks)
.map(|block| if self.chain.verify_block(block.header.clone()) {
PartiallyVerifiedBlock::HeaderPreVerified(block)
} else {
PartiallyVerifiedBlock::NotVerified(block)
})
.collect::<VecDeque<_>>();
// remember that we are verifying block from this peer
for verifying_block_hash in blocks_to_verify.iter().map(|b| b.hash().clone()) {
self.verifying_blocks_by_peer.insert(verifying_block_hash, peer_index);
for verifying_block_hash in &blocks_to_verify_hashes {
self.verifying_blocks_by_peer.insert(*verifying_block_hash, peer_index);
}
match self.verifying_blocks_futures.entry(peer_index) {
Entry::Occupied(mut entry) => {
entry.get_mut().0.extend(blocks_to_verify.iter().map(|b| b.hash().clone()));
entry.get_mut().0.extend(blocks_to_verify_hashes);
},
Entry::Vacant(entry) => {
let block_hashes: HashSet<_> = blocks_to_verify.iter().map(|b| b.hash().clone()).collect();
entry.insert((block_hashes, Vec::new()));
entry.insert((blocks_to_verify_hashes.into_iter().collect(), Vec::new()));
}
}
result = Some(blocks_to_verify);
@ -579,20 +592,31 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
let mut blocks_requests: Option<Vec<H256>> = None;
let blocks_idle_peers: Vec<_> = self.peers_tasks.idle_peers_for_blocks().iter().cloned().collect();
{
// check if we can query some blocks headers
let headers_idle_peers: Vec<_> = self.peers_tasks.idle_peers_for_headers().iter().cloned().collect();
if !headers_idle_peers.is_empty() {
// check if we can query some blocks headers. Since the same block locator hashes are sent to all
// peers, it makes sense to send the request to a single peer only. But if we haven't received new
// headers for a non-negligible time, we may ask more than one peer for the same headers
let num_headers_idle_peers = self.compute_num_peers_for_headers_request();
if num_headers_idle_peers > 0 {
let scheduled_hashes_len = self.chain.length_of_blocks_state(BlockState::Scheduled);
if scheduled_hashes_len < MAX_SCHEDULED_HASHES {
for header_peer in &headers_idle_peers {
self.peers_tasks.on_headers_requested(*header_peer);
}
let mut headers_idle_peers: Vec<_> = self.peers_tasks.idle_peers_for_headers().iter().cloned().collect();
thread_rng().shuffle(headers_idle_peers.as_mut_slice());
let block_locator_hashes = self.chain.block_locator_hashes();
let headers_tasks = headers_idle_peers
.iter()
.map(move |peer_index| Task::GetHeaders(*peer_index, types::GetHeaders::with_block_locator_hashes(block_locator_hashes.clone())));
tasks.extend(headers_tasks);
let num_headers_idle_peers = ::std::cmp::min(num_headers_idle_peers, headers_idle_peers.len());
let headers_idle_peers = &headers_idle_peers[..num_headers_idle_peers];
if !headers_idle_peers.is_empty() {
for header_peer in headers_idle_peers {
self.peers_tasks.on_headers_requested(*header_peer);
}
let block_locator_hashes = self.chain.block_locator_hashes();
let headers_tasks = headers_idle_peers
.iter()
.map(move |peer_index| Task::GetHeaders(*peer_index, types::GetHeaders::with_block_locator_hashes(block_locator_hashes.clone())));
tasks.extend(headers_tasks);
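// remember the best block at request time: if the chain advances before a response
// arrives, compute_num_peers_for_headers_request allows an immediate re-request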
self.last_headers_request_best_number = self.chain.best_block().number;
}
}
}
@ -694,7 +718,8 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
fn try_switch_to_saturated_state(&mut self) -> bool {
let switch_to_saturated = {
// requested block is received => move to saturated state if there are no more blocks
self.chain.length_of_blocks_state(BlockState::Scheduled) == 0
self.chain.length_of_blocks_state(BlockState::VerifyingHeader) == 0
&& self.chain.length_of_blocks_state(BlockState::Scheduled) == 0
&& self.chain.length_of_blocks_state(BlockState::Requested) == 0
};
@ -717,6 +742,16 @@ impl<T> CoreVerificationSink<T> where T: TaskExecutor {
impl<T> VerificationSink for CoreVerificationSink<T> where T: TaskExecutor {
}
impl<T> HeadersVerificationSink for CoreVerificationSink<T> where T: TaskExecutor {
fn on_headers_verification_success(&self, headers: Vec<IndexedBlockHeader>) {
self.core.lock().on_headers_verification_success(headers)
}
fn on_headers_verification_error(&self, peer: PeerIndex, error: String, hash: H256, headers: Vec<IndexedBlockHeader>) {
self.core.lock().on_headers_verification_error(peer, error, hash, headers)
}
}
impl<T> BlockVerificationSink for CoreVerificationSink<T> where T: TaskExecutor {
/// Process successful block verification
fn on_block_verification_success(&self, block: IndexedBlock) -> Option<Vec<VerificationTask>> {
@ -743,7 +778,7 @@ impl<T> TransactionVerificationSink for CoreVerificationSink<T> where T: TaskExe
impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
/// Create new synchronization client core
pub fn new(config: Config, shared_state: SynchronizationStateRef, peers: PeersRef, executor: Arc<T>, chain: Chain, chain_verifier: Arc<ChainVerifier>) -> ClientCoreRef<Self> {
pub fn new(config: Config, shared_state: SynchronizationStateRef, peers: PeersRef, executor: Arc<T>, chain: Chain) -> ClientCoreRef<Self> {
let sync = Arc::new(Mutex::new(
SynchronizationClientCore {
shared_state: shared_state,
@ -755,8 +790,6 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
chain: chain,
orphaned_blocks_pool: OrphanBlocksPool::new(),
orphaned_transactions_pool: OrphanTransactionsPool::new(),
chain_verifier: chain_verifier,
verify_headers: true,
verifying_blocks_by_peer: HashMap::new(),
verifying_blocks_futures: HashMap::new(),
verifying_transactions_sinks: HashMap::new(),
@ -766,6 +799,8 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
config: config,
listener: None,
last_dup_time: 0f64,
new_headers_receival_timestamp: 0f64,
last_headers_request_best_number: 0,
}
));
@ -820,12 +855,6 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
&mut self.orphaned_transactions_pool
}
/// Verify block headers or not?
#[cfg(test)]
pub fn set_verify_headers(&mut self, verify: bool) {
self.verify_headers = verify;
}
/// Print synchronization information
pub fn print_synchronization_information(&mut self) {
if let State::Synchronizing(timestamp, num_of_blocks) = self.state {
@ -857,56 +886,6 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
}
/// Verify and select unknown headers for scheduling
fn verify_headers(&mut self, peer_index: PeerIndex, last_known_hash: H256, headers: &[IndexedBlockHeader]) -> BlocksHeadersVerificationResult {
// validate blocks headers before scheduling
let mut last_known_hash = &last_known_hash;
let mut headers_provider = MessageBlockHeadersProvider::new(&self.chain, self.chain.best_block_header().number);
for (header_index, header) in headers.iter().enumerate() {
// check that this header is direct child of previous header
if &header.raw.previous_header_hash != last_known_hash {
self.peers.misbehaving(peer_index, &format!("Neighbour headers in `headers` message are unlinked: Prev: {}, PrevLink: {}, Curr: {}",
last_known_hash.to_reversed_str(), header.raw.previous_header_hash.to_reversed_str(), header.hash.to_reversed_str()));
return BlocksHeadersVerificationResult::Skip;
}
// check that we do not know all blocks in range [first_unknown_index..]
// if we know some block => there has been verification error => all headers should be ignored
// see when_previous_block_verification_failed_fork_is_not_requested for details
match self.chain.block_state(&header.hash) {
BlockState::Unknown => (),
BlockState::DeadEnd if self.config.close_connection_on_bad_block => {
self.peers.misbehaving(peer_index, &format!("Provided dead-end block {:?}", header.hash.to_reversed_str()));
return BlocksHeadersVerificationResult::Skip;
},
block_state => {
trace!(target: "sync", "Ignoring {} headers from peer#{} - known ({:?}) header {} at the {}/{} ({}...{})",
headers.len(), peer_index, block_state, header.hash.to_reversed_str(), header_index, headers.len(),
headers[0].hash.to_reversed_str(), headers[headers.len() - 1].hash.to_reversed_str());
self.peers_tasks.useful_peer(peer_index);
return BlocksHeadersVerificationResult::Skip;
},
}
// verify header
if self.verify_headers {
if let Err(error) = self.chain_verifier.verify_block_header(&headers_provider, &header.hash, &header.raw) {
if self.config.close_connection_on_bad_block {
self.peers.misbehaving(peer_index, &format!("Error verifying header {} from `headers`: {:?}", header.hash.to_reversed_str(), error));
} else {
warn!(target: "sync", "Error verifying header {} from `headers` message: {:?}", header.hash.to_reversed_str(), error);
}
return BlocksHeadersVerificationResult::Error(header_index);
}
}
last_known_hash = &header.hash;
headers_provider.append_header(header.hash.clone(), header.raw.clone());
}
BlocksHeadersVerificationResult::Success
}
/// Process new peer transaction
fn process_peer_transaction(&mut self, _peer_index: Option<PeerIndex>, transaction: IndexedTransaction, relay: bool) -> Option<VecDeque<IndexedTransaction>> {
match self.try_append_transaction(transaction.clone(), relay) {
@ -952,6 +931,10 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
fn prepare_blocks_requests_tasks(&mut self, limits: &BlocksRequestLimits, mut peers: Vec<PeerIndex>, mut hashes: Vec<H256>) -> Vec<Task> {
use std::mem::swap;
if hashes.is_empty() {
return Vec::new();
}
// ask fastest peers for hashes at the beginning of `hashes`
self.peers_tasks.sort_peers_for_blocks(&mut peers);
@ -1052,6 +1035,71 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
}
fn find_unknown_headers(&self, mut headers: Vec<IndexedBlockHeader>) -> Vec<IndexedBlockHeader> {
// find first unknown header position
// optimization: normally, the first header will be unknown
let num_headers = headers.len();
let first_unknown_index = match self.chain.block_state(&headers[0].hash) {
BlockState::Unknown => 0,
_ => {
// optimization: if last header is known, then all headers are also known
let header_last = &headers[num_headers - 1];
match self.chain.block_state(&header_last.hash) {
BlockState::Unknown => 1 + headers.iter().skip(1)
.position(|header| self.chain.block_state(&header.hash) == BlockState::Unknown)
.expect("last header has UnknownState; we are searching for first unknown header; qed"),
// else all headers are known
_ => headers.len(),
}
}
};
if first_unknown_index == 0 { headers } else { headers.split_off(first_unknown_index) }
}
fn on_headers_verification_success(&mut self, headers: Vec<IndexedBlockHeader>) {
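// headers leave the VerifyingHeader state and are scheduled for block download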
self.chain.headers_verified(&headers);
self.chain.schedule_blocks_headers(headers);
// switch to synchronization state
if !self.state.is_synchronizing() {
if self.chain.length_of_blocks_state(BlockState::VerifyingHeader) +
self.chain.length_of_blocks_state(BlockState::Scheduled) +
self.chain.length_of_blocks_state(BlockState::Requested) == 1 {
self.switch_to_nearly_saturated_state();
} else {
self.switch_to_synchronization_state();
}
}
self.execute_synchronization_tasks(None, None);
}
fn on_headers_verification_error(&mut self, peer: PeerIndex, error: String, hash: H256, headers: Vec<IndexedBlockHeader>) {
self.chain.headers_verified(&headers);
if self.config.close_connection_on_bad_block {
self.peers.misbehaving(
peer,
&format!(
"Error verifying header {} from `headers`: {:?}",
hash.to_reversed_str(),
error,
),
);
} else {
warn!(
target: "sync",
"Error verifying header {} from `headers` message: {:?}",
hash.to_reversed_str(),
error,
);
}
self.chain.mark_dead_end_block(&hash);
self.execute_synchronization_tasks(None, None);
}
fn on_block_verification_success(&mut self, block: IndexedBlock) -> Option<Vec<VerificationTask>> {
// update block processing speed
self.block_speed_meter.checkpoint();
@ -1092,7 +1140,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// relay block to our peers
if needs_relay && (self.state.is_saturated() || self.state.is_nearly_saturated()) {
for block_hash in insert_result.canonized_blocks_hashes {
if let Some(block) = self.chain.storage().indexed_block(block_hash.into()) {
if let Some(block) = self.chain.storage().block(block_hash.into()) {
self.executor.execute(Task::RelayNewBlock(block));
}
}
@ -1216,6 +1264,27 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
block_entry.remove_entry();
}
}
fn compute_num_peers_for_headers_request(&self) -> usize {
// if there are no active requests => ask immediately
if self.peers_tasks.ordered_headers_requests().is_empty() {
return 1;
}
// if the last headers request was sent with different block locator hashes
// => ask immediately
if self.chain.best_block().number != self.last_headers_request_best_number {
return 1;
}
// if there are active requests, but we haven't seen NEW headers for the last
// N seconds, we need to duplicate the request
if precise_time_s() - self.new_headers_receival_timestamp > MIN_HEADERS_DUPLICATION_INTERVAL_S {
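// ask all idle peers - the caller clamps this to the number of available peers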
return ::std::usize::MAX;
}
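// otherwise a fresh request is still in flight => do not ask anyone yet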
0
}
}
impl Default for BlocksRequestLimits {
@ -1301,17 +1370,16 @@ pub mod tests {
let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier.clone());
{
client_core.lock().set_verify_headers(false);
}
let mut verifier = verifier.unwrap_or_default();
verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
verifier.set_storage(storage);
verifier.set_memory_pool(memory_pool);
verifier.set_verifier(chain_verifier);
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain);
let mut light_verifier = DummyVerifier::default();
light_verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
let mut heavy_verifier = verifier.unwrap_or_default();
heavy_verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
heavy_verifier.set_storage(storage);
heavy_verifier.set_memory_pool(memory_pool);
heavy_verifier.set_verifier(chain_verifier);
let client = SynchronizationClient::new(sync_state, client_core.clone(), verifier);
let client = SynchronizationClient::new(sync_state, client_core.clone(), light_verifier, heavy_verifier);
(executor, client_core, client)
}
@ -1356,7 +1424,7 @@ pub mod tests {
let block1: Block = test_data::block_h1();
let block2: Block = test_data::block_h2();
sync.on_headers(5, types::Headers::with_headers(vec![block1.block_header.clone()]));
sync.on_headers(5, vec![block1.block_header.clone().into()]);
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![request_block_headers_genesis_and(5, vec![block1.hash()]), request_blocks(5, vec![block1.hash()])]);
assert!(core.lock().information().state.is_nearly_saturated());
@ -1393,7 +1461,10 @@ pub mod tests {
fn synchronization_out_of_order_block_path() {
let (_, core, sync) = create_sync(None, None);
sync.on_headers(5, types::Headers::with_headers(vec![test_data::block_h1().block_header.clone(), test_data::block_h2().block_header.clone()]));
sync.on_headers(5, vec![
test_data::block_h1().block_header.into(),
test_data::block_h2().block_header.into(),
]);
sync.on_block(5, test_data::block_h169().into());
// out-of-order block was presented by the peer
@ -1419,7 +1490,7 @@ pub mod tests {
// not synchronizing after start
assert!(core.lock().information().state.is_saturated());
// receive inventory from new peer#1
sync.on_headers(1, types::Headers::with_headers(vec![block1.block_header.clone()]));
sync.on_headers(1, vec![block1.block_header.clone().into()]);
assert_eq!(core.lock().information().chain.requested, 1);
// synchronization has started && new blocks have been requested
let tasks = executor.take_tasks();
@ -1429,7 +1500,7 @@ pub mod tests {
{
// receive inventory from new peer#2
sync.on_headers(2, types::Headers::with_headers(vec![block1.block_header.clone(), block2.block_header.clone()]));
sync.on_headers(2, vec![block1.block_header.clone().into(), block2.block_header.clone().into()]);
assert_eq!(core.lock().information().chain.requested, 2);
// synchronization has started && new blocks have been requested
let tasks = executor.take_tasks();
@ -1458,7 +1529,7 @@ pub mod tests {
// request new blocks
{
sync.on_headers(1, types::Headers::with_headers(vec![test_data::block_h1().block_header]));
sync.on_headers(1, vec![test_data::block_h1().block_header.into()]);
assert!(core.lock().information().state.is_nearly_saturated());
}
@ -1473,7 +1544,7 @@ pub mod tests {
fn synchronization_not_starting_when_receiving_known_blocks() {
let (executor, core, sync) = create_sync(None, None);
// saturated => receive inventory with known blocks only
sync.on_headers(1, types::Headers::with_headers(vec![test_data::genesis().block_header]));
sync.on_headers(1, vec![test_data::genesis().block_header.into()]);
// => no need to start synchronization
assert!(!core.lock().information().state.is_nearly_saturated());
// => no synchronization tasks are scheduled
@ -1485,16 +1556,13 @@ pub mod tests {
fn synchronization_asks_for_inventory_after_saturating() {
let (executor, _, sync) = create_sync(None, None);
let block = test_data::block_h1();
sync.on_headers(1, types::Headers::with_headers(vec![block.block_header.clone()]));
sync.on_headers(2, types::Headers::with_headers(vec![block.block_header.clone()]));
sync.on_headers(1, vec![block.block_header.clone().into()]);
sync.on_headers(2, vec![block.block_header.clone().into()]);
executor.take_tasks();
sync.on_block(2, block.clone().into());
let tasks = executor.take_tasks();
assert_eq!(tasks.len(), 6);
// TODO: when saturating, RequestBlocksHeaders is sent twice to the peer who has supplied last block:
// 1) from on_block_verification_success
// 2) from switch_to_saturated_state
assert_eq!(tasks.len(), 5);
assert!(tasks.iter().any(|t| t == &request_block_headers_genesis_and(1, vec![block.hash()])));
assert!(tasks.iter().any(|t| t == &request_block_headers_genesis_and(2, vec![block.hash()])));
assert!(tasks.iter().any(|t| t == &Task::MemoryPool(1)));
@ -1508,7 +1576,7 @@ pub mod tests {
let b1 = test_data::block_h1();
let b2 = test_data::block_h2();
sync.on_headers(1, types::Headers::with_headers(vec![b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(1, vec![b1.block_header.clone().into(), b2.block_header.clone().into()]);
let tasks = executor.take_tasks();
assert_eq!(tasks.len(), 2);
@ -1550,7 +1618,7 @@ pub mod tests {
let b1 = test_data::block_h1();
let b2 = test_data::block_h2();
sync.on_headers(1, types::Headers::with_headers(vec![b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(1, vec![b1.block_header.clone().into(), b2.block_header.clone().into()]);
let tasks = executor.take_tasks();
assert_eq!(tasks.len(), 2);
@ -1591,7 +1659,7 @@ pub mod tests {
let (executor, core, sync) = create_sync(None, None);
let b169 = test_data::block_h169();
sync.on_headers(1, types::Headers::with_headers(vec![b169.block_header]));
sync.on_headers(1, vec![b169.block_header.into()]);
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![]);
@ -1611,8 +1679,12 @@ pub mod tests {
let fork1 = test_data::build_n_empty_blocks_from(2, 100, &genesis_header);
let fork2 = test_data::build_n_empty_blocks_from(3, 200, &genesis_header);
sync.on_headers(1, types::Headers::with_headers(vec![fork1[0].block_header.clone(), fork1[1].block_header.clone()]));
sync.on_headers(2, types::Headers::with_headers(vec![fork2[0].block_header.clone(), fork2[1].block_header.clone(), fork2[2].block_header.clone()]));
sync.on_headers(1, vec![fork1[0].block_header.clone().into(), fork1[1].block_header.clone().into()]);
sync.on_headers(2, vec![
fork2[0].block_header.clone().into(),
fork2[1].block_header.clone().into(),
fork2[2].block_header.clone().into(),
]);
let tasks = { executor.take_tasks() };
assert_eq!(tasks, vec![request_block_headers_genesis_and(1, vec![fork1[1].hash(), fork1[0].hash()]),
request_blocks(1, vec![fork1[0].hash(), fork1[1].hash()]),
@ -1667,8 +1739,17 @@ pub mod tests {
let fork1 = test_data::build_n_empty_blocks_from(2, 100, &common_block.block_header);
let fork2 = test_data::build_n_empty_blocks_from(3, 200, &common_block.block_header);
sync.on_headers(1, types::Headers::with_headers(vec![common_block.block_header.clone(), fork1[0].block_header.clone(), fork1[1].block_header.clone()]));
sync.on_headers(2, types::Headers::with_headers(vec![common_block.block_header.clone(), fork2[0].block_header.clone(), fork2[1].block_header.clone(), fork2[2].block_header.clone()]));
sync.on_headers(1, vec![
common_block.block_header.clone().into(),
fork1[0].block_header.clone().into(),
fork1[1].block_header.clone().into(),
]);
sync.on_headers(2, vec![
common_block.block_header.clone().into(),
fork2[0].block_header.clone().into(),
fork2[1].block_header.clone().into(),
fork2[2].block_header.clone().into(),
]);
let tasks = { executor.take_tasks() };
assert_eq!(tasks, vec![request_block_headers_genesis_and(1, vec![fork1[1].hash(), fork1[0].hash(), common_block.hash()]),
@ -1745,7 +1826,7 @@ pub mod tests {
{
// receive inventory from new peer#1
sync.on_headers(1, types::Headers::with_headers(vec![block1.block_header.clone()]));
sync.on_headers(1, vec![block1.block_header.clone().into()]);
// synchronization has started && new blocks have been requested
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![
@ -1756,7 +1837,7 @@ pub mod tests {
{
// receive inventory from new peer#2
sync.on_headers(2, types::Headers::with_headers(vec![block1.block_header.clone(), block2.block_header.clone()]));
sync.on_headers(2, vec![block1.block_header.clone().into(), block2.block_header.clone().into()]);
// synchronization has started && new blocks have been requested
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![
@ -1795,7 +1876,7 @@ pub mod tests {
let b1 = test_data::block_h1();
let b2 = test_data::block_h2();
sync.on_headers(1, types::Headers::with_headers(vec![b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(1, vec![b1.block_header.clone().into(), b2.block_header.clone().into()]);
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![request_block_headers_genesis_and(1, vec![b2.hash().clone(), b1.hash().clone()]), request_blocks(1, vec![b1.hash(), b2.hash()])]);
@ -1820,7 +1901,7 @@ pub mod tests {
let b1 = test_data::block_h1();
let b2 = test_data::block_h2();
sync.on_headers(1, types::Headers::with_headers(vec![b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(1, vec![b1.block_header.clone().into(), b2.block_header.clone().into()]);
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![request_block_headers_genesis_and(1, vec![b2.hash().clone(), b1.hash().clone()]), request_blocks(1, vec![b1.hash(), b2.hash()])]);
@ -1851,7 +1932,7 @@ pub mod tests {
}
let b1 = test_data::block_h1();
sync.on_headers(1, types::Headers::with_headers(vec![b1.block_header.clone()]));
sync.on_headers(1, vec![b1.block_header.clone().into()]);
assert!(core.lock().information().state.is_nearly_saturated());
{ executor.take_tasks(); } // forget tasks
@ -1900,7 +1981,7 @@ pub mod tests {
let b1 = test_data::block_h1();
let b2 = test_data::block_h2();
sync.on_headers(1, types::Headers::with_headers(vec![b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(1, vec![b1.block_header.into(), b2.block_header.into()]);
assert!(core.lock().information().state.is_synchronizing());
@ -1919,7 +2000,7 @@ pub mod tests {
assert_eq!(core.lock().information().chain.transactions.transactions_count, 1);
let b2 = test_data::block_h2();
sync.on_headers(1, types::Headers::with_headers(vec![b2.block_header.clone()]));
sync.on_headers(1, vec![b2.block_header.into()]);
assert!(core.lock().information().state.is_nearly_saturated());
@ -1988,8 +2069,16 @@ pub mod tests {
let (_, _, sync) = create_sync(None, Some(dummy_verifier));
sync.on_headers(1, types::Headers::with_headers(vec![b10.block_header.clone(), b11.block_header.clone(), b12.block_header.clone()]));
sync.on_headers(2, types::Headers::with_headers(vec![b10.block_header.clone(), b21.block_header.clone(), b22.block_header.clone()]));
sync.on_headers(1, vec![
b10.block_header.clone().into(),
b11.block_header.clone().into(),
b12.block_header.clone().into(),
]);
sync.on_headers(2, vec![
b10.block_header.clone().into(),
b21.block_header.clone().into(),
b22.block_header.clone().into(),
]);
sync.on_block(1, b10.clone().into());
sync.on_block(1, b11.into());
@ -1998,8 +2087,12 @@ pub mod tests {
sync.on_block(2, b21.clone().into());
// should not panic here
sync.on_headers(2, types::Headers::with_headers(vec![b10.block_header.clone(), b21.block_header.clone(),
b22.block_header.clone(), b23.block_header.clone()]));
sync.on_headers(2, vec![
b10.block_header.into(),
b21.block_header.into(),
b22.block_header.into(),
b23.block_header.into(),
]);
}
#[test]
@ -2011,7 +2104,7 @@ pub mod tests {
let b2 = test_data::block_builder().header().parent(b1.hash()).build().build();
let b3 = test_data::block_builder().header().parent(b2.hash()).build().build();
sync.on_headers(1, types::Headers::with_headers(vec![b0.block_header.clone(), b1.block_header.clone()]));
sync.on_headers(1, vec![b0.block_header.clone().into(), b1.block_header.clone().into()]);
sync.on_block(1, b0.clone().into());
sync.on_block(1, b1.clone().into());
@ -2038,7 +2131,7 @@ pub mod tests {
]);
}
sync.on_headers(1, types::Headers::with_headers(vec![b3.block_header.clone()]));
sync.on_headers(1, vec![b3.block_header.clone().into()]);
sync.on_block(1, b3.clone().into());
// we were in nearly saturated state => block is relayed
@ -2107,7 +2200,7 @@ pub mod tests {
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_headers(0, types::Headers::with_headers(vec![b0.block_header.clone(), b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(0, vec![b0.block_header.into(), b1.block_header.into(), b2.block_header.into()]);
assert!(!core.lock().peers.enumerate().contains(&0));
}
@ -2125,11 +2218,11 @@ pub mod tests {
chain.mark_dead_end_block(&b1.hash());
}
core.lock().set_verify_headers(true);
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_headers(0, types::Headers::with_headers(vec![b0.block_header.clone(), b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(0, vec![b0.block_header.into(), b1.block_header.into(), b2.block_header.into()]);
assert!(!core.lock().peers.enumerate().contains(&0));
}
@ -2183,9 +2276,9 @@ pub mod tests {
let (executor, core, sync) = create_sync(None, None);
// when peer1 announces 'false' b0
sync.on_headers(1, types::Headers::with_headers(vec![b0.block_header.clone()]));
sync.on_headers(1, vec![b0.block_header.clone().into()]);
// and peer2 announces 'true' b1
sync.on_headers(2, types::Headers::with_headers(vec![b1.block_header.clone(), b2.block_header.clone()]));
sync.on_headers(2, vec![b1.block_header.clone().into(), b2.block_header.clone().into()]);
// check that all blocks are requested
assert_eq!(core.lock().information().chain.requested, 3);
@ -2342,12 +2435,12 @@ pub mod tests {
assert_eq!(data.lock().best_blocks.len(), 0);
// supply with new block header => is_synchronizing is still false
sync.on_headers(0, types::Headers::with_headers(vec![test_data::block_h1().block_header]));
sync.on_headers(0, vec![test_data::block_h1().block_header.into()]);
assert_eq!(data.lock().is_synchronizing, false);
assert_eq!(data.lock().best_blocks.len(), 0);
// supply with 2 new blocks headers => is_synchronizing is true
sync.on_headers(0, types::Headers::with_headers(vec![test_data::block_h2().block_header, test_data::block_h3().block_header]));
sync.on_headers(0, vec![test_data::block_h2().block_header.into(), test_data::block_h3().block_header.into()]);
assert_eq!(data.lock().is_synchronizing, true);
assert_eq!(data.lock().best_blocks.len(), 0);

View File

@ -2,6 +2,7 @@ use std::sync::Arc;
use chain::{IndexedBlock, IndexedTransaction};
use message::common::InventoryVector;
use message::types;
use primitives::hash::H256;
use synchronization_peers::{BlockAnnouncementType, TransactionAnnouncementType};
use types::{PeerIndex, PeersRef, RequestId};
use utils::KnownHashType;
@ -25,7 +26,7 @@ pub enum Task {
/// Send block
Block(PeerIndex, IndexedBlock),
/// Send merkleblock
MerkleBlock(PeerIndex, types::MerkleBlock),
MerkleBlock(PeerIndex, H256, types::MerkleBlock),
/// Send transaction
Transaction(PeerIndex, IndexedTransaction),
/// Send notfound
@ -95,9 +96,8 @@ impl LocalSynchronizationTaskExecutor {
}
}
fn execute_merkleblock(&self, peer_index: PeerIndex, block: types::MerkleBlock) {
fn execute_merkleblock(&self, peer_index: PeerIndex, hash: H256, block: types::MerkleBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
let hash = block.block_header.hash();
trace!(target: "sync", "Sending merkle block {} to peer#{}", hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, hash, KnownHashType::Block);
connection.send_merkleblock(&block);
@ -177,7 +177,7 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
Task::GetHeaders(peer_index, getheaders) => self.execute_getheaders(peer_index, getheaders),
Task::MemoryPool(peer_index) => self.execute_memorypool(peer_index),
Task::Block(peer_index, block) => self.execute_block(peer_index, block),
Task::MerkleBlock(peer_index, block) => self.execute_merkleblock(peer_index, block),
Task::MerkleBlock(peer_index, hash, block) => self.execute_merkleblock(peer_index, hash, block),
Task::Transaction(peer_index, transaction) => self.execute_transaction(peer_index, transaction),
Task::NotFound(peer_index, notfound) => self.execute_notfound(peer_index, notfound),
Task::Inventory(peer_index, inventory) => self.execute_inventory(peer_index, inventory),

View File

@ -269,7 +269,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
}
},
common::InventoryType::MessageBlock => {
if let Some(block) = self.storage.indexed_block(next_item.hash.clone().into()) {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with block {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::Block(peer_index, block));
} else {
@ -277,12 +277,12 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
}
},
common::InventoryType::MessageFilteredBlock => {
if let Some(block) = self.storage.indexed_block(next_item.hash.clone().into()) {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
let message_artefacts = self.peers.build_merkle_block(peer_index, &block);
if let Some(message_artefacts) = message_artefacts {
// send merkleblock first
trace!(target: "sync", "'getblocks' response to peer#{} is ready with merkleblock {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::MerkleBlock(peer_index, message_artefacts.merkleblock));
self.executor.execute(Task::MerkleBlock(peer_index, *block.hash(), message_artefacts.merkleblock));
// also send all matched transactions
for matched_transaction in message_artefacts.matching_transactions {
@ -334,6 +334,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
.map(|block_hash| self.storage.block_header(block_hash.into()))
.take_while(Option::is_some)
.map(Option::unwrap)
.map(|h| h.raw)
.collect();
// empty inventory messages are invalid according to regtests, while empty headers messages are valid
trace!(target: "sync", "'getheaders' response to peer#{} is ready with {} headers", peer_index, headers.len());
@ -375,11 +376,11 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
Some(block_header) => block_header,
};
if let Some(block_number) = self.storage.block_number(&block_header.previous_header_hash) {
if let Some(block_number) = self.storage.block_number(&block_header.raw.previous_header_hash) {
return Some(block_number);
}
block_hash = block_header.previous_header_hash;
block_hash = block_header.raw.previous_header_hash;
}
}
@ -741,7 +742,7 @@ pub mod tests {
let mut index = 0;
let tasks = sync_executor.take_tasks();
match tasks[index] {
Task::MerkleBlock(_, _) => {
Task::MerkleBlock(_, _, _) => {
if get_tx1 {
index += 1;
match tasks[index] {
@ -755,7 +756,7 @@ pub mod tests {
index += 1;
match tasks[index] {
Task::MerkleBlock(_, _) => {
Task::MerkleBlock(_, _, _) => {
if get_tx2 {
index += 1;
match tasks[index] {

View File

@ -5,15 +5,32 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use parking_lot::Mutex;
use time::get_time;
use chain::{IndexedBlock, IndexedTransaction};
use chain::{IndexedBlockHeader, IndexedBlock, IndexedTransaction};
use network::ConsensusParams;
use primitives::hash::H256;
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify,
Error as VerificationError, VerificationLevel};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use types::{PeerIndex, BlockHeight, StorageRef, MemoryPoolRef};
use utils::MemoryPoolTransactionOutputProvider;
use VerificationParameters;
/// Block that is (possibly) partially verified.
#[derive(Debug)]
pub enum PartiallyVerifiedBlock {
/// Block that isn't verified at all.
NotVerified(IndexedBlock),
/// Block whose header has been pre-verified (note that AcceptHeader hasn't been called).
HeaderPreVerified(IndexedBlock),
}
/// Headers verification events sink
pub trait HeadersVerificationSink : Send + Sync + 'static {
/// When headers verification has completed successfully.
fn on_headers_verification_success(&self, headers: Vec<IndexedBlockHeader>);
/// When headers verification has failed.
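/// `hash` is the hash of the first header that failed verification.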
fn on_headers_verification_error(&self, peer: PeerIndex, error: String, hash: H256, headers: Vec<IndexedBlockHeader>);
}
/// Block verification events sink
pub trait BlockVerificationSink : Send + Sync + 'static {
/// When block verification has completed successfully.
@ -31,14 +48,16 @@ pub trait TransactionVerificationSink : Send + Sync + 'static {
}
/// Verification events sink
pub trait VerificationSink : BlockVerificationSink + TransactionVerificationSink {
pub trait VerificationSink : HeadersVerificationSink + BlockVerificationSink + TransactionVerificationSink {
}
/// Verification thread tasks
#[derive(Debug)]
pub enum VerificationTask {
/// Verify headers
VerifyHeaders(PeerIndex, Vec<IndexedBlockHeader>),
/// Verify single block
VerifyBlock(IndexedBlock),
VerifyBlock(PartiallyVerifiedBlock),
/// Verify single transaction
VerifyTransaction(BlockHeight, IndexedTransaction),
/// Stop verification thread
@ -47,8 +66,10 @@ pub enum VerificationTask {
/// Synchronization verifier
pub trait Verifier : Send + Sync + 'static {
/// Verify headers
fn verify_headers(&self, peer: PeerIndex, headers: Vec<IndexedBlockHeader>);
/// Verify block
fn verify_block(&self, block: IndexedBlock);
fn verify_block(&self, block: PartiallyVerifiedBlock);
/// Verify transaction
fn verify_transaction(&self, height: BlockHeight, transaction: IndexedTransaction);
}
@ -67,10 +88,36 @@ pub struct ChainVerifierWrapper {
pub verifier: Arc<ChainVerifier>,
/// Verification parameters.
verification_params: VerificationParameters,
/// Is verification edge passed.
/// True if the verification edge has been passed && full verification is required.
pub enforce_full_verification: AtomicBool,
}
impl PartiallyVerifiedBlock {
/// Returns hash of the block.
pub fn hash(&self) -> &H256 {
match *self {
PartiallyVerifiedBlock::NotVerified(ref block)
| PartiallyVerifiedBlock::HeaderPreVerified(ref block) => block.hash(),
}
}
}
impl From<PartiallyVerifiedBlock> for IndexedBlock {
fn from(block: PartiallyVerifiedBlock) -> Self {
match block {
PartiallyVerifiedBlock::NotVerified(block) => block,
PartiallyVerifiedBlock::HeaderPreVerified(block) => block,
}
}
}
#[cfg(test)]
impl From<IndexedBlock> for PartiallyVerifiedBlock {
fn from(block: IndexedBlock) -> Self {
PartiallyVerifiedBlock::NotVerified(block)
}
}
impl ChainVerifierWrapper {
/// Create new chain verifier wrapper.
pub fn new(verifier: Arc<ChainVerifier>, storage: &StorageRef, verification_params: VerificationParameters) -> Self {
@ -82,20 +129,36 @@ impl ChainVerifierWrapper {
}
}
/// Verify header.
pub fn verify_block_header(&self, header: &IndexedBlockHeader) -> Result<(), VerificationError> {
self.verifier.verify_block_header(header)
}
/// Verify block.
pub fn verify_block(&self, block: &IndexedBlock) -> Result<(), VerificationError> {
pub fn verify_block(&self, block: &PartiallyVerifiedBlock) -> Result<(), VerificationError> {
let enforce_full_verification = if block.hash() == &self.verification_params.verification_edge {
self.enforce_full_verification.store(true, Ordering::Relaxed);
true
} else {
self.enforce_full_verification.load(Ordering::Relaxed)
};
let verification_level = if enforce_full_verification {
VerificationLevel::Full
// select base verification level
let mut verification_level = if enforce_full_verification {
VerificationLevel::FULL
} else {
self.verification_params.verification_level
};
// update verification level with hints, if necessary
let block = match *block {
PartiallyVerifiedBlock::NotVerified(ref block) => block,
PartiallyVerifiedBlock::HeaderPreVerified(ref block) => {
verification_level.insert(VerificationLevel::HINT_HEADER_PRE_VERIFIED);
block
},
};
self.verifier.verify(verification_level, block)
}
}
@ -112,12 +175,19 @@ impl VerificationTask {
impl AsyncVerifier {
/// Create new async verifier
pub fn new<T: VerificationSink>(verifier: Arc<ChainVerifier>, storage: StorageRef, memory_pool: MemoryPoolRef, sink: Arc<T>, verification_params: VerificationParameters) -> Self {
pub fn new<T: VerificationSink>(
thread_name: String,
verifier: Arc<ChainVerifier>,
storage: StorageRef,
memory_pool: MemoryPoolRef,
sink: Arc<T>,
verification_params: VerificationParameters,
) -> Self {
let (verification_work_sender, verification_work_receiver) = channel();
AsyncVerifier {
verification_work_sender: Mutex::new(verification_work_sender),
verification_worker_thread: Some(thread::Builder::new()
.name("Sync verification thread".to_string())
.name(thread_name)
.spawn(move || {
let verifier = ChainVerifierWrapper::new(verifier, &storage, verification_params);
AsyncVerifier::verification_worker_proc(sink, storage, memory_pool, verifier, verification_work_receiver)
@ -127,8 +197,14 @@ impl AsyncVerifier {
}
/// Thread procedure for handling verification tasks
fn verification_worker_proc<T: VerificationSink>(sink: Arc<T>, storage: StorageRef, memory_pool: MemoryPoolRef, verifier: ChainVerifierWrapper, work_receiver: Receiver<VerificationTask>) {
while let Ok(task) = work_receiver.recv() {
fn verification_worker_proc<T: VerificationSink>(
sink: Arc<T>,
storage: StorageRef,
memory_pool: MemoryPoolRef,
verifier: ChainVerifierWrapper,
work_receiver: Receiver<VerificationTask>,
) {
while let Ok(task) = work_receiver.recv() {
if !AsyncVerifier::execute_single_task(&sink, &storage, &memory_pool, &verifier, task) {
break;
}
@ -138,7 +214,13 @@ impl AsyncVerifier {
}
/// Execute single verification task
pub fn execute_single_task<T: VerificationSink>(sink: &Arc<T>, storage: &StorageRef, memory_pool: &MemoryPoolRef, verifier: &ChainVerifierWrapper, task: VerificationTask) -> bool {
pub fn execute_single_task<T: VerificationSink>(
sink: &Arc<T>,
storage: &StorageRef,
memory_pool: &MemoryPoolRef,
verifier: &ChainVerifierWrapper,
task: VerificationTask,
) -> bool {
// block verification && insertion can lead to reorganization
// => transactions from decanonized blocks should be put back to the MemoryPool
// => they must be verified again
@ -148,11 +230,20 @@ impl AsyncVerifier {
while let Some(task) = tasks_queue.pop_front() {
match task {
VerificationTask::VerifyHeaders(peer, headers) => {
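// verify headers one by one, stopping at the first failure and remembering its hash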
let result = headers.iter()
.try_for_each(|header| verifier.verify_block_header(header)
.map_err(|error| (error, header.hash)));
match result {
Ok(_) => sink.on_headers_verification_success(headers),
Err((error, hash)) => sink.on_headers_verification_error(peer, format!("{:?}", error), hash, headers),
}
},
VerificationTask::VerifyBlock(block) => {
// verify block
match verifier.verify_block(&block) {
Ok(_) => {
if let Some(tasks) = sink.on_block_verification_success(block) {
if let Some(tasks) = sink.on_block_verification_success(block.into()) {
tasks_queue.extend(tasks);
}
},
@ -185,7 +276,6 @@ impl AsyncVerifier {
}
}
impl Drop for AsyncVerifier {
fn drop(&mut self) {
if let Some(join_handle) = self.verification_worker_thread.take() {
@ -200,14 +290,18 @@ impl Drop for AsyncVerifier {
}
impl Verifier for AsyncVerifier {
/// Verify block
fn verify_block(&self, block: IndexedBlock) {
fn verify_headers(&self, peer: PeerIndex, headers: Vec<IndexedBlockHeader>) {
self.verification_work_sender.lock()
.send(VerificationTask::VerifyHeaders(peer, headers))
.expect("Verification thread have the same lifetime as `AsyncVerifier`");
}
fn verify_block(&self, block: PartiallyVerifiedBlock) {
self.verification_work_sender.lock()
.send(VerificationTask::VerifyBlock(block))
.expect("Verification thread have the same lifetime as `AsyncVerifier`");
}
/// Verify transaction
fn verify_transaction(&self, height: BlockHeight, transaction: IndexedTransaction) {
self.verification_work_sender.lock()
.send(VerificationTask::VerifyTransaction(height, transaction))
@ -236,14 +330,19 @@ impl<T> SyncVerifier<T> where T: VerificationSink {
}
impl<T> Verifier for SyncVerifier<T> where T: VerificationSink {
/// Verify headers
fn verify_headers(&self, _peer: PeerIndex, _headers: Vec<IndexedBlockHeader>) {
unreachable!("SyncVerifier is used only for blocks verification")
}
/// Verify block
fn verify_block(&self, block: IndexedBlock) {
fn verify_block(&self, block: PartiallyVerifiedBlock) {
match self.verifier.verify_block(&block) {
Ok(_) => {
// SyncVerifier is used for bulk block import only
// => there is no memory pool
// => we could ignore decanonized transactions
self.sink.on_block_verification_success(block);
self.sink.on_block_verification_success(block.into());
},
Err(e) => self.sink.on_block_verification_error(&format!("{:?}", e), block.hash()),
}
@ -251,7 +350,7 @@ impl<T> Verifier for SyncVerifier<T> where T: VerificationSink {
/// Verify transaction
fn verify_transaction(&self, _height: BlockHeight, _transaction: IndexedTransaction) {
unimplemented!() // sync verifier is currently only used for blocks verification
unreachable!("SyncVerifier is used only for blocks verification")
}
}
@ -269,9 +368,10 @@ pub mod tests {
use synchronization_client_core::CoreVerificationSink;
use synchronization_executor::tests::DummyTaskExecutor;
use primitives::hash::H256;
use chain::{IndexedBlock, IndexedTransaction};
use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, AsyncVerifier, VerificationTask, ChainVerifierWrapper};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use chain::{IndexedBlockHeader, IndexedBlock, IndexedTransaction};
use super::{Verifier, HeadersVerificationSink, BlockVerificationSink, TransactionVerificationSink,
AsyncVerifier, VerificationTask, ChainVerifierWrapper, PartiallyVerifiedBlock};
use types::{PeerIndex, BlockHeight, StorageRef, MemoryPoolRef};
use VerificationParameters;
#[derive(Default)]
@ -299,7 +399,7 @@ pub mod tests {
pub fn set_verifier(&mut self, verifier: Arc<ChainVerifier>) {
self.verifier = Some(ChainVerifierWrapper::new(verifier, self.storage.as_ref().unwrap(), VerificationParameters {
verification_level: VerificationLevel::Full,
verification_level: VerificationLevel::FULL,
verification_edge: 0u8.into(),
}));
}
@ -314,7 +414,14 @@ pub mod tests {
}
impl Verifier for DummyVerifier {
fn verify_block(&self, block: IndexedBlock) {
fn verify_headers(&self, _peer: PeerIndex, headers: Vec<IndexedBlockHeader>) {
match self.sink {
Some(ref sink) => sink.on_headers_verification_success(headers),
_ => (),
}
}
fn verify_block(&self, block: PartiallyVerifiedBlock) {
match self.sink {
Some(ref sink) => match self.errors.get(&block.hash()) {
Some(err) => sink.on_block_verification_error(&err, &block.hash()),
@ -322,7 +429,7 @@ pub mod tests {
if self.actual_checks.contains(block.hash()) {
AsyncVerifier::execute_single_task(sink, self.storage.as_ref().unwrap(), self.memory_pool.as_ref().unwrap(), self.verifier.as_ref().unwrap(), VerificationTask::VerifyBlock(block));
} else {
sink.on_block_verification_success(block);
sink.on_block_verification_success(block.into());
}
},
},
@ -355,18 +462,18 @@ pub mod tests {
// switching to full verification when block is already in db
assert_eq!(ChainVerifierWrapper::new(verifier.clone(), &storage, VerificationParameters {
verification_level: VerificationLevel::NoVerification,
verification_level: VerificationLevel::NO_VERIFICATION,
verification_edge: test_data::genesis().hash(),
}).enforce_full_verification.load(Ordering::Relaxed), true);
// switching to full verification when block with given hash is coming
let wrapper = ChainVerifierWrapper::new(verifier, &storage, VerificationParameters {
verification_level: VerificationLevel::NoVerification,
verification_level: VerificationLevel::NO_VERIFICATION,
verification_edge: test_data::block_h1().hash(),
});
assert_eq!(wrapper.enforce_full_verification.load(Ordering::Relaxed), false);
let block: IndexedBlock = test_data::block_h1().into();
let _ = wrapper.verify_block(&block);
let _ = wrapper.verify_block(&block.into());
assert_eq!(wrapper.enforce_full_verification.load(Ordering::Relaxed), true);
}
@ -416,17 +523,17 @@ pub mod tests {
// Ok(()) when tx script is not checked
let wrapper = ChainVerifierWrapper::new(verifier.clone(), &storage, VerificationParameters {
verification_level: VerificationLevel::Header,
verification_level: VerificationLevel::HEADER,
verification_edge: 1.into(),
});
assert_eq!(wrapper.verify_block(&bad_transaction_block), Ok(()));
assert_eq!(wrapper.verify_block(&bad_transaction_block.clone().into()), Ok(()));
// Error when tx script is checked
let wrapper = ChainVerifierWrapper::new(verifier, &storage, VerificationParameters {
verification_level: VerificationLevel::Full,
verification_level: VerificationLevel::FULL,
verification_edge: 1.into(),
});
assert_eq!(wrapper.verify_block(&bad_transaction_block), Err(VerificationError::Transaction(1, TransactionError::Signature(0, ScriptError::InvalidStackOperation))));
assert_eq!(wrapper.verify_block(&bad_transaction_block.into()), Err(VerificationError::Transaction(1, TransactionError::Signature(0, ScriptError::InvalidStackOperation))));
}
#[test]
@ -437,16 +544,16 @@ pub mod tests {
// Ok(()) when nothing is verified
let wrapper = ChainVerifierWrapper::new(verifier.clone(), &storage, VerificationParameters {
verification_level: VerificationLevel::NoVerification,
verification_level: VerificationLevel::NO_VERIFICATION,
verification_edge: 1.into(),
});
assert_eq!(wrapper.verify_block(&bad_block), Ok(()));
assert_eq!(wrapper.verify_block(&bad_block.clone().into()), Ok(()));
// Error when everything is verified
let wrapper = ChainVerifierWrapper::new(verifier, &storage, VerificationParameters {
verification_level: VerificationLevel::Full,
verification_level: VerificationLevel::FULL,
verification_edge: 1.into(),
});
assert_eq!(wrapper.verify_block(&bad_block), Err(VerificationError::Empty));
assert_eq!(wrapper.verify_block(&bad_block.into()), Err(VerificationError::Empty));
}
}

View File

@ -74,13 +74,14 @@ impl BestHeadersChain {
}
/// Insert new block header
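/// Returns `true` if the header was already present in the chain.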
pub fn insert(&mut self, header: IndexedBlockHeader) {
pub fn insert(&mut self, header: IndexedBlockHeader) -> bool {
// append to the best chain
if self.best_block_hash() == header.raw.previous_header_hash {
let header_hash = header.hash.clone();
self.headers.insert(header_hash.clone(), header);
self.best.push_back(header_hash);
return;
self.headers.insert(header_hash.clone(), header).is_some()
} else {
self.headers.contains_key(&header.hash)
}
}
@ -228,4 +229,13 @@ mod tests {
assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1);
}
#[test]
fn insert_to_best_chain_returns_true_if_header_is_in_chain() {
let b0 = test_data::block_builder().header().build().build();
let b1 = test_data::block_builder().header().parent(b0.hash()).build().build().block_header;
let mut chain = BestHeadersChain::new(b0.hash());
assert!(!chain.insert(b1.clone().into()));
assert!(chain.insert(b1.into()));
}
}

View File

@ -1,84 +0,0 @@
use std::collections::HashMap;
use chain::BlockHeader;
use storage::{BlockRef, BlockHeaderProvider};
use primitives::bytes::Bytes;
use primitives::hash::H256;
/// Block headers provider from `headers` message
pub struct MessageBlockHeadersProvider<'a> {
/// Synchronization chain headers provider
chain_provider: &'a BlockHeaderProvider,
/// headers offset
first_header_number: u32,
/// headers by hash
headers: HashMap<H256, BlockHeader>,
/// headers by order
headers_order: Vec<H256>,
}
impl<'a> MessageBlockHeadersProvider<'a> {
pub fn new(chain_provider: &'a BlockHeaderProvider, best_block_header_height: u32) -> Self {
MessageBlockHeadersProvider {
chain_provider: chain_provider,
first_header_number: best_block_header_height + 1,
headers: HashMap::new(),
headers_order: Vec::new(),
}
}
pub fn append_header(&mut self, hash: H256, header: BlockHeader) {
self.headers.insert(hash.clone(), header);
self.headers_order.push(hash);
}
}
impl<'a> BlockHeaderProvider for MessageBlockHeadersProvider<'a> {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
self.chain_provider.block_header(block_ref.clone())
.or_else(move || match block_ref {
BlockRef::Hash(h) => self.headers.get(&h).cloned(),
BlockRef::Number(n) => if n >= self.first_header_number && n - self.first_header_number < self.headers_order.len() as u32 {
let header_hash = &self.headers_order[(n - self.first_header_number) as usize];
Some(self.headers[header_hash].clone())
} else {
None
},
})
}
}
#[cfg(test)]
mod tests {
extern crate test_data;
use storage::{AsSubstore, BlockHeaderProvider, BlockRef};
use db::BlockChainDatabase;
use primitives::hash::H256;
use super::MessageBlockHeadersProvider;
#[test]
fn test_message_block_headers_provider() {
let storage = BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]);
let storage_provider = storage.as_block_header_provider();
let mut headers_provider = MessageBlockHeadersProvider::new(storage_provider, 0);
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Number(0)), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Hash(H256::from(1))), None);
assert_eq!(headers_provider.block_header(BlockRef::Number(1)), None);
headers_provider.append_header(test_data::block_h1().hash(), test_data::block_h1().block_header);
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Number(0)), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::block_h1().hash())), Some(test_data::block_h1().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Number(1)), Some(test_data::block_h1().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Hash(H256::from(1))), None);
assert_eq!(headers_provider.block_header(BlockRef::Number(2)), None);
}
}

View File

@ -6,7 +6,6 @@ mod fee_rate_filter;
mod hash_queue;
mod known_hash_filter;
mod memory_pool_transaction_provider;
mod message_block_headers_provider;
mod orphan_blocks_pool;
mod orphan_transactions_pool;
mod partial_merkle_tree;
@ -20,7 +19,6 @@ pub use self::fee_rate_filter::FeeRateFilter;
pub use self::hash_queue::{HashQueue, HashQueueChain, HashPosition};
pub use self::known_hash_filter::{KnownHashType, KnownHashFilter};
pub use self::memory_pool_transaction_provider::MemoryPoolTransactionOutputProvider;
pub use self::message_block_headers_provider::MessageBlockHeadersProvider;
pub use self::orphan_blocks_pool::OrphanBlocksPool;
pub use self::orphan_transactions_pool::{OrphanTransactionsPool, OrphanTransaction};
pub use self::partial_merkle_tree::{PartialMerkleTree, build_partial_merkle_tree};

View File

@ -19,6 +19,7 @@ storage = { path = "../storage" }
bitcrypto = { path = "../crypto" }
rustc-hex = "2"
bitvec = "0.10"
bitflags = "1.0"
[dev-dependencies]
rand = "0.4"

View File

@ -1,5 +1,8 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use storage::{DuplexTransactionOutputProvider, Store};
use storage::{
DuplexTransactionOutputProvider, TransactionOutputProvider, TransactionMetaProvider,
BlockHeaderProvider, TreeStateProvider, NullifierTracker,
};
use network::ConsensusParams;
use error::Error;
use canon::CanonBlock;
@ -17,30 +20,41 @@ pub struct ChainAcceptor<'a> {
}
impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, time: u32, deployments: &'a BlockDeployments) -> Self {
pub fn new(
tx_out_provider: &'a TransactionOutputProvider,
tx_meta_provider: &'a TransactionMetaProvider,
header_provider: &'a BlockHeaderProvider,
tree_state_provider: &'a TreeStateProvider,
nullifier_tracker: &'a NullifierTracker,
consensus: &'a ConsensusParams,
verification_level: VerificationLevel,
block: CanonBlock<'a>,
height: u32,
time: u32,
deployments: &'a BlockDeployments,
) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let tree_cache = TreeCache::new(store.as_tree_state_provider());
let headers = store.as_block_header_provider();
let output_store = DuplexTransactionOutputProvider::new(tx_out_provider, block.raw());
let tree_cache = TreeCache::new(tree_state_provider);
ChainAcceptor {
block: BlockAcceptor::new(
store.as_transaction_output_provider(),
store.as_tree_state_provider(),
tx_out_provider,
tree_state_provider,
consensus,
block,
height,
deployments,
headers,
header_provider,
),
header: HeaderAcceptor::new(headers, consensus, block.header(), height, time, deployments),
header: HeaderAcceptor::new(header_provider, consensus, block.header(), height, time, deployments),
transactions: block.transactions()
.into_iter()
.enumerate()
.map(|(tx_index, tx)| TransactionAcceptor::new(
store.as_transaction_meta_provider(),
tx_meta_provider,
output_store,
store.as_nullifier_tracker(),
nullifier_tracker,
consensus,
tx,
verification_level,

View File

@ -127,7 +127,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
overspent: TransactionOverspent::new(transaction, output_store),
sigops: TransactionSigops::new(transaction, output_store, consensus, max_block_sigops, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, deployments),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::FULL, height, time, deployments),
join_split: JoinSplitVerification::new(consensus, transaction, nullifier_tracker, tree_cache),
sapling: SaplingVerification::new(
nullifier_tracker,
@ -405,8 +405,7 @@ impl<'a> TransactionEval<'a> {
false => Default::default(),
};
if self.verification_level == VerificationLevel::Header
|| self.verification_level == VerificationLevel::NoVerification {
if self.verification_level.intersects(VerificationLevel::HEADER | VerificationLevel::NO_VERIFICATION) {
return Ok(no_input_sighash);
}
@ -456,6 +455,10 @@ impl<'a> TransactionDoubleSpend<'a> {
}
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.is_coinbase() {
return Ok(());
}
for input in &self.transaction.raw.inputs {
if self.store.is_spent(&input.previous_output) {
return Err(TransactionError::UsingSpentOutput(

View File

@ -1,9 +1,8 @@
//! Bitcoin chain verifier
use hash::H256;
use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, IndexedTransaction};
use chain::{IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use storage::{SharedStore, TransactionOutputProvider, BlockHeaderProvider, BlockOrigin,
DuplexTransactionOutputProvider, NoopStore};
DuplexTransactionOutputProvider, NoopStore, CachedTransactionOutputProvider};
use network::ConsensusParams;
use error::{Error, TransactionError};
use canon::{CanonBlock, CanonTransaction};
@ -31,53 +30,102 @@ impl BackwardsCompatibleChainVerifier {
}
fn verify_block(&self, verification_level: VerificationLevel, block: &IndexedBlock) -> Result<(), Error> {
if verification_level == VerificationLevel::NoVerification {
if verification_level.intersects(VerificationLevel::NO_VERIFICATION) {
return Ok(());
}
let current_time = ::time::get_time().sec as u32;
// first run pre-verification
let chain_verifier = ChainVerifier::new(block, &self.consensus, current_time);
let chain_verifier = ChainVerifier::new(block, &self.consensus, current_time, verification_level);
chain_verifier.check()?;
assert_eq!(Some(self.store.best_block().hash), self.store.block_hash(self.store.best_block().number));
let block_origin = self.store.block_origin(&block.header)?;
trace!(target: "verification", "verify_block: {:?} best_block: {:?} block_origin: {:?}", block.hash().reversed(), self.store.best_block(), block_origin);
trace!(
target: "verification",
"verify_block: {:?} best_block: {:?} block_origin: {:?}",
block.hash().reversed(),
self.store.best_block(),
block_origin,
);
let canon_block = CanonBlock::new(block);
match block_origin {
BlockOrigin::KnownBlock => {
// there should be no known blocks at this point
unreachable!();
},
BlockOrigin::CanonChain { block_number } => {
let tx_out_provider = CachedTransactionOutputProvider::new(self.store.as_store().as_transaction_output_provider());
let tx_meta_provider = self.store.as_store().as_transaction_meta_provider();
let header_provider = self.store.as_store().as_block_header_provider();
let tree_state_provider = self.store.as_store().as_tree_state_provider();
let nullifier_tracker = self.store.as_store().as_nullifier_tracker();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level,
canon_block, block_number, block.header.raw.time, &deployments);
let chain_acceptor = ChainAcceptor::new(
&tx_out_provider,
tx_meta_provider,
header_provider,
tree_state_provider,
nullifier_tracker,
&self.consensus,
verification_level,
canon_block,
block_number,
block.header.raw.time,
&deployments,
);
chain_acceptor.check()?;
},
BlockOrigin::SideChain(origin) => {
let block_number = origin.block_number;
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block,
block_number, block.header.raw.time, &deployments);
let tx_out_provider = CachedTransactionOutputProvider::new(fork.store().as_transaction_output_provider());
let tx_meta_provider = fork.store().as_transaction_meta_provider();
let header_provider = fork.store().as_block_header_provider();
let tree_state_provider = fork.store().as_tree_state_provider();
let nullifier_tracker = fork.store().as_nullifier_tracker();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let chain_acceptor = ChainAcceptor::new(
&tx_out_provider,
tx_meta_provider,
header_provider,
tree_state_provider,
nullifier_tracker,
&self.consensus,
verification_level,
canon_block,
block_number,
block.header.raw.time,
&deployments,
);
chain_acceptor.check()?;
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
let block_number = origin.block_number;
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block,
block_number, block.header.raw.time, &deployments);
let tx_out_provider = CachedTransactionOutputProvider::new(fork.store().as_transaction_output_provider());
let tx_meta_provider = fork.store().as_transaction_meta_provider();
let header_provider = fork.store().as_block_header_provider();
let tree_state_provider = fork.store().as_tree_state_provider();
let nullifier_tracker = fork.store().as_nullifier_tracker();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let chain_acceptor = ChainAcceptor::new(
&tx_out_provider,
tx_meta_provider,
header_provider,
tree_state_provider,
nullifier_tracker,
&self.consensus,
verification_level,
canon_block,
block_number,
block.header.raw.time,
&deployments,
);
chain_acceptor.check()?;
},
}
};
assert_eq!(Some(self.store.best_block().hash), self.store.block_hash(self.store.best_block().number));
Ok(())
@ -85,15 +133,10 @@ impl BackwardsCompatibleChainVerifier {
pub fn verify_block_header(
&self,
_block_header_provider: &BlockHeaderProvider,
hash: &H256,
header: &BlockHeader
header: &IndexedBlockHeader,
) -> Result<(), Error> {
// let's do only pre-verification
// TODO: full verification
let current_time = ::time::get_time().sec as u32;
let header = IndexedBlockHeader::new(hash.clone(), header.clone());
let header_verifier = HeaderVerifier::new(&header, &self.consensus, current_time);
let header_verifier = HeaderVerifier::new(header, &self.consensus, current_time);
header_verifier.check()
}
@ -161,7 +204,7 @@ mod tests {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b2 = test_data::block_h2().into();
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest));
assert_eq!(Err(Error::Database(DBError::UnknownParent)), verifier.verify(VerificationLevel::Full, &b2));
assert_eq!(Err(Error::Database(DBError::UnknownParent)), verifier.verify(VerificationLevel::FULL, &b2));
}
#[test]
@ -169,7 +212,7 @@ mod tests {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b1 = test_data::block_h1();
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Mainnet));
assert_eq!(verifier.verify(VerificationLevel::Full, &b1.into()), Ok(()));
assert_eq!(verifier.verify(VerificationLevel::FULL, &b1.into()), Ok(()));
}
#[test]
@ -181,7 +224,7 @@ mod tests {
]);
let b1 = test_data::block_h2();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Mainnet));
assert_eq!(verifier.verify(VerificationLevel::Full, &b1.into()), Ok(()));
assert_eq!(verifier.verify(VerificationLevel::FULL, &b1.into()), Ok(()));
}
#[test]
@ -218,7 +261,7 @@ mod tests {
TransactionError::Maturity,
));
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
assert_eq!(expected, verifier.verify(VerificationLevel::FULL, &block.into()));
}
#[test]
@ -253,7 +296,7 @@ mod tests {
.build();
let verifier = ChainVerifier::new(Arc::new(storage), consensus);
assert_eq!(verifier.verify(VerificationLevel::Full, &block.into()), Ok(()));
assert_eq!(verifier.verify(VerificationLevel::FULL, &block.into()), Ok(()));
}
#[test]
@ -292,7 +335,7 @@ mod tests {
.build();
let verifier = ChainVerifier::new(Arc::new(storage), consensus);
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
assert!(verifier.verify(VerificationLevel::FULL, &block.into()).is_ok());
}
#[test]
@ -333,7 +376,7 @@ mod tests {
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::Transaction(2, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
assert_eq!(expected, verifier.verify(VerificationLevel::FULL, &block.into()));
}
#[test]
@ -373,7 +416,7 @@ mod tests {
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
assert!(verifier.verify(VerificationLevel::FULL, &block.into()).is_ok());
}
#[test]
@ -421,7 +464,7 @@ mod tests {
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::MaximumSigops);
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
assert_eq!(expected, verifier.verify(VerificationLevel::FULL, &block.into()));
}
#[test]
@ -448,6 +491,6 @@ mod tests {
actual: 1250000001,
});
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
assert_eq!(expected, verifier.verify(VerificationLevel::FULL, &block.into()));
}
}

View File

@ -127,7 +127,7 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
let number = first_of_the_period(number, miner_confirmation_window);
let hash = match headers.block_header(BlockRef::Number(number)) {
Some(header) => header.hash(),
Some(header) => header.hash,
None => return ThresholdState::Defined,
};
@ -174,7 +174,7 @@ fn first_of_the_period(block: u32, miner_confirmation_window: u32) -> u32 {
fn count_deployment_matches(block_number: u32, blocks: &BlockHeaderProvider, deployment: Deployment, window: u32) -> usize {
BlockAncestors::new(BlockRef::Number(block_number), blocks)
.take(window as usize)
.filter(|header| deployment.matches(header.version))
.filter(|header| deployment.matches(header.raw.version))
.count()
}
@ -209,7 +209,7 @@ impl<'a> Iterator for ThresholdIterator<'a> {
None => return None,
};
let median = median_timestamp(&header, self.headers);
let median = median_timestamp(&header.raw, self.headers);
match self.last_state {
ThresholdState::Defined => {
@ -239,7 +239,7 @@ impl<'a> Iterator for ThresholdIterator<'a> {
let result = DeploymentState {
block_number: block_number,
block_hash: header.hash(),
block_hash: header.hash,
state: self.last_state,
};
@ -251,7 +251,7 @@ impl<'a> Iterator for ThresholdIterator<'a> {
mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use std::collections::HashMap;
use chain::BlockHeader;
use chain::{BlockHeader, IndexedBlockHeader};
use storage::{BlockHeaderProvider, BlockRef};
use network::Deployment;
use hash::H256;
@ -295,12 +295,12 @@ mod tests {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.request_count.fetch_add(1, Ordering::Relaxed);
match block_ref {
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
BlockRef::Hash(hash) => self.by_hash.get(&hash).and_then(|height| self.by_height.get(*height)).cloned(),
}
}.map(Into::into)
}
}

View File

@ -1,96 +1,147 @@
// https://github.com/zcash/zcash/commit/fdda3c5085199d2c2170887aa064fc42afdb0360
use crypto::Blake2b;
use byteorder::{BigEndian, LittleEndian, ByteOrder};
use chain::BlockHeader;
use crypto::Blake2b;
#[allow(non_snake_case)]
#[derive(Debug)]
pub struct EquihashParams {
pub n: u32,
pub k: u32,
}
impl EquihashParams {
pub fn indices_per_hash_output(&self) -> usize {
(512 / self.n) as usize
}
pub fn hash_output(&self) -> usize {
(self.indices_per_hash_output() * self.n as usize / 8usize) as usize
}
pub fn collision_bit_length(&self) -> usize {
(self.n / (self.k + 1)) as usize
}
pub fn collision_byte_length(&self) -> usize {
(self.collision_bit_length() + 7) / 8
}
pub fn final_full_width(&self) -> usize {
2 * self.collision_byte_length() + 4 * (1 << self.k)
}
pub fn solution_size(&self) -> usize {
((1usize << self.k) * (self.collision_bit_length() + 1) / 8) as usize
}
pub fn hash_length(&self) -> usize {
(self.k as usize + 1) * self.collision_byte_length()
}
}
/// Verify equihash solution of the block header.
pub fn verify_block_equihash_solution(params: (u32, u32), header: &BlockHeader) -> bool {
let (n, k) = params;
let params = EquihashParams { n, k };
let equihash_solution = header.solution.as_ref();
let input = header.equihash_input();
verify_equihash_solution(&params, &input, equihash_solution)
debug_assert_eq!(
params,
(OnChainEquihash::N, OnChainEquihash::K),
"Wrong equihash parameters specified in consensus",
);
verify_equihash_solution::<OnChainEquihash>(
&header.equihash_input(),
header.solution.as_ref(),
)
}
pub fn verify_equihash_solution(params: &EquihashParams, input: &[u8], solution: &[u8]) -> bool {
if solution.len() != params.solution_size() {
return false;
}
/// Equihash algorithm instance.
///
/// A brief, yet incomplete overview of the algorithm:
/// (1) prepare an indexed set of 2^(N / (K + 1) + 1) N-bit strings (BSTRs);
/// (2) select 2^K BSTRs from this set such that their XOR is zero;
/// (3) the solution is the indices of the selected BSTRs.
///
/// In the case of Zcash Equihash, a BSTR is the hash of the block header (excluding the solution
/// itself) ++ the hash index. Each hash is split into several BSTRs, which are used as the input
/// for Wagner's Generalized Birthday problem algorithm.
///
/// Wagner's algorithm (https://people.eecs.berkeley.edu/~daw/papers/genbday-long.ps) itself
/// works with padded BSTRs (rows).
trait Equihash {
/// Parameter N of Equihash algorithm.
const N: u32;
/// Parameter K of Equihash algorithm.
const K: u32;
/// Blake2b personalization used by the algorithm instance.
const BLAKE2B_PERSONALIZATION: [u8; 16];
let mut personalization = [0u8; 16];
personalization[0..8].clone_from_slice(b"ZcashPoW");
personalization[8..12].clone_from_slice(&to_little_endian(params.n));
personalization[12..16].clone_from_slice(&to_little_endian(params.k));
/// The number of N-bit BSTRs that can be generated from a single computed hash.
const BSTRS_PER_HASH: usize = (512 / Self::N) as usize;
/// The size (in bytes) of a single hash output, which holds `BSTRS_PER_HASH` BSTRs.
const HASH_SIZE: usize = Self::BSTRS_PER_HASH * (Self::N as usize) / 8;
/// Number of bits required to store a single BSTR index.
const BSTR_INDEX_BITS: usize = (Self::N / (Self::K + 1)) as usize;
/// Number of bytes required to store a single BSTR index (there could be extra bits in
/// the binary representation of the index).
const BSTR_INDEX_BYTES: usize = (Self::BSTR_INDEX_BITS + 7) / 8;
/// Number of BSTR indices in solution.
const BSTR_INDICES_IN_SOLUTION: usize = 1usize << Self::K;
/// The size (in bytes) of the compressed Equihash solution (a compressed array of BE-encoded BSTR indices).
const SOLUTION_COMPRESSED_SIZE: usize = Self::BSTR_INDICES_IN_SOLUTION * (Self::BSTR_INDEX_BITS + 1) / 8;
/// Number of leading zero bytes used to pad a compressed BSTR index so it fits into a u32.
const SOLUTION_PAD_BYTES: usize = 4 - (Self::BSTR_INDEX_BITS + 8) / 8;
/// The size (in bytes) of a single row used by Wagner's algorithm.
const ROW_SIZE: usize = 2 * Self::BSTR_INDEX_BYTES + 4 * Self::BSTR_INDICES_IN_SOLUTION;
/// The size (in bytes) of the hash part of the row.
const ROW_HASH_LENGTH: usize = (Self::K as usize + 1) * Self::BSTR_INDEX_BYTES;
let mut context = Blake2b::with_params(params.hash_output(), &[], &[], &personalization);
/// Type of hash (bytes of HASH_SIZE length). This should become `[u8; Self::HASH_SIZE]` once
/// Rust is able to express that.
type Hash: Default + AsRef<[u8]> + AsMut<[u8]>;
}
/// Equihash algorithm instance that is used by all Zcash chains.
struct OnChainEquihash;
impl Equihash for OnChainEquihash {
const N: u32 = 200;
const K: u32 = 9;
const BLAKE2B_PERSONALIZATION: [u8; 16] = [
0x5a, 0x63, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x57, // b"ZcashPoW"
0xc8, 0x00, 0x00, 0x00, // LE(N)
0x09, 0x00, 0x00, 0x00, // LE(K)
];
type Hash = self::on_chain_equihash::Hash;
}
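For the on-chain instance these constants evaluate to `BSTRS_PER_HASH = 2`, `HASH_SIZE = 50`, `BSTR_INDEX_BITS = 20`, `BSTR_INDEX_BYTES = 3`, `BSTR_INDICES_IN_SOLUTION = 512` and `SOLUTION_COMPRESSED_SIZE = 512 * 21 / 8 = 1344`, the familiar byte length of a Zcash solution. The core property from the trait docs (the XOR of the selected BSTRs is zero) can be stated as a tiny standalone sketch; `bstrs` and `solution` are illustrative names, not part of this crate:

```rust
// Toy illustration of the Equihash solution property: the XOR of the
// BSTRs picked by the solution indices must be all-zero.
fn solution_xor_is_zero(bstrs: &[Vec<u8>], solution: &[usize]) -> bool {
	let width = bstrs.first().map_or(0, |bstr| bstr.len());
	let mut acc = vec![0u8; width];
	for &index in solution {
		for (acc_byte, bstr_byte) in acc.iter_mut().zip(&bstrs[index]) {
			*acc_byte ^= *bstr_byte;
		}
	}
	acc.iter().all(|&byte| byte == 0)
}
```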
/// Verify equihash solution.
fn verify_equihash_solution<Algorithm: Equihash>(
input: &[u8],
solution: &[u8],
) -> bool {
// prepare Blake2b context with personalization
let mut context = Blake2b::with_params(Algorithm::HASH_SIZE, &[], &[], &Algorithm::BLAKE2B_PERSONALIZATION);
context.update(input);
// pure equihash
// we're using two dynamic vectors here and swap them when required
// for the on-chain algorithm instance:
// sizeof(*rows1) ~ 512 * 2054 ~ 1M
// sizeof(*rows2) ~ 256 * 2054 ~ 512K
let mut rows1 = vec![0u8; Algorithm::BSTR_INDICES_IN_SOLUTION * Algorithm::ROW_SIZE];
let mut rows2 = vec![0u8; Algorithm::BSTR_INDICES_IN_SOLUTION * Algorithm::ROW_SIZE / 2];
let collision_bit_length = params.collision_bit_length();
let indices = get_indices_from_minimal(solution, collision_bit_length);
let mut current_rows = &mut rows1;
let mut backup_rows = &mut rows2;
let mut rows = Vec::new();
for idx in indices {
let hash = generate_hash(&context, (idx as usize / params.indices_per_hash_output()) as u32);
let hash_begin = (idx as usize % params.indices_per_hash_output()) * params.n as usize / 8;
let hash_end = hash_begin + params.n as usize / 8;
let mut hash = Algorithm::Hash::default();
let mut current_rows_pos = 0;
for_each_solution_index::<Algorithm, _>(
solution,
&mut |index| {
let hash_half_index = (index as usize / Algorithm::BSTRS_PER_HASH) as u32;
generate_hash(&context, hash_half_index, hash.as_mut());
let mut row = vec![0; params.final_full_width()];
let expanded_hash = expand_array(
&hash[hash_begin..hash_end],
params.collision_bit_length(),
0);
row[0..expanded_hash.len()].clone_from_slice(&expanded_hash);
row[params.hash_length()..params.hash_length() + 4].clone_from_slice(&to_big_endian(idx));
rows.push(row);
}
let hash_begin = (index as usize % Algorithm::BSTRS_PER_HASH) * Algorithm::N as usize / 8;
let hash_end = hash_begin + Algorithm::N as usize / 8;
let sub_hash = &hash.as_ref()[hash_begin..hash_end];
let mut hash_len = params.hash_length();
let mut current_rows_sub_pos = current_rows_pos;
expand_array(
sub_hash,
Algorithm::BSTR_INDEX_BITS,
0,
&mut |buffer: &[u8; 4]| {
current_rows[current_rows_sub_pos..current_rows_sub_pos+Algorithm::BSTR_INDEX_BYTES]
.copy_from_slice(&buffer[0..Algorithm::BSTR_INDEX_BYTES]);
current_rows_sub_pos += Algorithm::BSTR_INDEX_BYTES;
},
);
current_rows[current_rows_pos+Algorithm::ROW_HASH_LENGTH..current_rows_pos+Algorithm::ROW_HASH_LENGTH+4]
.copy_from_slice(&index.to_be_bytes());
current_rows_pos += Algorithm::ROW_SIZE;
}
);
let mut hash_len = Algorithm::ROW_HASH_LENGTH;
let mut indices_len = 4;
while rows.len() > 1 {
let mut rows_check = Vec::new();
for i in 0..rows.len() / 2 {
let row1 = &rows[i * 2];
let row2 = &rows[i * 2 + 1];
if !has_collision(row1, row2, params.collision_byte_length()) {
let mut current_rows_count = current_rows.len() / Algorithm::ROW_SIZE;
loop {
if current_rows_count <= 1 {
break;
}
let mut current_row_begin = 0;
let mut current_row_end = Algorithm::ROW_SIZE;
let mut next_row_begin = Algorithm::ROW_SIZE;
let mut next_row_end = Algorithm::ROW_SIZE + Algorithm::ROW_SIZE;
let mut merged_row_begin = 0;
let mut merged_row_end = Algorithm::ROW_SIZE;
for _ in 0..current_rows_count / 2 {
let row1 = &current_rows[current_row_begin..current_row_end];
let row2 = &current_rows[next_row_begin..next_row_end];
if !has_collision(row1, row2, Algorithm::BSTR_INDEX_BYTES) {
return false;
}
if indices_before(row2, row1, hash_len, indices_len) {
@ -99,36 +150,109 @@ pub fn verify_equihash_solution(params: &EquihashParams, input: &[u8], solution:
if !distinct_indices(row1, row2, hash_len, indices_len) {
return false;
}
rows_check.push(merge_rows(row1, row2, hash_len, indices_len, params.collision_byte_length()));
let merged_row = &mut backup_rows[merged_row_begin..merged_row_end];
merge_rows(row1, row2, merged_row, hash_len, indices_len, Algorithm::BSTR_INDEX_BYTES);
current_row_begin += Algorithm::ROW_SIZE + Algorithm::ROW_SIZE;
current_row_end += Algorithm::ROW_SIZE + Algorithm::ROW_SIZE;
next_row_begin += Algorithm::ROW_SIZE + Algorithm::ROW_SIZE;
next_row_end += Algorithm::ROW_SIZE + Algorithm::ROW_SIZE;
merged_row_begin += Algorithm::ROW_SIZE;
merged_row_end += Algorithm::ROW_SIZE;
}
rows = rows_check;
hash_len -= params.collision_byte_length();
::std::mem::swap(&mut current_rows, &mut backup_rows);
hash_len -= Algorithm::BSTR_INDEX_BYTES;
indices_len *= 2;
current_rows_count /= 2;
}
rows[0].iter().take(hash_len).all(|x| *x == 0)
current_rows[0..Algorithm::ROW_SIZE].iter().take(hash_len).all(|x| *x == 0)
}
fn merge_rows(row1: &[u8], row2: &[u8], len: usize, indices_len: usize, trim: usize) -> Vec<u8> {
let mut row = row1.to_vec();
fn for_each_solution_index<Algorithm, ForEach>(solution: &[u8], for_each: &mut ForEach)
where
Algorithm: Equihash,
ForEach: FnMut(u32),
{
// consensus parameters enforce this
debug_assert_eq!(
solution.len(),
Algorithm::SOLUTION_COMPRESSED_SIZE,
"Wrong equihash parameters specified in consensus",
);
expand_array(
solution,
Algorithm::BSTR_INDEX_BITS + 1,
Algorithm::SOLUTION_PAD_BYTES,
&mut |buffer: &[u8; 4]| for_each(u32::from_be_bytes(*buffer)),
);
}
fn expand_array<E: FnMut(&[u8; 4])>(
compressed: &[u8],
blen: usize,
pad: usize,
expand_single: &mut E,
) {
let out_width = (blen + 7) / 8 + pad;
let blen_mask = (1u32 << blen) - 1;
// The acc_bits least-significant bits of acc_value represent a bit sequence
// in big-endian order.
let mut acc_buffer = [0u8; 4];
let mut acc_bits = 0usize;
let mut acc_value = 0u32;
for i in 0usize..compressed.len() {
acc_value = (acc_value << 8) | (compressed[i] as u32);
acc_bits += 8;
// When we have blen or more bits in the accumulator, write the next
// output element.
if acc_bits >= blen {
acc_bits -= blen;
for x in pad..out_width {
acc_buffer[x] = (
// Big-endian
(acc_value >> (acc_bits + (8 * (out_width - x - 1)))) as u8
) & (
// Apply blen_mask across byte boundaries
((blen_mask >> (8 * (out_width - x - 1))) & 0xFF) as u8
);
}
expand_single(&acc_buffer)
}
}
}
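A hedged usage sketch for `expand_array` as defined above, with values chosen purely for illustration: with `blen = 12` and `pad = 0`, the packed bytes `[0xAB, 0xCD, 0xEF]` expand into the two 12-bit values `0xABC` and `0xDEF`, each delivered big-endian in the first `(blen + 7) / 8 + pad` bytes of the 4-byte buffer:

```rust
// Collect each expanded 12-bit value from the first two buffer bytes.
let mut out: Vec<u16> = Vec::new();
expand_array(&[0xAB, 0xCD, 0xEF], 12, 0, &mut |buffer: &[u8; 4]| {
	out.push(((buffer[0] as u16) << 8) | buffer[1] as u16);
});
assert_eq!(out, vec![0x0ABC, 0x0DEF]);
```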
fn generate_hash(context: &Blake2b, index: u32, hash: &mut [u8]) {
let mut context = context.clone();
context.update(&index.to_le_bytes());
hash.copy_from_slice(context.finalize().as_bytes())
}
fn merge_rows(row1: &[u8], row2: &[u8], merged_row: &mut [u8], len: usize, indices_len: usize, trim: usize) {
let mut merged_row_pos = 0;
for i in trim..len {
row[i - trim] = row1[i] ^ row2[i];
merged_row[merged_row_pos] = row1[i] ^ row2[i];
merged_row_pos += 1;
}
if indices_before(row1, row2, len, indices_len) {
row[len - trim..len - trim + indices_len]
merged_row[len - trim..len - trim + indices_len]
.clone_from_slice(&row1[len..len + indices_len]);
row[len - trim + indices_len..len - trim + indices_len + indices_len]
merged_row[len - trim + indices_len..len - trim + indices_len + indices_len]
.clone_from_slice(&row2[len..len + indices_len]);
} else {
row[len - trim..len - trim + indices_len]
merged_row[len - trim..len - trim + indices_len]
.clone_from_slice(&row2[len..len + indices_len]);
row[len - trim + indices_len..len - trim + indices_len + indices_len]
merged_row[len - trim + indices_len..len - trim + indices_len + indices_len]
.clone_from_slice(&row1[len..len + indices_len]);
}
row
}
fn distinct_indices(row1: &[u8], row2: &[u8], len: usize, indices_len: usize) -> bool {
@ -171,92 +295,64 @@ fn indices_before(row1: &[u8], row2: &[u8], len: usize, indices_len: usize) -> b
false
}
fn generate_hash(context: &Blake2b, g: u32) -> Vec<u8> {
let mut context = context.clone();
context.update(&to_little_endian(g));
context.finalize().as_bytes().to_vec()
}
mod on_chain_equihash {
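/// 50-byte hash buffer: the `HASH_SIZE` of the on-chain instance (N = 200 gives two 25-byte BSTRs per hash).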
pub struct Hash(pub [u8; 50]);
fn get_indices_from_minimal(solution: &[u8], collision_bit_length: usize) -> Vec<u32> {
let indices_len = 8 * 4 * solution.len() / (collision_bit_length + 1);
let byte_pad = 4 - ((collision_bit_length + 1 + 7) / 8);
let array = expand_array(solution, collision_bit_length + 1, byte_pad);
let mut ret = Vec::new();
for i in 0..indices_len / 4 {
ret.push(array_to_eh_index(&array[i*4..i*4 + 4]));
}
ret
}
fn array_to_eh_index(data: &[u8]) -> u32 {
BigEndian::read_u32(data)
}
fn expand_array(data: &[u8], bit_len: usize, byte_pad: usize) -> Vec<u8> {
let mut array = Vec::new();
let out_width = (bit_len + 7) / 8 + byte_pad;
let bit_len_mask = (1u32 << bit_len) - 1;
// The acc_bits least-significant bits of acc_value represent a bit sequence
// in big-endian order.
let mut acc_bits = 0usize;
let mut acc_value = 0u32;
for i in 0usize..data.len() {
acc_value = (acc_value << 8) | (data[i] as u32);
acc_bits += 8;
// When we have bit_len or more bits in the accumulator, write the next
// output element.
if acc_bits >= bit_len {
acc_bits -= bit_len;
for _ in 0usize..byte_pad {
array.push(0);
}
for x in byte_pad..out_width {
array.push((
// Big-endian
(acc_value >> (acc_bits + (8 * (out_width - x - 1)))) as u8
) & (
// Apply bit_len_mask across byte boundaries
((bit_len_mask >> (8 * (out_width - x - 1))) & 0xFF) as u8
));
}
}
impl Default for Hash {
fn default() -> Self { Hash([0; 50]) }
}
array
}
impl AsRef<[u8]> for Hash {
fn as_ref(&self) -> &[u8] { &self.0 }
}
fn to_little_endian(num: u32) -> [u8; 4] {
let mut le_num = [0u8; 4];
LittleEndian::write_u32(&mut le_num[..], num);
le_num
}
fn to_big_endian(num: u32) -> [u8; 4] {
let mut be_num = [0u8; 4];
BigEndian::write_u32(&mut be_num[..], num);
be_num
impl AsMut<[u8]> for Hash {
fn as_mut(&mut self) -> &mut [u8] { &mut self.0 }
}
}
#[cfg(test)]
mod tests {
use primitives::bigint::U256;
use byteorder::WriteBytesExt;
use super::*;
use primitives::bigint::U256;
struct TestEquihash;
impl Equihash for TestEquihash {
const N: u32 = 96;
const K: u32 = 5;
const BLAKE2B_PERSONALIZATION: [u8; 16] = [
0x5a, 0x63, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x57, // b"ZcashPoW"
0x60, 0x00, 0x00, 0x00, // LE(N)
0x05, 0x00, 0x00, 0x00, // LE(K)
];
type Hash = TestHash;
}
struct TestHash(pub [u8; 60]);
impl Default for TestHash {
fn default() -> Self { TestHash([0; 60]) }
}
impl AsRef<[u8]> for TestHash {
fn as_ref(&self) -> &[u8] { &self.0 }
}
impl AsMut<[u8]> for TestHash {
fn as_mut(&mut self) -> &mut [u8] { &mut self.0 }
}
fn get_minimal_from_indices(indices: &[u32], collision_bit_length: usize) -> Vec<u8> {
let indices_len = indices.len() * 4;
let min_len = (collision_bit_length + 1) * indices_len / (8 * 4);
let byte_pad = 4 - ((collision_bit_length + 1) + 7) / 8;
let mut array = Vec::new();
for i in 0..indices.len() {
let mut be_index = Vec::new();
be_index.write_u32::<BigEndian>(indices[i]).unwrap();
array.extend(be_index);
for index in indices.iter() {
array.extend_from_slice(&index.to_be_bytes());
}
let mut ret = vec![0u8; min_len];
@ -293,28 +389,30 @@ mod tests {
}
}
fn test_equihash_verifier(n: u32, k: u32, input: &[u8], nonce: U256, solution: &[u32]) -> bool {
let solution = get_minimal_from_indices(solution, (n / (k + 1)) as usize);
fn test_equihash_verifier(input: &[u8], nonce: U256, solution: &[u32]) -> bool {
let solution = get_minimal_from_indices(solution, TestEquihash::BSTR_INDEX_BITS);
let mut le_nonce = vec![0; 32];
nonce.to_little_endian(&mut le_nonce);
let mut input = input.to_vec();
input.extend(le_nonce);
let params = EquihashParams { n, k };
verify_equihash_solution(&params, &input, &solution)
verify_equihash_solution::<TestEquihash>(&input, &solution)
}
#[test]
fn verify_equihash_solution_works() {
assert!(test_equihash_verifier(
96, 5, b"Equihash is an asymmetric PoW based on the Generalised Birthday problem.",
b"Equihash is an asymmetric PoW based on the Generalised Birthday problem.",
U256::one(), &vec![
2261, 15185, 36112, 104243, 23779, 118390, 118332, 130041, 32642, 69878, 76925, 80080, 45858, 116805, 92842, 111026, 15972, 115059, 85191, 90330, 68190, 122819, 81830, 91132, 23460, 49807, 52426, 80391, 69567, 114474, 104973, 122568,
],
));
}
#[test]
fn test_equihash_on_real_block() {
let block = test_data::block_h170();
assert!(verify_block_equihash_solution((200, 9), &block.block_header));
}
}

View File

@ -60,6 +60,8 @@ extern crate byteorder;
#[cfg(test)]
extern crate rand;
extern crate rustc_hex as hex;
#[macro_use]
extern crate bitflags;
extern crate storage;
extern crate chain;
@ -127,15 +129,19 @@ pub use work::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_has
pub use deployments::Deployments;
pub use tree_cache::TreeCache;
#[derive(Debug, Clone, Copy, PartialEq)]
/// Blocks verification level.
pub enum VerificationLevel {
/// Full verification.
Full,
/// Transaction scripts are not checked.
Header,
/// No verification at all.
NoVerification,
bitflags! {
pub struct VerificationLevel: u32 {
/// Base level: perform full block verification.
const FULL = 0x00000001;
/// Base level: transaction scripts are not checked.
const HEADER = 0x00000002;
/// Base level: no blocks verification at all.
const NO_VERIFICATION = 0x00000004;
/// This bit is set if header pre-verification (non-context) has already been performed for the block.
const HINT_HEADER_PRE_VERIFIED = 0x10000000;
}
}
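A hedged usage sketch of the new flags type: base levels combine with hint bits via `|`, and call sites test membership with `intersects` and `contains` (both generated by the `bitflags` macro), which is how the verifier code decides what to skip:

```rust
// Hypothetical combination: header-only verification of a block whose
// header was already pre-verified during header sync.
let level = VerificationLevel::HEADER | VerificationLevel::HINT_HEADER_PRE_VERIFIED;
assert!(level.intersects(VerificationLevel::HEADER | VerificationLevel::NO_VERIFICATION));
assert!(level.contains(VerificationLevel::HINT_HEADER_PRE_VERIFIED));
assert!(!level.contains(VerificationLevel::FULL));
```

Flags rather than an enum let orthogonal hints such as `HINT_HEADER_PRE_VERIFIED` ride along with a base level without multiplying enum variants.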
/// Interface for block verification

View File

@ -1,7 +1,5 @@
#![allow(dead_code)]
use chain::{JoinSplit, JoinSplitProof, JoinSplitDescription};
use crypto::{BnU256, Pghr13Proof, pghr13_verify};
use crypto::{Pghr13Proof, pghr13_verify};
/// Join split verification error kind
#[derive(Debug)]
@ -12,25 +10,6 @@ pub enum ErrorKind {
InvalidEncoding,
}
/// Join split verification error
#[derive(Debug)]
pub struct Error {
index: usize,
kind: ErrorKind,
}
impl Error {
pub fn proof(idx: usize) -> Self {
Error { kind: ErrorKind::InvalidProof, index: idx }
}
pub fn encoding(idx: usize) -> Self {
Error { kind: ErrorKind::InvalidEncoding, index: idx }
}
pub fn index(&self) -> usize { self.index }
}
// blake2 hash of (random_seed, nullifier[0], nullifier[1], pub_key_hash) with 'ZcashComputehSig' personal token
pub fn compute_hsig(random_seed: &[u8; 32], nullifiers: &[[u8; 32]; 2], pub_key_hash: &[u8; 32]) -> [u8; 32] {
use crypto::blake2::Params;
@ -98,12 +77,6 @@ impl Input {
Input { bits: bitvec::BitVec::with_capacity(size) }
}
fn push_u256(&mut self, val: BnU256) {
for i in 0..256 {
self.bits.push(val.get_bit(255-i).expect("for 0..256 index range will always return some; qeed"))
}
}
fn push_hash(&mut self, val: &[u8; 32]) {
self.push_bytes(&val[..])
}

View File

@ -15,7 +15,7 @@ pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider) -> u3
pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeaderProvider) -> u32 {
let mut timestamps: Vec<_> = BlockAncestors::new(previous_header_hash.clone().into(), store)
.take(11)
.map(|header| header.time)
.map(|header| header.raw.time)
.collect();
if timestamps.is_empty() {

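A hedged sketch of the median-time-past rule that `median_timestamp_inclusive` implements: take the timestamps of up to 11 most recent ancestors, sort them, and pick the middle element:

```rust
// Illustration of the selection step; the chain walk is shown above.
fn median(mut timestamps: Vec<u32>) -> Option<u32> {
	if timestamps.is_empty() {
		return None;
	}
	timestamps.sort();
	Some(timestamps[timestamps.len() / 2])
}
```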
View File

@ -5,27 +5,39 @@ use error::Error;
use verify_block::BlockVerifier;
use verify_header::HeaderVerifier;
use verify_transaction::TransactionVerifier;
use VerificationLevel;
pub struct ChainVerifier<'a> {
pub block: BlockVerifier<'a>,
pub header: HeaderVerifier<'a>,
pub header: Option<HeaderVerifier<'a>>,
pub transactions: Vec<TransactionVerifier<'a>>,
}
impl<'a> ChainVerifier<'a> {
pub fn new(block: &'a IndexedBlock, consensus: &'a ConsensusParams, current_time: u32) -> Self {
pub fn new(
block: &'a IndexedBlock,
consensus: &'a ConsensusParams,
current_time: u32,
verification_level: VerificationLevel,
) -> Self {
trace!(target: "verification", "Block pre-verification {}", block.hash().to_reversed_str());
ChainVerifier {
block: BlockVerifier::new(block, consensus),
header: HeaderVerifier::new(&block.header, consensus, current_time),
header: if !verification_level.intersects(VerificationLevel::HINT_HEADER_PRE_VERIFIED) {
Some(HeaderVerifier::new(&block.header, consensus, current_time))
} else {
None
},
transactions: block.transactions.iter().map(|tx| TransactionVerifier::new(tx, consensus)).collect(),
}
}
pub fn check(&self) -> Result<(), Error> {
try!(self.block.check());
try!(self.header.check());
try!(self.check_transactions());
self.block.check()?;
if let Some(ref header) = self.header {
header.check()?;
}
self.check_transactions()?;
Ok(())
}

View File

@ -49,23 +49,23 @@ pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHea
// then allow mining of a min-difficulty block.
if let Some(allow_min_difficulty_after_height) = consensus.pow_allow_min_difficulty_after_height {
if height >= allow_min_difficulty_after_height {
if time > parent_header.time + consensus.pow_target_spacing * 6 {
if time > parent_header.raw.time + consensus.pow_target_spacing * 6 {
return max_bits;
}
}
}
// Find the first block in the averaging interval + calculate total difficulty for blocks in the interval
let (count, oldest_hash, bits_total) = BlockAncestors::new(parent_header.previous_header_hash.into(), store)
let (count, oldest_hash, bits_total) = BlockAncestors::new(parent_header.raw.previous_header_hash.into(), store)
.take(consensus.pow_averaging_window as usize - 1)
.fold((1, Default::default(), U256::from(parent_header.bits)), |(count, _, bits_total), header|
(count + 1, header.previous_header_hash, bits_total.overflowing_add(header.bits.into()).0));
.fold((1, Default::default(), U256::from(parent_header.raw.bits)), |(count, _, bits_total), header|
(count + 1, header.raw.previous_header_hash, bits_total.overflowing_add(header.raw.bits.into()).0));
if count != consensus.pow_averaging_window {
return max_bits;
}
let bits_avg = bits_total / consensus.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_header.hash(), store);
let parent_mtp = median_timestamp_inclusive(parent_hash, store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, consensus, max_bits)
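A simplified sketch of the averaging step above, with `u64` standing in for the `U256` target arithmetic: each ancestor's compact `bits` is widened to a target, summed over the averaging window, and divided by the window size; `calculate_work_required` (not shown) then adjusts the average by the ratio of the actual median-time-past span to the expected span:

```rust
// Hedged illustration only: the real code sums U256 targets converted
// from compact bits, not plain integers.
fn average_target(window_targets: &[u64]) -> Option<u64> {
	if window_targets.is_empty() {
		return None;
	}
	let total: u64 = window_targets.iter().sum();
	Some(total / window_targets.len() as u64)
}
```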
@ -108,7 +108,7 @@ mod tests {
use primitives::bigint::U256;
use primitives::hash::H256;
use network::{Network, ConsensusParams};
use chain::BlockHeader;
use chain::{BlockHeader, IndexedBlockHeader};
use storage::{BlockHeaderProvider, BlockRef};
use timestamp::median_timestamp_inclusive;
use super::{work_required, calculate_work_required};
@ -150,11 +150,11 @@ mod tests {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}.map(Into::into)
}
}