Merge pull request #63 from paritytech/minimize_hashing2

Minimize chain structures hashing (part#2)
This commit is contained in:
Svyatoslav Nikolsky 2019-03-25 18:08:09 +03:00 committed by GitHub
commit a31b7c0ac7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 143 additions and 175 deletions

View File

@ -44,7 +44,7 @@ pub fn fetch(benchmark: &mut Benchmark) {
benchmark.start();
for _ in 0..BLOCKS {
let block = store.block(BlockRef::Hash(hashes[0].clone())).unwrap();
assert_eq!(&block.hash(), &hashes[0]);
assert_eq!(block.hash(), &hashes[0]);
}
benchmark.stop();
}

View File

@ -153,7 +153,6 @@ Whenever a miner mines a block, it includes a special transaction called a coinb
### Block (block.rs)
A relatively straightforward implementation of the data structure described above. A `block` is a Rust `struct`. It implements the following traits:
* ```From<&'static str>```: This trait takes in a string and outputs a `block`. It is implemented via the `from` function, which deserializes the received string into a `block` data structure. Read more about serialization [here](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch06.asciidoc#transaction-serializationoutputs) (in the context of transactions).
* ```RepresentH256```: This trait takes a `block` data structure and hashes it, returning the hash.
The `block` has a few methods of its own. The entirety of these are simple getter methods.

View File

@ -1,9 +1,11 @@
use hex::FromHex;
use hash::H256;
use ser::{deserialize};
use merkle_root::merkle_root;
use {BlockHeader, Transaction};
use super::RepresentH256;
#[cfg(any(test, feature = "test-helpers"))]
use hash::H256;
#[cfg(any(test, feature = "test-helpers"))]
use merkle_root::merkle_root;
#[derive(Debug, PartialEq, Clone, Serializable, Deserializable)]
pub struct Block {
@ -17,16 +19,13 @@ impl From<&'static str> for Block {
}
}
impl RepresentH256 for Block {
fn h256(&self) -> H256 { self.hash() }
}
impl Block {
pub fn new(header: BlockHeader, transactions: Vec<Transaction>) -> Self {
Block { block_header: header, transactions: transactions }
}
/// Returns block's merkle root.
#[cfg(any(test, feature = "test-helpers"))]
pub fn merkle_root(&self) -> H256 {
let hashes = self.transactions.iter().map(Transaction::hash).collect::<Vec<H256>>();
merkle_root(&hashes)
@ -40,6 +39,7 @@ impl Block {
&self.block_header
}
#[cfg(any(test, feature = "test-helpers"))]
pub fn hash(&self) -> H256 {
self.block_header.hash()
}

View File

@ -21,8 +21,10 @@ pub struct BlockHeader {
}
impl BlockHeader {
/// Compute hash of the block header.
#[cfg(any(test, feature = "test-helpers"))]
pub fn hash(&self) -> H256 {
dhash256(&serialize(self))
block_header_hash(self)
}
pub fn equihash_input(&self) -> Bytes {
@ -58,6 +60,11 @@ impl From<&'static str> for BlockHeader {
}
}
/// Compute hash of the block header.
pub(crate) fn block_header_hash(block_header: &BlockHeader) -> H256 {
dhash256(&serialize(block_header))
}
#[cfg(test)]
mod tests {
use ser::{Reader, Error as ReaderError, Stream};

View File

@ -1,7 +1,7 @@
use std::{io, cmp, fmt};
use hash::H256;
use ser::{Deserializable, Reader, Error as ReaderError};
use block_header::BlockHeader;
use block_header::{BlockHeader, block_header_hash};
use read_and_hash::ReadAndHash;
#[derive(Clone)]
@ -37,7 +37,7 @@ impl IndexedBlockHeader {
///
/// Hashes the contents of block header.
pub fn from_raw(header: BlockHeader) -> Self {
IndexedBlockHeader::new(header.hash(), header)
IndexedBlockHeader::new(block_header_hash(&header), header)
}
}

View File

@ -2,7 +2,7 @@ use std::{cmp, io, fmt};
use hash::H256;
use heapsize::HeapSizeOf;
use ser::{Deserializable, Reader, Error as ReaderError};
use transaction::Transaction;
use transaction::{Transaction, transaction_hash};
use read_and_hash::ReadAndHash;
#[derive(Default, Clone)]
@ -46,7 +46,7 @@ impl IndexedTransaction {
/// Hashes transaction contents.
pub fn from_raw<T>(transaction: T) -> Self where Transaction: From<T> {
let transaction = Transaction::from(transaction);
Self::new(transaction.hash(), transaction)
Self::new(transaction_hash(&transaction), transaction)
}
}

View File

@ -22,10 +22,6 @@ mod indexed_block;
mod indexed_header;
mod indexed_transaction;
pub trait RepresentH256 {
fn h256(&self) -> hash::H256;
}
pub use primitives::{hash, bytes, bigint, compact};
pub use transaction::{BTC_TX_VERSION, SPROUT_TX_VERSION, OVERWINTER_TX_VERSION, SAPLING_TX_VERSION};

View File

@ -129,8 +129,9 @@ impl Transaction {
version
}
#[cfg(any(test, feature = "test-helpers"))]
pub fn hash(&self) -> H256 {
dhash256(&serialize(self))
transaction_hash(self)
}
pub fn inputs(&self) -> &[TransactionInput] {
@ -328,6 +329,10 @@ impl Deserializable for Transaction {
}
}
pub(crate) fn transaction_hash(transaction: &Transaction) -> H256 {
dhash256(&serialize(transaction))
}
#[cfg(test)]
mod tests {
use hex::ToHex;

View File

@ -5,7 +5,7 @@ use parking_lot::RwLock;
use hash::H256;
use bytes::Bytes;
use chain::{
IndexedBlock, IndexedBlockHeader, IndexedTransaction, BlockHeader, Block, Transaction,
IndexedBlock, IndexedBlockHeader, IndexedTransaction,
OutPoint, TransactionOutput,
};
use ser::{
@ -22,7 +22,7 @@ use kv::{
COL_SPROUT_BLOCK_ROOTS, COL_TREE_STATES,
};
use storage::{
BlockRef, Error, BlockHeaderProvider, BlockProvider, BlockOrigin, TransactionMeta, IndexedBlockProvider,
BlockRef, Error, BlockHeaderProvider, BlockProvider, BlockOrigin, TransactionMeta,
TransactionMetaProvider, TransactionProvider, TransactionOutputProvider, BlockChain, Store,
SideChainOrigin, ForkChain, Forkable, CanonStore, BestBlock, NullifierTracker,
EpochTag, EpochRef, SproutTreeState, SaplingTreeState, TreeStateProvider,
@ -233,7 +233,7 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
sidechain_route.push(next_hash.clone());
next_hash = self.block_header(next_hash.into())
.expect("not to find orphaned side chain in database; qed")
.previous_header_hash;
.raw.previous_header_hash;
}
}
}
@ -308,12 +308,10 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
/// Rollbacks single best block.
fn rollback_best(&self) -> Result<H256, Error> {
let decanonized = match self.block(self.best_block.read().hash.clone().into()) {
Some(block) => block,
None => return Ok(H256::default()),
};
let best_block_hash = self.best_block.read().hash.clone();
let tx_to_decanonize = self.block_transaction_hashes(best_block_hash.into());
let decanonized_hash = self.decanonize()?;
debug_assert_eq!(decanonized.hash(), decanonized_hash);
debug_assert_eq!(best_block_hash, decanonized_hash);
// and now remove decanonized block from database
// all code currently works in assumption that origin of all blocks is one of:
@ -321,8 +319,8 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
let mut update = DBTransaction::new();
update.delete(Key::BlockHeader(decanonized_hash.clone()));
update.delete(Key::BlockTransactions(decanonized_hash.clone()));
for tx in decanonized.transactions.into_iter() {
update.delete(Key::Transaction(tx.hash()));
for tx_hash in tx_to_decanonize {
update.delete(Key::Transaction(tx_hash));
}
self.db.write(update).map_err(Error::DatabaseError)?;
@ -336,7 +334,7 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
/// Updates meta data.
pub fn canonize(&self, hash: &H256) -> Result<(), Error> {
let mut best_block = self.best_block.write();
let block = match self.indexed_block(hash.clone().into()) {
let block = match self.block(hash.clone().into()) {
Some(block) => block,
None => return Err(Error::CannotCanonize),
};
@ -431,7 +429,7 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
pub fn decanonize(&self) -> Result<H256, Error> {
let mut best_block = self.best_block.write();
let block = match self.indexed_block(best_block.hash.clone().into()) {
let block = match self.block(best_block.hash.clone().into()) {
Some(block) => block,
None => return Err(Error::CannotCanonize),
};
@ -533,13 +531,16 @@ impl<T> BlockChainDatabase<T> where T: KeyValueDatabase {
impl<T> BlockHeaderProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
self.block_header(block_ref).map(|header| serialize(&header))
self.block_header(block_ref).map(|header| serialize(&header.raw))
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.resolve_hash(block_ref)
.and_then(|hash| self.get(Key::BlockHeader(hash)))
.and_then(Value::as_block_header)
.and_then(|block_hash| {
self.get(Key::BlockHeader(block_hash.clone()))
.and_then(Value::as_block_header)
.map(|header| IndexedBlockHeader::new(block_hash, header))
})
}
}
@ -554,13 +555,13 @@ impl<T> BlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
.and_then(Value::as_block_hash)
}
fn block(&self, block_ref: BlockRef) -> Option<Block> {
fn block(&self, block_ref: BlockRef) -> Option<IndexedBlock> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.block_header(block_hash.clone().into())
.map(|header| {
let transactions = self.block_transactions(block_hash.into());
Block::new(header, transactions)
IndexedBlock::new(header, transactions)
})
})
}
@ -579,44 +580,12 @@ impl<T> BlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
.unwrap_or_default()
}
fn block_transactions(&self, block_ref: BlockRef) -> Vec<Transaction> {
fn block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction> {
self.block_transaction_hashes(block_ref)
.into_iter()
.filter_map(|hash| self.get(Key::Transaction(hash)))
.filter_map(Value::as_transaction)
.collect()
}
}
impl<T> IndexedBlockProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn indexed_block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.get(Key::BlockHeader(block_hash.clone()))
.and_then(Value::as_block_header)
.map(|header| IndexedBlockHeader::new(block_hash, header))
})
}
fn indexed_block(&self, block_ref: BlockRef) -> Option<IndexedBlock> {
self.resolve_hash(block_ref)
.and_then(|block_hash| {
self.indexed_block_header(block_hash.clone().into())
.map(|header| {
let transactions = self.indexed_block_transactions(block_hash.into());
IndexedBlock::new(header, transactions)
})
})
}
fn indexed_block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction> {
self.block_transaction_hashes(block_ref)
.into_iter()
.filter_map(|hash| {
self.get(Key::Transaction(hash.clone()))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(hash, tx))
})
.filter_map(|hash| self.get(Key::Transaction(hash))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(hash, tx)))
.collect()
}
}
@ -630,12 +599,13 @@ impl<T> TransactionMetaProvider for BlockChainDatabase<T> where T: KeyValueDatab
impl<T> TransactionProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
self.transaction(hash).map(|tx| serialize(&tx))
self.transaction(hash).map(|tx| serialize(&tx.raw))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction> {
self.get(Key::Transaction(hash.clone()))
.and_then(Value::as_transaction)
.map(|tx| IndexedTransaction::new(*hash, tx))
}
}
@ -644,7 +614,7 @@ impl<T> TransactionOutputProvider for BlockChainDatabase<T> where T: KeyValueDat
// return previous transaction outputs only for canon chain transactions
self.transaction_meta(&prevout.hash)
.and_then(|_| self.transaction(&prevout.hash))
.and_then(|tx| tx.outputs.into_iter().nth(prevout.index as usize))
.and_then(|tx| tx.raw.outputs.into_iter().nth(prevout.index as usize))
}
fn is_spent(&self, prevout: &OutPoint) -> bool {
@ -675,7 +645,7 @@ impl<T> TreeStateProvider for BlockChainDatabase<T> where T: KeyValueDatabase {
fn sapling_block_root(&self, block_hash: &H256) -> Option<H256> {
self.block_header(BlockRef::Hash(*block_hash))
.map(|header| header.final_sapling_root)
.map(|header| header.raw.final_sapling_root)
}
}
@ -729,7 +699,7 @@ impl<T> Store for BlockChainDatabase<T> where T: KeyValueDatabase {
}
/// get best header
fn best_header(&self) -> BlockHeader {
fn best_header(&self) -> IndexedBlockHeader {
self.block_header(self.best_block().hash.into()).expect("best block header should be in db; qed")
}
}

View File

@ -811,8 +811,8 @@ impl TransactionProvider for MemoryPool {
self.get(hash).map(|t| serialize(t))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
self.get(hash).cloned()
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction> {
self.get(hash).cloned().map(|tx| IndexedTransaction::new(*hash, tx))
}
}

View File

@ -2,7 +2,7 @@
//! https://www.anintegratedworld.com/unravelling-the-mysterious-block-chain-magic-number/
use compact::Compact;
use chain::Block;
use chain::IndexedBlock;
use primitives::hash::H256;
use primitives::bigint::U256;
@ -69,11 +69,12 @@ impl Network {
}
}
pub fn genesis_block(&self) -> Block {
pub fn genesis_block(&self) -> IndexedBlock {
match *self {
Network::Mainnet | Network::Other(_) => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe406
09a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Testnet => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac40000000000000000000000000000000000000000000000000000000000000000a11e1358ffff07200600000000000000000000000000000000000000000000000000000000000000fd400500a6a51259c3f6732481e2d035197218b7a69504461d04335503cd69759b2d02bd2b53a9653f42cb33c608511c953673fa9da76170958115fe92157ad3bb5720d927f18e09459bf5c6072973e143e20f9bdf0584058c96b7c2234c7565f100d5eea083ba5d3dbaff9f0681799a113e7beff4a611d2b49590563109962baa149b628aae869af791f2f70bb041bd7ebfa658570917f6654a142b05e7ec0289a4f46470be7be5f693b90173eaaa6e84907170f32602204f1f4e1c04b1830116ffd0c54f0b1caa9a5698357bd8aa1f5ac8fc93b405265d824ba0e49f69dab5446653927298e6b7bdc61ee86ff31c07bde86331b4e500d42e4e50417e285502684b7966184505b885b42819a88469d1e9cf55072d7f3510f85580db689302eab377e4e11b14a91fdd0df7627efc048934f0aff8e7eb77eb17b3a95de13678004f2512293891d8baf8dde0ef69be520a58bbd6038ce899c9594cf3e30b8c3d9c7ecc832d4c19a6212747b50724e6f70f6451f78fd27b58ce43ca33b1641304a916186cfbe7dbca224f55d08530ba851e4df22baf7ab7078e9cbea46c0798b35a750f54103b0cdd08c81a6505c4932f6bfbd492a9fced31d54e98b6370d4c96600552fcf5b37780ed18c8787d03200963600db297a8f05dfa551321d17b9917edadcda51e274830749d133ad226f8bb6b94f13b4f77e67b35b71f52112ce9ba5da706ad9573584a2570a4ff25d29ab9761a06bdcf2c33638bf9baf2054825037881c14adf3816ba0cbd0fca689aad3ce16f2fe362c98f48134a9221765d939f0b49677d1c2447e56b46859f1810e2cf23e82a53e0d44f34dae932581b3b7f49eaec59af872cf9de757a964f7b33d143a36c270189508fcafe19398e4d2966948164d40556b05b7ff532f66f5d1edc41334ef742f78221dfe0c7ae2275bb3f24c89ae35f00afeea4e6ed187b866b209dc6e83b660593fce7c40e143beb07ac86c56f39e895385924667efe3a3f031938753c7764a2dbeb0a643fd359c46e614873fd0424e435fa7fac083b9a41a9d6bf7e284eee537ea7c50dd239f359941a43dc982745184bf3ee31a8dc850316aa9c6b66d6985acee814373be3458550659e1a06287c3b3b76a185c5cb93e38c1eebcf34ff072894b6430aed8d34122dafd925c46a515cca79b0269c92b301890ca6b0dc8
b679cdac0f23318c105de73d7a46d16d2dad988d49c22e9963c117960bdc70ef0db6b091cf09445a516176b7f6d58ec29539166cc8a38bbff387acefffab2ea5faad0e8bb70625716ef0edf61940733c25993ea3de9f0be23d36e7cb8da10505f9dc426cd0e6e5b173ab4fff8c37e1f1fb56d1ea372013d075e0934c6919393cfc21395eea20718fad03542a4162a9ded66c814ad8320b2d7c2da3ecaf206da34c502db2096d1c46699a91dd1c432f019ad434e2c1ce507f91104f66f491fed37b225b8e0b2888c37276cfa0468fc13b8d593fd9a2675f0f5b20b8a15f8fa7558176a530d6865738ddb25d3426dab905221681cf9da0e0200eea5b2eba3ad3a5237d2a391f9074bf1779a2005cee43eec2b058511532635e0fea61664f531ac2b356f40db5c5d275a4cf5c82d468976455af4e3362cc8f71aa95e71d394aff3ead6f7101279f95bcd8a0fedce1d21cb3c9f6dd3b182fce0db5d6712981b651f29178a24119968b14783cafa713bc5f2a65205a42e4ce9dc7ba462bdb1f3e4553afc15f5f39998fdb53e7e231e3e520a46943734a007c2daa1eda9f495791657eefcac5c32833936e568d06187857ed04d7b97167ae207c5c5ae54e528c36016a984235e9c5b2f0718d7b3aa93c7822ccc772580b6599671b3c02ece8a21399abd33cfd3028790133167d0a97e7de53dc8ff0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Regtest | Network::Unitest => "TODO".into(),
Network::Mainnet | Network::Regtest | Network::Unitest | Network::Other(_) =>
IndexedBlock::from_raw("040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175
bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into()),
Network::Testnet =>
IndexedBlock::from_raw("040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac40000000000000000000000000000000000000000000000000000000000000000a11e1358ffff07200600000000000000000000000000000000000000000000000000000000000000fd400500a6a51259c3f6732481e2d035197218b7a69504461d04335503cd69759b2d02bd2b53a9653f42cb33c608511c953673fa9da76170958115fe92157ad3bb5720d927f18e09459bf5c6072973e143e20f9bdf0584058c96b7c2234c7565f100d5eea083ba5d3dbaff9f0681799a113e7beff4a611d2b49590563109962baa149b628aae869af791f2f70bb041bd7ebfa658570917f6654a142b05e7ec0289a4f46470be7be5f693b90173eaaa6e84907170f32602204f1f4e1c04b1830116ffd0c54f0b1caa9a5698357bd8aa1f5ac8fc93b405265d824ba0e49f69dab5446653927298e6b7bdc61ee86ff31c07bde86331b4e500d42e4e50417e285502684b7966184505b885b42819a88469d1e9cf55072d7f3510f85580db689302eab377e4e11b14a91fdd0df7627efc048934f0aff8e7eb77eb17b3a95de13678004f2512293891d8baf8dde0ef69be520a58bbd6038ce899c9594cf3e30b8c3d9c7ecc832d4c19a6212747b50724e6f70f6451f78fd27b58ce43ca33b1641304a916186cfbe7dbca224f55d08530ba851e4df22baf7ab7078e9cbea46c0798b35a750f54103b0cdd08c81a6505c4932f6bfbd492a9fced31d54e98b6370d4c96600552fcf5b37780ed18c8787d03200963600db297a8f05dfa551321d17b9917edadcda51e274830749d133ad226f8bb6b94f13b4f77e67b35b71f52112ce9ba5da706ad9573584a2570a4ff25d29ab9761a06bdcf2c33638bf9baf2054825037881c14adf3816ba0cbd0fca689aad3ce16f2fe362c98f48134a9221765d939f0b49677d1c2447e56b46859f1810e2cf23e82a53e0d44f34dae932581b3b7f49eaec59af872cf9de757a964f7b33d143a36c270189508fcafe19398e4d2966948164d40556b05b7ff532f66f5d1edc41334ef742f78221dfe0c7ae2275bb3f24c89ae35f00afeea4e6ed187b866b209dc6e83b660593fce7c40e143beb07ac86c56f39e895385924667efe3a3f031938753c7764a2dbeb0a643fd359c46e614873fd0424e435fa7fac083b9a41a9d6bf7e284eee537ea7c50dd239f359941a43dc982745184bf3ee31a8dc850316aa9c6b66d6985acee814373be3458550659e1a06287c3b3b76a185c5cb93e38c1eebcf34ff072894b6430aed8d34122dafd925c46a515cca79b0269c92b301890ca6b0
dc8b679cdac0f23318c105de73d7a46d16d2dad988d49c22e9963c117960bdc70ef0db6b091cf09445a516176b7f6d58ec29539166cc8a38bbff387acefffab2ea5faad0e8bb70625716ef0edf61940733c25993ea3de9f0be23d36e7cb8da10505f9dc426cd0e6e5b173ab4fff8c37e1f1fb56d1ea372013d075e0934c6919393cfc21395eea20718fad03542a4162a9ded66c814ad8320b2d7c2da3ecaf206da34c502db2096d1c46699a91dd1c432f019ad434e2c1ce507f91104f66f491fed37b225b8e0b2888c37276cfa0468fc13b8d593fd9a2675f0f5b20b8a15f8fa7558176a530d6865738ddb25d3426dab905221681cf9da0e0200eea5b2eba3ad3a5237d2a391f9074bf1779a2005cee43eec2b058511532635e0fea61664f531ac2b356f40db5c5d275a4cf5c82d468976455af4e3362cc8f71aa95e71d394aff3ead6f7101279f95bcd8a0fedce1d21cb3c9f6dd3b182fce0db5d6712981b651f29178a24119968b14783cafa713bc5f2a65205a42e4ce9dc7ba462bdb1f3e4553afc15f5f39998fdb53e7e231e3e520a46943734a007c2daa1eda9f495791657eefcac5c32833936e568d06187857ed04d7b97167ae207c5c5ae54e528c36016a984235e9c5b2f0718d7b3aa93c7822ccc772580b6599671b3c02ece8a21399abd33cfd3028790133167d0a97e7de53dc8ff0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into()),
}
}
@ -81,7 +82,7 @@ impl Network {
match *self {
// block #410100, best checkpoint of zcashd as of 12.03.2019
Network::Mainnet => H256::from_reversed_str("0000000002c565958f783a24a4ac17cde898ff525e75ed9baf66861b0b9fcada"),
_ => self.genesis_block().hash(),
_ => self.genesis_block().hash().clone(),
}
}
}

View File

@ -17,8 +17,8 @@ pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
BlockRef::Number(block_ref.parse().map_err(|e| format!("Invalid block hash: {}", e))?)
};
let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash();
let genesis_hash = cfg.network.genesis_block().hash();
let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash;
let genesis_hash = *cfg.network.genesis_block().hash();
let mut best_block_hash = cfg.db.best_block().hash;
debug_assert!(best_block_hash != H256::default()); // genesis inserted in init_db

View File

@ -5,7 +5,6 @@ use app_dirs::{app_dir, AppDataType};
use {storage, APP_INFO};
use db;
use config::Config;
use chain::IndexedBlock;
pub fn open_db(data_dir: &Option<String>, db_cache: usize) -> storage::SharedStore {
let db_path = match *data_dir {
@ -26,7 +25,7 @@ pub fn node_table_path(cfg: &Config) -> PathBuf {
pub fn init_db(cfg: &Config) -> Result<(), String> {
// insert genesis block if db is empty
let genesis_block = IndexedBlock::from_raw(cfg.network.genesis_block());
let genesis_block = cfg.network.genesis_block();
match cfg.db.block_hash(0) {
Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()),
Some(_) => Ok(()),

View File

@ -73,12 +73,12 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
fn raw_block(&self, hash: GlobalH256) -> Option<RawBlock> {
self.storage.block(hash.into())
.map(|block| {
serialize(&block).into()
serialize(&block.to_raw_block()).into()
})
}
fn verbose_block(&self, hash: GlobalH256) -> Option<VerboseBlock> {
self.storage.indexed_block(hash.into())
self.storage.block(hash.into())
.map(|block| {
let height = self.storage.block_number(block.hash());
let confirmations = match height {
@ -113,7 +113,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
None => return Err(transaction_not_found(prev_out.hash)),
};
if prev_out.index >= transaction.outputs.len() as u32 {
if prev_out.index >= transaction.raw.outputs.len() as u32 {
return Err(transaction_output_not_found(prev_out));
}
@ -135,15 +135,15 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
return Err(transaction_not_found(prev_out.hash));
}
let ref script_bytes = transaction.outputs[prev_out.index as usize].script_pubkey;
let ref script_bytes = transaction.raw.outputs[prev_out.index as usize].script_pubkey;
let script: Script = script_bytes.clone().into();
let script_asm = format!("{}", script);
let script_addresses = script.extract_destinations().unwrap_or(vec![]);
Ok(GetTxOutResponse {
bestblock: block_header.hash().into(),
bestblock: block_header.hash.into(),
confirmations: best_block.number - meta.height() + 1,
value: 0.00000001f64 * (transaction.outputs[prev_out.index as usize].value as f64),
value: 0.00000001f64 * (transaction.raw.outputs[prev_out.index as usize].value as f64),
script: TransactionOutputScript {
asm: script_asm,
hex: script_bytes.clone().into(),
@ -160,8 +160,8 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
kind: a.kind,
}).collect(),
},
version: transaction.version,
coinbase: transaction.is_coinbase(),
version: transaction.raw.version,
coinbase: transaction.raw.is_coinbase(),
})
}
}

View File

@ -1,4 +1,4 @@
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use {BlockRef, BlockHeaderProvider};
pub struct BlockAncestors<'a> {
@ -16,12 +16,12 @@ impl<'a> BlockAncestors<'a> {
}
impl<'a> Iterator for BlockAncestors<'a> {
type Item = BlockHeader;
type Item = IndexedBlockHeader;
fn next(&mut self) -> Option<Self::Item> {
let result = self.block.take().and_then(|block| self.headers.block_header(block));
self.block = match result {
Some(ref header) => Some(BlockRef::Hash(header.previous_header_hash.clone())),
Some(ref header) => Some(BlockRef::Hash(header.raw.previous_header_hash.clone())),
None => None,
};
result

View File

@ -1,4 +1,4 @@
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use {BlockRef, BlockHeaderProvider};
pub struct BlockIterator<'a> {
@ -18,7 +18,7 @@ impl<'a> BlockIterator<'a> {
}
impl<'a> Iterator for BlockIterator<'a> {
type Item = (u32, BlockHeader);
type Item = (u32, IndexedBlockHeader);
fn next(&mut self) -> Option<Self::Item> {
let result = self.headers.block_header(BlockRef::Number(self.block));

View File

@ -1,6 +1,6 @@
use hash::H256;
use bytes::Bytes;
use chain::{BlockHeader, Transaction, Block, IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use chain::{IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use {BlockRef};
pub trait BlockHeaderProvider {
@ -8,11 +8,10 @@ pub trait BlockHeaderProvider {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes>;
/// resolves header bytes by block reference (number/hash)
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader>;
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader>;
}
pub trait BlockProvider: BlockHeaderProvider {
/// resolves number by block hash
fn block_number(&self, hash: &H256) -> Option<u32>;
@ -20,7 +19,7 @@ pub trait BlockProvider: BlockHeaderProvider {
fn block_hash(&self, number: u32) -> Option<H256>;
/// resolves deserialized block body by block reference (number/hash)
fn block(&self, block_ref: BlockRef) -> Option<Block>;
fn block(&self, block_ref: BlockRef) -> Option<IndexedBlock>;
/// returns true if store contains given block
fn contains_block(&self, block_ref: BlockRef) -> bool {
@ -31,13 +30,5 @@ pub trait BlockProvider: BlockHeaderProvider {
fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256>;
/// returns all transactions in the block by block reference (number/hash)
fn block_transactions(&self, block_ref: BlockRef) -> Vec<Transaction>;
}
pub trait IndexedBlockProvider: BlockProvider {
fn indexed_block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader>;
fn indexed_block(&self, block_ref: BlockRef) -> Option<IndexedBlock>;
fn indexed_block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction>;
fn block_transactions(&self, block_ref: BlockRef) -> Vec<IndexedTransaction>;
}

View File

@ -36,7 +36,7 @@ pub use block_ancestors::BlockAncestors;
pub use block_chain::{BlockChain, ForkChain, Forkable};
pub use block_iterator::BlockIterator;
pub use block_origin::{BlockOrigin, SideChainOrigin};
pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider};
pub use block_provider::{BlockHeaderProvider, BlockProvider};
pub use block_ref::BlockRef;
pub use duplex_store::{DuplexTransactionOutputProvider, NoopStore};
pub use error::Error;

View File

@ -1,9 +1,8 @@
use std::sync::Arc;
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use {
BestBlock, BlockProvider, BlockHeaderProvider, TransactionProvider, TransactionMetaProvider,
TransactionOutputProvider, BlockChain, IndexedBlockProvider, Forkable, NullifierTracker,
TreeStateProvider,
TransactionOutputProvider, BlockChain, Forkable, NullifierTracker, TreeStateProvider,
};
pub trait CanonStore: Store + Forkable {
@ -16,13 +15,13 @@ pub trait Store: AsSubstore {
fn best_block(&self) -> BestBlock;
/// get best header
fn best_header(&self) -> BlockHeader;
fn best_header(&self) -> IndexedBlockHeader;
}
/// Allows casting Arc<Store> to reference to any substore type
pub trait AsSubstore:
BlockChain +
IndexedBlockProvider +
BlockProvider +
TransactionProvider +
TransactionMetaProvider +
TransactionOutputProvider +
@ -45,7 +44,7 @@ pub trait AsSubstore:
impl<T> AsSubstore for T
where T: BlockChain +
IndexedBlockProvider +
BlockProvider +
TransactionProvider +
TransactionMetaProvider +
TransactionOutputProvider +

View File

@ -1,6 +1,6 @@
use hash::H256;
use bytes::Bytes;
use chain::{Transaction, OutPoint, TransactionOutput};
use chain::{IndexedTransaction, OutPoint, TransactionOutput};
use {TransactionMeta};
/// Should be used to obtain all transactions from canon chain and forks.
@ -14,7 +14,7 @@ pub trait TransactionProvider {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes>;
/// Resolves serialized transaction info by transaction hash.
fn transaction(&self, hash: &H256) -> Option<Transaction>;
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction>;
}
/// Should be used to get canon chain transaction outputs.

View File

@ -1,7 +1,7 @@
use std::collections::{VecDeque, HashSet};
use std::fmt;
use linked_hash_map::LinkedHashMap;
use chain::{BlockHeader, Transaction, IndexedBlockHeader, IndexedBlock, IndexedTransaction, OutPoint, TransactionOutput};
use chain::{IndexedBlockHeader, IndexedBlock, IndexedTransaction, OutPoint, TransactionOutput};
use storage;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation, FeeCalculator};
use primitives::bytes::Bytes;
@ -246,7 +246,7 @@ impl Chain {
/// Get block header by number
pub fn block_header_by_number(&self, number: BlockHeight) -> Option<IndexedBlockHeader> {
if number <= self.best_storage_block.number {
self.storage.indexed_block_header(storage::BlockRef::Number(number))
self.storage.block_header(storage::BlockRef::Number(number))
} else {
self.headers_chain.at(number - self.best_storage_block.number)
}
@ -254,7 +254,7 @@ impl Chain {
/// Get block header by hash
pub fn block_header_by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
if let Some(header) = self.storage.indexed_block_header(storage::BlockRef::Hash(hash.clone())) {
if let Some(header) = self.storage.block_header(storage::BlockRef::Hash(hash.clone())) {
return Some(header);
}
self.headers_chain.by_hash(hash)
@ -406,7 +406,7 @@ impl Chain {
// reverify all transactions from old main branch' blocks
let old_main_blocks_transactions = origin.decanonized_route.into_iter()
.flat_map(|block_hash| self.storage.indexed_block_transactions(block_hash.into()))
.flat_map(|block_hash| self.storage.block_transactions(block_hash.into()))
.collect::<Vec<_>>();
trace!(target: "sync", "insert_best_block, old_main_blocks_transactions: {:?}",
@ -661,7 +661,7 @@ impl storage::TransactionProvider for Chain {
.or_else(|| self.storage.transaction_bytes(hash))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
fn transaction(&self, hash: &H256) -> Option<IndexedTransaction> {
self.memory_pool.read().transaction(hash)
.or_else(|| self.storage.transaction(hash))
}
@ -682,13 +682,13 @@ impl storage::TransactionOutputProvider for Chain {
impl storage::BlockHeaderProvider for Chain {
fn block_header_bytes(&self, block_ref: storage::BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
self.block_header(block_ref).map(|h| serialize(&h.raw))
}
fn block_header(&self, block_ref: storage::BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: storage::BlockRef) -> Option<IndexedBlockHeader> {
match block_ref {
storage::BlockRef::Hash(hash) => self.block_header_by_hash(&hash).map(|h| h.raw),
storage::BlockRef::Number(n) => self.block_header_by_number(n).map(|h| h.raw),
storage::BlockRef::Hash(hash) => self.block_header_by_hash(&hash),
storage::BlockRef::Number(n) => self.block_header_by_number(n),
}
}
}

View File

@ -901,7 +901,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
last_known_hash = &header.hash;
headers_provider.append_header(header.hash.clone(), header.raw.clone());
headers_provider.append_header(header.hash.clone(), header.clone());
}
BlocksHeadersVerificationResult::Success
@ -1092,7 +1092,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// relay block to our peers
if needs_relay && (self.state.is_saturated() || self.state.is_nearly_saturated()) {
for block_hash in insert_result.canonized_blocks_hashes {
if let Some(block) = self.chain.storage().indexed_block(block_hash.into()) {
if let Some(block) = self.chain.storage().block(block_hash.into()) {
self.executor.execute(Task::RelayNewBlock(block));
}
}

View File

@ -2,6 +2,7 @@ use std::sync::Arc;
use chain::{IndexedBlock, IndexedTransaction};
use message::common::InventoryVector;
use message::types;
use primitives::hash::H256;
use synchronization_peers::{BlockAnnouncementType, TransactionAnnouncementType};
use types::{PeerIndex, PeersRef, RequestId};
use utils::KnownHashType;
@ -25,7 +26,7 @@ pub enum Task {
/// Send block
Block(PeerIndex, IndexedBlock),
/// Send merkleblock
MerkleBlock(PeerIndex, types::MerkleBlock),
MerkleBlock(PeerIndex, H256, types::MerkleBlock),
/// Send transaction
Transaction(PeerIndex, IndexedTransaction),
/// Send notfound
@ -95,9 +96,8 @@ impl LocalSynchronizationTaskExecutor {
}
}
fn execute_merkleblock(&self, peer_index: PeerIndex, block: types::MerkleBlock) {
fn execute_merkleblock(&self, peer_index: PeerIndex, hash: H256, block: types::MerkleBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
let hash = block.block_header.hash();
trace!(target: "sync", "Sending merkle block {} to peer#{}", hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, hash, KnownHashType::Block);
connection.send_merkleblock(&block);
@ -177,7 +177,7 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
Task::GetHeaders(peer_index, getheaders) => self.execute_getheaders(peer_index, getheaders),
Task::MemoryPool(peer_index) => self.execute_memorypool(peer_index),
Task::Block(peer_index, block) => self.execute_block(peer_index, block),
Task::MerkleBlock(peer_index, block) => self.execute_merkleblock(peer_index, block),
Task::MerkleBlock(peer_index, hash, block) => self.execute_merkleblock(peer_index, hash, block),
Task::Transaction(peer_index, transaction) => self.execute_transaction(peer_index, transaction),
Task::NotFound(peer_index, notfound) => self.execute_notfound(peer_index, notfound),
Task::Inventory(peer_index, inventory) => self.execute_inventory(peer_index, inventory),

View File

@ -269,7 +269,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
}
},
common::InventoryType::MessageBlock => {
if let Some(block) = self.storage.indexed_block(next_item.hash.clone().into()) {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with block {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::Block(peer_index, block));
} else {
@ -277,12 +277,12 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
}
},
common::InventoryType::MessageFilteredBlock => {
if let Some(block) = self.storage.indexed_block(next_item.hash.clone().into()) {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
let message_artefacts = self.peers.build_merkle_block(peer_index, &block);
if let Some(message_artefacts) = message_artefacts {
// send merkleblock first
trace!(target: "sync", "'getblocks' response to peer#{} is ready with merkleblock {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::MerkleBlock(peer_index, message_artefacts.merkleblock));
self.executor.execute(Task::MerkleBlock(peer_index, *block.hash(), message_artefacts.merkleblock));
// also send all matched transactions
for matched_transaction in message_artefacts.matching_transactions {
@ -334,6 +334,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
.map(|block_hash| self.storage.block_header(block_hash.into()))
.take_while(Option::is_some)
.map(Option::unwrap)
.map(|h| h.raw)
.collect();
// empty inventory messages are invalid according to regtests, while empty headers messages are valid
trace!(target: "sync", "'getheaders' response to peer#{} is ready with {} headers", peer_index, headers.len());
@ -375,11 +376,11 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
Some(block_header) => block_header,
};
if let Some(block_number) = self.storage.block_number(&block_header.previous_header_hash) {
if let Some(block_number) = self.storage.block_number(&block_header.raw.previous_header_hash) {
return Some(block_number);
}
block_hash = block_header.previous_header_hash;
block_hash = block_header.raw.previous_header_hash;
}
}
@ -741,7 +742,7 @@ pub mod tests {
let mut index = 0;
let tasks = sync_executor.take_tasks();
match tasks[index] {
Task::MerkleBlock(_, _) => {
Task::MerkleBlock(_, _, _) => {
if get_tx1 {
index += 1;
match tasks[index] {
@ -755,7 +756,7 @@ pub mod tests {
index += 1;
match tasks[index] {
Task::MerkleBlock(_, _) => {
Task::MerkleBlock(_, _, _) => {
if get_tx2 {
index += 1;
match tasks[index] {

View File

@ -1,5 +1,5 @@
use std::collections::HashMap;
use chain::BlockHeader;
use chain::IndexedBlockHeader;
use storage::{BlockRef, BlockHeaderProvider};
use primitives::bytes::Bytes;
use primitives::hash::H256;
@ -11,7 +11,7 @@ pub struct MessageBlockHeadersProvider<'a> {
/// headers offset
first_header_number: u32,
/// headers by hash
headers: HashMap<H256, BlockHeader>,
headers: HashMap<H256, IndexedBlockHeader>,
/// headers by order
headers_order: Vec<H256>,
}
@ -26,7 +26,7 @@ impl<'a> MessageBlockHeadersProvider<'a> {
}
}
pub fn append_header(&mut self, hash: H256, header: BlockHeader) {
pub fn append_header(&mut self, hash: H256, header: IndexedBlockHeader) {
self.headers.insert(hash.clone(), header);
self.headers_order.push(hash);
}
@ -35,10 +35,10 @@ impl<'a> MessageBlockHeadersProvider<'a> {
impl<'a> BlockHeaderProvider for MessageBlockHeadersProvider<'a> {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
self.block_header(block_ref).map(|h| serialize(&h.raw))
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.chain_provider.block_header(block_ref.clone())
.or_else(move || match block_ref {
BlockRef::Hash(h) => self.headers.get(&h).cloned(),
@ -67,17 +67,17 @@ mod tests {
let storage_provider = storage.as_block_header_provider();
let mut headers_provider = MessageBlockHeadersProvider::new(storage_provider, 0);
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Number(0)), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header.into()));
assert_eq!(headers_provider.block_header(BlockRef::Number(0)), Some(test_data::genesis().block_header.into()));
assert_eq!(headers_provider.block_header(BlockRef::Hash(H256::from(1))), None);
assert_eq!(headers_provider.block_header(BlockRef::Number(1)), None);
headers_provider.append_header(test_data::block_h1().hash(), test_data::block_h1().block_header);
headers_provider.append_header(test_data::block_h1().hash(), test_data::block_h1().block_header.into());
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Number(0)), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::block_h1().hash())), Some(test_data::block_h1().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Number(1)), Some(test_data::block_h1().block_header));
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header.into()));
assert_eq!(headers_provider.block_header(BlockRef::Number(0)), Some(test_data::genesis().block_header.into()));
assert_eq!(headers_provider.block_header(BlockRef::Hash(test_data::block_h1().hash())), Some(test_data::block_h1().block_header.into()));
assert_eq!(headers_provider.block_header(BlockRef::Number(1)), Some(test_data::block_h1().block_header.into()));
assert_eq!(headers_provider.block_header(BlockRef::Hash(H256::from(1))), None);
assert_eq!(headers_provider.block_header(BlockRef::Number(2)), None);
}

View File

@ -127,7 +127,7 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
let number = first_of_the_period(number, miner_confirmation_window);
let hash = match headers.block_header(BlockRef::Number(number)) {
Some(header) => header.hash(),
Some(header) => header.hash,
None => return ThresholdState::Defined,
};
@ -174,7 +174,7 @@ fn first_of_the_period(block: u32, miner_confirmation_window: u32) -> u32 {
fn count_deployment_matches(block_number: u32, blocks: &BlockHeaderProvider, deployment: Deployment, window: u32) -> usize {
BlockAncestors::new(BlockRef::Number(block_number), blocks)
.take(window as usize)
.filter(|header| deployment.matches(header.version))
.filter(|header| deployment.matches(header.raw.version))
.count()
}
@ -209,7 +209,7 @@ impl<'a> Iterator for ThresholdIterator<'a> {
None => return None,
};
let median = median_timestamp(&header, self.headers);
let median = median_timestamp(&header.raw, self.headers);
match self.last_state {
ThresholdState::Defined => {
@ -239,7 +239,7 @@ impl<'a> Iterator for ThresholdIterator<'a> {
let result = DeploymentState {
block_number: block_number,
block_hash: header.hash(),
block_hash: header.hash,
state: self.last_state,
};
@ -251,7 +251,7 @@ impl<'a> Iterator for ThresholdIterator<'a> {
mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use std::collections::HashMap;
use chain::BlockHeader;
use chain::{BlockHeader, IndexedBlockHeader};
use storage::{BlockHeaderProvider, BlockRef};
use network::Deployment;
use hash::H256;
@ -295,12 +295,12 @@ mod tests {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
self.request_count.fetch_add(1, Ordering::Relaxed);
match block_ref {
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
BlockRef::Hash(hash) => self.by_hash.get(&hash).and_then(|height| self.by_height.get(*height)).cloned(),
}
}.map(Into::into)
}
}

View File

@ -15,7 +15,7 @@ pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider) -> u3
pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeaderProvider) -> u32 {
let mut timestamps: Vec<_> = BlockAncestors::new(previous_header_hash.clone().into(), store)
.take(11)
.map(|header| header.time)
.map(|header| header.raw.time)
.collect();
if timestamps.is_empty() {

View File

@ -49,23 +49,23 @@ pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHea
// then allow mining of a min-difficulty block.
if let Some(allow_min_difficulty_after_height) = consensus.pow_allow_min_difficulty_after_height {
if height >= allow_min_difficulty_after_height {
if time > parent_header.time + consensus.pow_target_spacing * 6 {
if time > parent_header.raw.time + consensus.pow_target_spacing * 6 {
return max_bits;
}
}
}
// Find the first block in the averaging interval + calculate total difficulty for blocks in the interval
let (count, oldest_hash, bits_total) = BlockAncestors::new(parent_header.previous_header_hash.into(), store)
let (count, oldest_hash, bits_total) = BlockAncestors::new(parent_header.raw.previous_header_hash.into(), store)
.take(consensus.pow_averaging_window as usize - 1)
.fold((1, Default::default(), U256::from(parent_header.bits)), |(count, _, bits_total), header|
(count + 1, header.previous_header_hash, bits_total.overflowing_add(header.bits.into()).0));
.fold((1, Default::default(), U256::from(parent_header.raw.bits)), |(count, _, bits_total), header|
(count + 1, header.raw.previous_header_hash, bits_total.overflowing_add(header.raw.bits.into()).0));
if count != consensus.pow_averaging_window {
return max_bits;
}
let bits_avg = bits_total / consensus.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_header.hash(), store);
let parent_mtp = median_timestamp_inclusive(parent_hash, store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, consensus, max_bits)
@ -108,7 +108,7 @@ mod tests {
use primitives::bigint::U256;
use primitives::hash::H256;
use network::{Network, ConsensusParams};
use chain::BlockHeader;
use chain::{BlockHeader, IndexedBlockHeader};
use storage::{BlockHeaderProvider, BlockRef};
use timestamp::median_timestamp_inclusive;
use super::{work_required, calculate_work_required};
@ -150,11 +150,11 @@ mod tests {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: BlockRef) -> Option<IndexedBlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}.map(Into::into)
}
}