finish indexed block refactor

debris 2016-12-09 16:24:06 +01:00
parent ac1289fa71
commit 0811e876c0
6 changed files with 8 additions and 12 deletions

View File

@@ -1,5 +1,5 @@
 use primitives::hash::H256;
-use chain::{Block, BlockHeader, OutPoint, TransactionOutput, merkle_root};
+use chain::{Block, OutPoint, TransactionOutput, merkle_root};
 use serialization::Serializable;
 use indexed_header::IndexedBlockHeader;
 use indexed_transaction::IndexedTransaction;
@@ -18,7 +18,7 @@ impl PreviousTransactionOutputProvider for IndexedBlock {
 	}

 	fn is_spent(&self, _prevout: &OutPoint) -> bool {
-		unimplemented!();
+		false
 	}
 }
@@ -45,10 +45,6 @@ impl IndexedBlock {
 		&self.header.hash
 	}

-	pub fn header(&self) -> &BlockHeader {
-		&self.header.raw
-	}
-
 	pub fn to_raw_block(self) -> Block {
 		Block::new(self.header.raw, self.transactions.into_iter().map(|tx| tx.raw).collect())
 	}
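For reference, a standalone sketch (not part of this commit) of the data layout the refactor leans on: with the header() getter removed, callers read the cached hash and the raw chain::BlockHeader straight from the public header field, mirroring how IndexedTransaction pairs hash with raw. Only the field and type names visible in the hunks are taken from the source; everything else below is a simplified stand-in.

#![allow(dead_code)]

// Simplified stand-ins; the real H256, BlockHeader and Transaction types live
// in the primitives/chain crates and carry more fields than shown here.
type H256 = [u8; 32];

#[derive(Clone)]
struct BlockHeader {
	previous_header_hash: H256,
	// remaining consensus fields elided
}

struct IndexedBlockHeader {
	hash: H256,       // hash of `raw`, computed once and cached
	raw: BlockHeader, // the plain header
}

struct Transaction {
	outputs: Vec<()>, // stand-in for Vec<TransactionOutput>
}

struct IndexedTransaction {
	hash: H256,
	raw: Transaction,
}

struct IndexedBlock {
	header: IndexedBlockHeader,
	transactions: Vec<IndexedTransaction>,
}

fn main() {
	let block = IndexedBlock {
		header: IndexedBlockHeader {
			hash: [0u8; 32],
			raw: BlockHeader { previous_header_hash: [0u8; 32] },
		},
		transactions: Vec::new(),
	};
	// Call sites touched by this commit now read the fields directly:
	let _prev = &block.header.raw.previous_header_hash; // was block.header().previous_header_hash
	let _hash = &block.header.hash;                      // cached, no re-hashing
}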

View File

@@ -190,7 +190,7 @@ impl Storage {
 			);
 		}

-		for tx in accepted_txs {
+		for tx in accepted_txs.iter().skip(1) {
 			context.meta.insert(
 				tx.hash.clone(),
 				TransactionMeta::new(number, tx.raw.outputs.len())
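A note on the loop change above: accepted_txs.iter().skip(1) borrows the vector and skips its first element, which for a block is the coinbase transaction; presumably the coinbase's TransactionMeta is written elsewhere, though that is not visible in this hunk. A tiny illustration of the iterator behaviour, with string stand-ins for the transactions:

fn main() {
	// Index 0 plays the coinbase role here.
	let accepted_txs = vec!["coinbase", "tx1", "tx2"];
	for tx in accepted_txs.iter().skip(1) {
		println!("{}", tx); // prints tx1, then tx2; the first element is skipped
	}
	// `iter()` only borrows the vector, so it is still usable afterwards.
	assert_eq!(accepted_txs.len(), 3);
}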

View File

@@ -48,7 +48,7 @@ impl BlocksWriter {
 			return Ok(());
 		}
 		// verify && insert only if parent block is already in the storage
-		if !self.storage.contains_block(db::BlockRef::Hash(indexed_block.header().previous_header_hash.clone())) {
+		if !self.storage.contains_block(db::BlockRef::Hash(indexed_block.header.raw.previous_header_hash.clone())) {
 			self.orphaned_blocks_pool.insert_orphaned_block(indexed_block.hash().clone(), indexed_block);
 			// we can't hold many orphaned blocks in memory during import
 			if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS {

View File

@@ -42,7 +42,7 @@ impl OrphanBlocksPool {
 	/// Insert orphaned block, for which we have already requested its parent block
 	pub fn insert_orphaned_block(&mut self, hash: H256, block: IndexedBlock) {
 		self.orphaned_blocks
-			.entry(block.header().previous_header_hash.clone())
+			.entry(block.header.raw.previous_header_hash.clone())
 			.or_insert_with(HashMap::new)
 			.insert(hash, block);
 	}
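The pool groups orphaned blocks by the hash of the parent they are waiting for, and entry(..).or_insert_with(HashMap::new) creates the per-parent map lazily on first insert. A small self-contained sketch of that grouping pattern, with string stand-ins for H256 and IndexedBlock:

use std::collections::HashMap;

fn main() {
	// parent hash -> (block hash -> block payload)
	let mut orphaned_blocks: HashMap<String, HashMap<String, String>> = HashMap::new();

	let parent_hash = "aa11".to_string();
	let block_hash = "bb22".to_string();
	let block = "orphaned block body".to_string();

	// Same shape as insert_orphaned_block: create the inner map if needed, then insert.
	orphaned_blocks
		.entry(parent_hash.clone())
		.or_insert_with(HashMap::new)
		.insert(block_hash, block);

	assert_eq!(orphaned_blocks[&parent_hash].len(), 1);
}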

View File

@@ -352,7 +352,7 @@ impl Chain {
 	/// Insert new best block to storage
 	pub fn insert_best_block(&mut self, hash: H256, block: &IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
-		let is_appending_to_main_branch = self.best_storage_block.hash == block.header().previous_header_hash;
+		let is_appending_to_main_branch = self.best_storage_block.hash == block.header.raw.previous_header_hash;

 		// insert to storage
 		let storage_insertion = try!(self.storage.insert_indexed_block(&block));

View File

@@ -1466,7 +1466,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
 				}

 				// check parent block state
-				let parent_block_state = chain.block_state(&block.header().previous_header_hash);
+				let parent_block_state = chain.block_state(&block.header.raw.previous_header_hash);
 				match parent_block_state {
 					BlockState::Unknown | BlockState::DeadEnd => {
 						if parent_block_state == BlockState::DeadEnd {
@@ -1510,7 +1510,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
 						let blocks_hashes_to_forget: Vec<_> = blocks_to_verify.iter().map(|t| t.0.clone()).collect();
 						chain.forget_blocks_leave_header(&blocks_hashes_to_forget);
 						// remember that we are verifying these blocks
-						let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|&(ref h, ref b)| (h.clone(), b.header().clone())).collect();
+						let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|&(ref h, ref b)| (h.clone(), b.header.raw.clone())).collect();
 						chain.verify_blocks(blocks_headers_to_verify);
 						// remember that we are verifying block from this peer
 						for verifying_block_hash in blocks_to_verify.iter().map(|&(ref h, _)| h.clone()) {