parity-zcash/sync/src/blocks_writer.rs

use std::collections::VecDeque;
use std::sync::Arc;
use parking_lot::Mutex;
use chain;
use db;
use network::Magic;
use primitives::hash::H256;
use super::Error;
use synchronization_verifier::{Verifier, SyncVerifier, VerificationTask,
	VerificationSink, BlockVerificationSink, TransactionVerificationSink};
use types::StorageRef;
use utils::OrphanBlocksPool;

/// Maximum number of orphaned blocks kept in memory
pub const MAX_ORPHANED_BLOCKS: usize = 1024;

/// Synchronous block writer
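///
/// A minimal usage sketch, mirroring the tests at the bottom of this file
/// (`test_data` is a dev-dependency here, so the snippet is marked `ignore`
/// rather than compiled as a doctest):
///
/// ```ignore
/// let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
/// let mut writer = BlocksWriter::new(db.clone(), Magic::Testnet, true);
/// writer.append_block(test_data::block_h1().into()).expect("block 1 follows genesis");
/// assert_eq!(db.best_block().number, 1);
/// ```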
pub struct BlocksWriter {
	/// Blocks storage
	storage: StorageRef,
	/// Orphaned blocks pool
	orphaned_blocks_pool: OrphanBlocksPool,
	/// Blocks verifier
	verifier: SyncVerifier<BlocksWriterSink>,
	/// Verification events receiver
	sink: Arc<BlocksWriterSinkData>,
	/// True if verification is enabled
	verification: bool,
}

/// Verification events receiver
struct BlocksWriterSink {
	/// Reference to blocks writer data
	data: Arc<BlocksWriterSinkData>,
}

/// Blocks writer data
struct BlocksWriterSinkData {
	/// Blocks storage
	storage: StorageRef,
	/// Last verification error
	err: Mutex<Option<Error>>,
}

impl BlocksWriter {
	/// Create new synchronous blocks writer
	pub fn new(storage: StorageRef, network: Magic, verification: bool) -> BlocksWriter {
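		// the verifier reports into `sink`, which shares `BlocksWriterSinkData` with this
		// writer, so `append_block` can observe verification errors after each block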
		let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone()));
		let sink = Arc::new(BlocksWriterSink::new(sink_data.clone()));
		let verifier = SyncVerifier::new(network, storage.clone(), sink);
		BlocksWriter {
			storage: storage,
			orphaned_blocks_pool: OrphanBlocksPool::new(),
			verifier: verifier,
			sink: sink_data,
			verification: verification,
		}
	}

	/// Append new block
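	///
	/// Blocks may arrive out of order: a block whose parent is not yet in the storage is
	/// held in the in-memory orphan pool and written together with its descendants once
	/// the parent arrives; if more than `MAX_ORPHANED_BLOCKS` blocks are pending, the
	/// call fails with `Error::TooManyOrphanBlocks`.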
	pub fn append_block(&mut self, block: chain::IndexedBlock) -> Result<(), Error> {
		// do not append block if it is already there
		if self.storage.contains_block(db::BlockRef::Hash(block.hash().clone())) {
			return Ok(());
		}

		// verify && insert only if parent block is already in the storage
		if !self.storage.contains_block(db::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) {
			self.orphaned_blocks_pool.insert_orphaned_block(block);
			// we can't hold many orphaned blocks in memory during import
			if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS {
				return Err(Error::TooManyOrphanBlocks);
			}
			return Ok(());
		}

		// verify && insert block && all its orphan children
		let mut verification_queue: VecDeque<chain::IndexedBlock> = self.orphaned_blocks_pool.remove_blocks_for_parent(block.hash());
		verification_queue.push_front(block);
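		// the new block must be processed before its previously-orphaned descendants,
		// hence push_front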
		while let Some(block) = verification_queue.pop_front() {
			if self.verification {
				self.verifier.verify_block(block);
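				// `SyncVerifier` verifies synchronously, so a verification failure (if any)
				// has already been recorded in the shared sink data by this point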
				if let Some(err) = self.sink.error() {
					return Err(err);
				}
			} else {
				self.storage.insert(&block).map_err(Error::Database)?;
				self.storage.canonize(block.hash()).map_err(Error::Database)?;
			}
		}
		Ok(())
	}
}

impl BlocksWriterSink {
	/// Create new verification events receiver
	pub fn new(data: Arc<BlocksWriterSinkData>) -> Self {
		BlocksWriterSink {
			data: data,
		}
	}
}

impl BlocksWriterSinkData {
	/// Create new blocks writer data
	pub fn new(storage: StorageRef) -> Self {
		BlocksWriterSinkData {
			storage: storage,
			err: Mutex::new(None),
		}
	}

	/// Take last verification error
	pub fn error(&self) -> Option<Error> {
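		// `take()` clears the stored error, so each failure is reported at most once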
		self.err.lock().take()
	}
}

impl VerificationSink for BlocksWriterSink {
}

impl BlockVerificationSink for BlocksWriterSink {
	fn on_block_verification_success(&self, block: chain::IndexedBlock) -> Option<Vec<VerificationTask>> {
		if let Err(err) = self.data.storage.insert(&block) {
			*self.data.err.lock() = Some(Error::Database(err));
		}
		if let Err(err) = self.data.storage.canonize(block.hash()) {
			*self.data.err.lock() = Some(Error::Database(err));
		}
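		// no follow-up verification tasks are requested for imported blocks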
		None
	}

	fn on_block_verification_error(&self, err: &str, _hash: &H256) {
		*self.data.err.lock() = Some(Error::Verification(err.into()));
	}
}

impl TransactionVerificationSink for BlocksWriterSink {
	fn on_transaction_verification_success(&self, _transaction: chain::IndexedTransaction) {
		unreachable!("not intended to verify transactions")
	}

	fn on_transaction_verification_error(&self, _err: &str, _hash: &H256) {
		unreachable!("not intended to verify transactions")
	}
}

#[cfg(test)]
mod tests {
	extern crate test_data;

	use std::sync::Arc;
	use db::{BlockChainDatabase};
	use network::Magic;
	use super::super::Error;
	use super::{BlocksWriter, MAX_ORPHANED_BLOCKS};

	#[test]
	fn blocks_writer_appends_blocks() {
		let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
		let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
		blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error");
		assert_eq!(db.best_block().number, 1);
	}

	#[test]
	fn blocks_writer_verification_error() {
		let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
		let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1);
		let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
		for (index, block) in blocks.into_iter().skip(1).enumerate() {
			match blocks_target.append_block(block.into()) {
				Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (),
				Ok(_) if index != MAX_ORPHANED_BLOCKS => (),
				_ => panic!("unexpected"),
			}
		}
		assert_eq!(db.best_block().number, 0);
	}

	#[test]
	fn blocks_writer_out_of_order_block() {
		let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
		let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
		let wrong_block = test_data::block_builder()
			.header().parent(test_data::genesis().hash()).build()
			.build();
		match blocks_target.append_block(wrong_block.into()).unwrap_err() {
			Error::Verification(_) => (),
			_ => panic!("Unexpected error"),
		};
		assert_eq!(db.best_block().number, 0);
	}

	#[test]
	fn blocks_writer_append_to_existing_db() {
		let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
		let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
		assert!(blocks_target.append_block(test_data::genesis().into()).is_ok());
		assert_eq!(db.best_block().number, 0);
		assert!(blocks_target.append_block(test_data::block_h1().into()).is_ok());
		assert_eq!(db.best_block().number, 1);
	}
}