diff --git a/db/src/block_provider.rs b/db/src/block_provider.rs
new file mode 100644
index 00000000..3afb6bc3
--- /dev/null
+++ b/db/src/block_provider.rs
@@ -0,0 +1,31 @@
+use super::BlockRef;
+use primitives::hash::H256;
+use primitives::bytes::Bytes;
+use chain;
+
+pub trait BlockProvider {
+
+	/// resolves number by block hash
+	fn block_number(&self, hash: &H256) -> Option<u32>;
+
+	/// resolves hash by block number
+	fn block_hash(&self, number: u32) -> Option<H256>;
+
+	/// resolves header bytes by block reference (number/hash)
+	fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes>;
+
+	/// resolves deserialized block body by block reference (number/hash)
+	fn block(&self, block_ref: BlockRef) -> Option<chain::Block>;
+
+	/// returns true if store contains given block
+	fn contains_block(&self, block_ref: BlockRef) -> bool {
+		self.block_header_bytes(block_ref).is_some()
+	}
+
+	/// resolves list of block transactions by block reference (number/hash)
+	fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256>;
+
+	/// returns all transactions in the block by block reference (number/hash)
+	fn block_transactions(&self, block_ref: BlockRef) -> Vec<chain::Transaction>;
+
+}
diff --git a/db/src/block_stapler.rs b/db/src/block_stapler.rs
new file mode 100644
index 00000000..81a33604
--- /dev/null
+++ b/db/src/block_stapler.rs
@@ -0,0 +1,41 @@
+use primitives::hash::H256;
+use super::BlockLocation;
+use chain;
+use error::Error;
+
+#[derive(Debug)]
+pub struct Reorganization {
+	pub height: u32,
+	canonized: Vec<H256>,
+	decanonized: Vec<H256>,
+}
+
+impl Reorganization {
+	pub fn new(height: u32) -> Reorganization {
+		Reorganization { height: height, canonized: Vec::new(), decanonized: Vec::new() }
+	}
+
+	pub fn push_canonized(&mut self, hash: &H256) {
+		self.canonized.push(hash.clone());
+	}
+
+	pub fn push_decanonized(&mut self, hash: &H256) {
+		self.decanonized.push(hash.clone());
+	}
+}
+
+#[derive(Debug)]
+pub enum BlockInsertedChain {
+	Disconnected,
+	Main,
+	Side,
+	Reorganized(Reorganization),
+}
+
+pub trait BlockStapler {
+	/// return the location of this block once if it ever gets inserted
+	fn accepted_location(&self, header: &chain::BlockHeader) -> Option<BlockLocation>;
+
+	/// insert block in the storage
+	fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error>;
+}
diff --git a/db/src/error.rs b/db/src/error.rs
new file mode 100644
index 00000000..923fefb4
--- /dev/null
+++ b/db/src/error.rs
@@ -0,0 +1,75 @@
+use primitives::hash::H256;
+use std;
+
+#[derive(Debug)]
+/// Database error
+pub enum Error {
+	/// Rocksdb error
+	DB(String),
+	/// Io error
+	Io(std::io::Error),
+	/// Invalid meta info (while opening the database)
+	Meta(MetaError),
+	/// Database blockchain consistency error
+	Consistency(ConsistencyError),
+}
+
+impl Error {
+	pub fn unknown_hash(h: &H256) -> Self {
+		Error::Consistency(ConsistencyError::Unknown(h.clone()))
+	}
+
+	pub fn unknown_number(n: u32) -> Self {
+		Error::Consistency(ConsistencyError::UnknownNumber(n))
+	}
+
+	pub fn double_spend(h: &H256) -> Self {
+		Error::Consistency(ConsistencyError::DoubleSpend(h.clone()))
+	}
+
+	pub fn not_main(h: &H256) -> Self {
+		Error::Consistency(ConsistencyError::NotMain(h.clone()))
+	}
+
+	pub fn reorganize(h: &H256) -> Self {
+		Error::Consistency(ConsistencyError::Reorganize(h.clone()))
+	}
+}
+
+#[derive(Debug, PartialEq)]
+pub enum ConsistencyError {
+	/// Unknown hash
+	Unknown(H256),
+	/// Unknown number
+	UnknownNumber(u32),
+	/// Not the block from the main chain
+	NotMain(H256),
+	/// Fork too long
+	ForkTooLong,
+	/// Main chain block transaction attempts to double-spend
+	DoubleSpend(H256),
+	/// Transaction tries to spend
+	UnknownSpending(H256),
+	/// Chain has no best block
+	NoBestBlock,
+	/// Failed reorganization caused by block
+	Reorganize(H256),
+}
+
+
+#[derive(Debug, PartialEq)]
+pub enum MetaError {
+	UnsupportedVersion,
+}
+
+impl From<String> for Error {
+	fn from(err: String) -> Error {
+		Error::DB(err)
+	}
+}
+
+impl From<std::io::Error> for Error {
+	fn from(err: std::io::Error) -> Error {
+		Error::Io(err)
+	}
+}
diff --git a/db/src/lib.rs b/db/src/lib.rs
index 497018fc..7d6f771c 100644
--- a/db/src/lib.rs
+++ b/db/src/lib.rs
@@ -21,6 +21,12 @@ mod storage;
 #[cfg(feature="dev")]
 mod test_storage;
 mod transaction_meta;
+mod block_provider;
+mod block_stapler;
+mod transaction_provider;
+mod transaction_meta_provider;
+mod error;
+mod update_context;
 
 pub enum BlockRef {
 	Number(u32),
@@ -33,9 +39,16 @@ pub enum BlockLocation {
 	Side(u32),
 }
 
+pub type SharedStore = std::sync::Arc<Store>;
+
 pub use best_block::BestBlock;
-pub use storage::{Storage, Store, Error, BlockInsertedChain};
+pub use storage::{Storage, Store};
+pub use error::Error;
 pub use kvdb::Database;
+pub use transaction_provider::TransactionProvider;
+pub use transaction_meta_provider::TransactionMetaProvider;
+pub use block_stapler::{BlockStapler, BlockInsertedChain};
+pub use block_provider::BlockProvider;
 
 #[cfg(feature="dev")]
 pub use test_storage::TestStorage;
diff --git a/db/src/storage.rs b/db/src/storage.rs
index a6fa21f2..4219aef0 100644
--- a/db/src/storage.rs
+++ b/db/src/storage.rs
@@ -2,7 +2,7 @@
 use std::{self, fs};
 use std::path::Path;
 
-use kvdb::{DBTransaction, Database, DatabaseConfig};
+use kvdb::{Database, DatabaseConfig};
 use byteorder::{LittleEndian, ByteOrder};
 use primitives::hash::H256;
 use primitives::bytes::Bytes;
@@ -11,15 +11,21 @@ use serialization;
 use chain::{self, RepresentH256};
 use parking_lot::RwLock;
 use transaction_meta::TransactionMeta;
-use std::collections::HashMap;
-const COL_COUNT: u32 = 10;
-const COL_META: u32 = 0;
-const COL_BLOCK_HASHES: u32 = 1;
-const COL_BLOCK_HEADERS: u32 = 2;
-const COL_BLOCK_TRANSACTIONS: u32 = 3;
-const COL_TRANSACTIONS: u32 = 4;
-const COL_TRANSACTIONS_META: u32 = 5;
+use error::{Error, ConsistencyError, MetaError};
+use update_context::UpdateContext;
+use block_provider::BlockProvider;
+use transaction_provider::TransactionProvider;
+use transaction_meta_provider::TransactionMetaProvider;
+use block_stapler::{BlockStapler, BlockInsertedChain, Reorganization};
+
+pub const COL_COUNT: u32 = 10;
+pub const COL_META: u32 = 0;
+pub const COL_BLOCK_HASHES: u32 = 1;
+pub const COL_BLOCK_HEADERS: u32 = 2;
+pub const COL_BLOCK_TRANSACTIONS: u32 = 3;
+pub const COL_TRANSACTIONS: u32 = 4;
+pub const COL_TRANSACTIONS_META: u32 = 5;
 const COL_BLOCK_NUMBERS: u32 = 6;
 const _COL_RESERVED3: u32 = 7;
 const _COL_RESERVED4: u32 = 8;
@@ -31,52 +37,9 @@ const DB_VERSION: u32 = 1;
 const MAX_FORK_ROUTE_PRESET: usize = 128;
 
 /// Blockchain storage interface
-pub trait Store : Send + Sync {
+pub trait Store : BlockProvider + BlockStapler + TransactionProvider + TransactionMetaProvider {
 	/// get best block
 	fn best_block(&self) -> Option<BestBlock>;
-
-	/// resolves number by block hash
-	fn block_number(&self, hash: &H256) -> Option<u32>;
-
-	/// resolves hash by block number
-	fn block_hash(&self, number: u32) -> Option<H256>;
-
-	/// resolves header bytes by block reference (number/hash)
-	fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes>;
-
-	/// resolves list of block transactions by block reference (number/hash)
-	fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256>;
-
-	/// resolves transaction body bytes by transaction hash
-	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes>;
-
-	/// resolves serialized transaction info by transaction hash
-	fn transaction(&self, hash: &H256) -> Option<chain::Transaction>;
-
-	/// returns all transactions in the block by block reference (number/hash)
-	fn block_transactions(&self, block_ref: BlockRef) -> Vec<chain::Transaction>;
-
-	/// resolves deserialized block body by block reference (number/hash)
-	fn block(&self, block_ref: BlockRef) -> Option<chain::Block>;
-
-	/// returns true if store contains given block
-	fn contains_block(&self, block_ref: BlockRef) -> bool {
-		self.block_header_bytes(block_ref).is_some()
-	}
-
-	/// returns true if store contains given transaction
-	fn contains_transaction(&self, hash: &H256) -> bool {
-		self.transaction(hash).is_some()
-	}
-
-	/// insert block in the storage
-	fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error>;
-
-	/// get transaction metadata
-	fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta>;
-
-	/// return the location of this block once if it ever gets inserted
-	fn accepted_location(&self, header: &chain::BlockHeader) -> Option<BlockLocation>;
 }
 
 /// Blockchain storage with rocksdb database
@@ -85,77 +48,9 @@ pub struct Storage {
 	best_block: RwLock<Option<BestBlock>>,
 }
 
-#[derive(Debug, PartialEq)]
-pub enum MetaError {
-	UnsupportedVersion,
-}
-
-#[derive(Debug)]
-/// Database error
-pub enum Error {
-	/// Rocksdb error
-	DB(String),
-	/// Io error
-	Io(std::io::Error),
-	/// Invalid meta info (while opening the database)
-	Meta(MetaError),
-	/// Database blockchain consistency error
-	Consistency(ConsistencyError),
-}
-
-impl Error {
-	fn unknown_hash(h: &H256) -> Self {
-		Error::Consistency(ConsistencyError::Unknown(h.clone()))
-	}
-
-	fn unknown_number(n: u32) -> Self {
-		Error::Consistency(ConsistencyError::UnknownNumber(n))
-	}
-
-	fn double_spend(h: &H256) -> Self {
-		Error::Consistency(ConsistencyError::DoubleSpend(h.clone()))
-	}
-
-	fn not_main(h: &H256) -> Self {
-		Error::Consistency(ConsistencyError::NotMain(h.clone()))
-	}
-
-	fn reorganize(h: &H256) -> Self {
-		Error::Consistency(ConsistencyError::Reorganize(h.clone()))
-	}
-}
-
-#[derive(Debug, PartialEq)]
-pub enum ConsistencyError {
-	/// Unknown hash
-	Unknown(H256),
-	/// Unknown number
-	UnknownNumber(u32),
-	/// Not the block from the main chain
-	NotMain(H256),
-	/// Fork too long
-	ForkTooLong,
-	/// Main chain block transaction attempts to double-spend
-	DoubleSpend(H256),
-	/// Transaction tries to spend
-	UnknownSpending(H256),
-	/// Chain has no best block
-	NoBestBlock,
-	/// Failed reorganization caused by block
-	Reorganize(H256),
-}
-
-impl From<String> for Error {
-	fn from(err: String) -> Error {
-		Error::DB(err)
-	}
-}
-
-impl From<std::io::Error> for Error {
-	fn from(err: std::io::Error) -> Error {
-		Error::Io(err)
-	}
-}
+const KEY_VERSION: &'static[u8] = b"version";
+const KEY_BEST_BLOCK_NUMBER: &'static[u8] = b"best_block_number";
+const KEY_BEST_BLOCK_HASH: &'static[u8] = b"best_block_hash";
 
 fn u32_key(num: u32) -> [u8; 4] {
 	let mut result = [0u8; 4];
@@ -163,78 +58,6 @@
 	result
 }
 
-const KEY_VERSION: &'static[u8] = b"version";
-const KEY_BEST_BLOCK_NUMBER: &'static[u8] = b"best_block_number";
-const KEY_BEST_BLOCK_HASH: &'static[u8] = b"best_block_hash";
-
-struct UpdateContext {
-	pub meta: HashMap<H256, TransactionMeta>,
-	pub db_transaction: DBTransaction,
-	meta_snapshot: Option<HashMap<H256, TransactionMeta>>,
-}
-
-impl UpdateContext {
-	pub fn new(db: &Database) -> Self {
-		UpdateContext {
-			meta: HashMap::new(),
-			db_transaction: db.transaction(),
-			meta_snapshot: None,
-		}
-	}
-
-	pub fn apply(mut self, db: &Database) -> Result<(), Error> {
-		// actually saving meta
-		for (hash, meta) in self.meta.drain() {
-			self.db_transaction.put(Some(COL_TRANSACTIONS_META), &*hash, &meta.into_bytes());
-		}
-
-		try!(db.write(self.db_transaction));
-		Ok(())
-	}
-
-	pub fn restore_point(&mut self) {
-		// todo: optimize clone here
-		self.meta_snapshot = Some(self.meta.clone());
-		self.db_transaction.remember();
-	}
-
-	pub fn restore(&mut self) {
-		if let Some(meta_snapshot) = std::mem::replace(&mut self.meta_snapshot, None) {
-			self.meta = meta_snapshot;
-			self.db_transaction.rollback();
-		}
-	}
-}
-
-#[derive(Debug)]
-pub struct Reorganization {
-	height: u32,
-	canonized: Vec<H256>,
-	decanonized: Vec<H256>,
-}
-
-impl Reorganization {
-	fn new(height: u32) -> Reorganization {
-		Reorganization { height: height, canonized: Vec::new(), decanonized: Vec::new() }
-	}
-
-	fn push_canonized(&mut self, hash: &H256) {
-		self.canonized.push(hash.clone());
-	}
-
-	fn push_decanonized(&mut self, hash: &H256) {
-		self.decanonized.push(hash.clone());
-	}
-}
-
-#[derive(Debug)]
-pub enum BlockInsertedChain {
-	Disconnected,
-	Main,
-	Side,
-	Reorganized(Reorganization),
-}
-
 impl Storage {
 
 	/// new storage at the selected path
@@ -557,11 +380,7 @@
 	}
 }
 
-impl Store for Storage {
-	fn best_block(&self) -> Option<BestBlock> {
-		self.best_block.read().clone()
-	}
-
+impl BlockProvider for Storage {
 	fn block_number(&self, hash: &H256) -> Option<u32> {
 		self.get(COL_BLOCK_NUMBERS, &**hash)
 			.map(|val| LittleEndian::read_u32(&val))
@@ -588,10 +407,6 @@ impl Store for Storage {
 			.unwrap_or_default()
 	}
 
-	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
-		self.get(COL_TRANSACTIONS, &**hash)
-	}
-
 	fn block(&self, block_ref: BlockRef) -> Option<chain::Block> {
 		self.resolve_hash(block_ref).and_then(|block_hash|
 			self.get(COL_BLOCK_HEADERS, &*block_hash)
@@ -608,6 +423,9 @@ impl Store for Storage {
 			})
 		)
 	}
+}
+
+impl BlockStapler for Storage {
 
 	fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error> {
@@ -744,20 +562,6 @@ impl Store for Storage {
 		Ok(result)
 	}
 
-	fn transaction(&self, hash: &H256) -> Option<chain::Transaction> {
-		self.transaction_bytes(hash).and_then(|tx_bytes| {
-			serialization::deserialize(tx_bytes.as_ref()).map_err(
-				|e| self.db_error(format!("Error deserializing transaction, possible db corruption ({:?})", e))
-			).ok()
-		})
-	}
-
-	fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta> {
-		self.get(COL_TRANSACTIONS_META, &**hash).map(|val|
-			TransactionMeta::from_bytes(&val).unwrap_or_else(|e| panic!("Invalid transaction metadata: db corrupted? ({:?})", e))
-		)
-	}
-
 	fn accepted_location(&self, header: &chain::BlockHeader) -> Option<BlockLocation> {
 		let best_number = match self.best_block() {
 			None => { return Some(BlockLocation::Main(0)); },
@@ -781,10 +585,46 @@ impl Store for Storage {
 	}
 }
 
+impl TransactionProvider for Storage {
+
+	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
+		self.get(COL_TRANSACTIONS, &**hash)
+	}
+
+	fn transaction(&self, hash: &H256) -> Option<chain::Transaction> {
+		self.transaction_bytes(hash).map(|tx_bytes| {
+			serialization::deserialize(tx_bytes.as_ref())
+				.unwrap_or_else(|e| panic!("Failed to deserialize transaction: db corrupted? ({:?})", e))
+		})
+	}
+}
+
+impl TransactionMetaProvider for Storage {
+
+	fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta> {
+		self.get(COL_TRANSACTIONS_META, &**hash).map(|val|
+			TransactionMeta::from_bytes(&val).unwrap_or_else(|e| panic!("Invalid transaction metadata: db corrupted? ({:?})", e))
+		)
+	}
+}
+
+impl Store for Storage {
+	fn best_block(&self) -> Option<BestBlock> {
+		self.best_block.read().clone()
+	}
+}
+
 #[cfg(test)]
 mod tests {
-	use super::{Storage, Store, UpdateContext, Error, ConsistencyError, BlockInsertedChain};
+	use block_provider::BlockProvider;
+	use block_stapler::{BlockStapler, BlockInsertedChain};
+	use transaction_meta_provider::TransactionMetaProvider;
+	use transaction_provider::TransactionProvider;
+	use update_context::UpdateContext;
+	use error::{ConsistencyError, Error};
+
+	use super::{Storage, Store};
 	use devtools::RandomTempPath;
 	use chain::{Block, RepresentH256};
 	use super::super::{BlockRef, BlockLocation};
diff --git a/db/src/test_storage.rs b/db/src/test_storage.rs
index b0c2de61..cc871d78 100644
--- a/db/src/test_storage.rs
+++ b/db/src/test_storage.rs
@@ -1,6 +1,9 @@
 //! Test storage
 
-use super::{BlockRef, Store, Error, BestBlock, BlockLocation, BlockInsertedChain};
+use super::{
+	BlockRef, Store, Error, BestBlock, BlockLocation, BlockInsertedChain, BlockProvider,
+	BlockStapler, TransactionMetaProvider, TransactionProvider,
+};
 use chain::{self, Block, RepresentH256};
 use primitives::hash::H256;
 use serialization;
@@ -59,10 +62,7 @@ impl TestStorage {
 	}
 }
 
-impl Store for TestStorage {
-	fn best_block(&self) -> Option<BestBlock> {
-		self.data.read().best_block.clone()
-	}
+impl BlockProvider for TestStorage {
 
 	fn block_number(&self, hash: &H256) -> Option<u32> {
 		let data = self.data.read();
@@ -89,17 +89,6 @@ impl Store for TestStorage {
 			.unwrap_or(Vec::new())
 	}
 
-	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
-		self.transaction(hash).map(|tx| serialization::serialize(&tx))
-	}
-
-	fn transaction(&self, hash: &H256) -> Option<chain::Transaction> {
-		let data = self.data.read();
-		data.blocks.iter().flat_map(|(_, b)| b.transactions())
-			.find(|ref tx| tx.hash() == *hash)
-			.cloned()
-	}
-
 	fn block_transactions(&self, block_ref: BlockRef) -> Vec<chain::Transaction> {
 		self.block(block_ref)
 			.map(|b| b.transactions().iter().cloned().collect())
@@ -112,6 +101,9 @@ impl Store for TestStorage {
 			.and_then(|ref h| data.blocks.get(h))
 			.cloned()
 	}
+}
+
+impl BlockStapler for TestStorage {
 
 	fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error> {
 		let hash = block.hash();
@@ -147,11 +139,6 @@ impl Store for TestStorage {
 		Ok(BlockInsertedChain::Main)
 	}
 
-	// just spawns new meta so far, use real store for proper tests
-	fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta> {
-		self.transaction(hash).map(|tx| TransactionMeta::new(0, tx.outputs.len()))
-	}
-
 	// supports only main chain in test storage
 	fn accepted_location(&self, header: &chain::BlockHeader) -> Option<BlockLocation> {
 		if self.best_block().is_none() { return Some(BlockLocation::Main(0)); }
@@ -161,6 +148,32 @@ impl Store for TestStorage {
 
 		None
 	}
-
+}
+
+impl TransactionProvider for TestStorage {
+
+	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
+		self.transaction(hash).map(|tx| serialization::serialize(&tx))
+	}
+
+	fn transaction(&self, hash: &H256) -> Option<chain::Transaction> {
+		let data = self.data.read();
+		data.blocks.iter().flat_map(|(_, b)| b.transactions())
+			.find(|ref tx| tx.hash() == *hash)
+			.cloned()
+	}
+}
+
+impl TransactionMetaProvider for TestStorage {
+	// just spawns new meta so far, use real store for proper tests
+	fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta> {
+		self.transaction(hash).map(|tx| TransactionMeta::new(0, tx.outputs.len()))
+	}
+}
+
+impl Store for TestStorage {
+	fn best_block(&self) -> Option<BestBlock> {
+		self.data.read().best_block.clone()
+	}
 }
diff --git a/db/src/transaction_meta_provider.rs b/db/src/transaction_meta_provider.rs
new file mode 100644
index 00000000..a85c92da
--- /dev/null
+++ b/db/src/transaction_meta_provider.rs
@@ -0,0 +1,7 @@
+use transaction_meta::TransactionMeta;
+use primitives::hash::H256;
+
+pub trait TransactionMetaProvider {
+	/// get transaction metadata
+	fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta>;
+}
diff --git a/db/src/transaction_provider.rs b/db/src/transaction_provider.rs
new file mode 100644
index 00000000..3f54874b
--- /dev/null
+++ b/db/src/transaction_provider.rs
@@ -0,0 +1,18 @@
+use primitives::hash::H256;
+use primitives::bytes::Bytes;
+use chain;
+
+pub trait TransactionProvider {
+
+	/// returns true if store contains given transaction
+	fn contains_transaction(&self, hash: &H256) -> bool {
+		self.transaction(hash).is_some()
+	}
+
+	/// resolves transaction body bytes by transaction hash
+	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes>;
+
+	/// resolves serialized transaction info by transaction hash
+	fn transaction(&self, hash: &H256) -> Option<chain::Transaction>;
+
+}
diff --git a/db/src/update_context.rs b/db/src/update_context.rs
new file mode 100644
index 00000000..e03e5625
--- /dev/null
+++ b/db/src/update_context.rs
@@ -0,0 +1,46 @@
+use kvdb::{DBTransaction, Database};
+use transaction_meta::TransactionMeta;
+use std::collections::HashMap;
+use storage::COL_TRANSACTIONS_META;
+use primitives::hash::H256;
+use error::Error;
+use std;
+
+pub struct UpdateContext {
+	pub meta: HashMap<H256, TransactionMeta>,
+	pub db_transaction: DBTransaction,
+	meta_snapshot: Option<HashMap<H256, TransactionMeta>>,
+}
+
+impl UpdateContext {
+	pub fn new(db: &Database) -> Self {
+		UpdateContext {
+			meta: HashMap::new(),
+			db_transaction: db.transaction(),
+			meta_snapshot: None,
+		}
+	}
+
+	pub fn apply(mut self, db: &Database) -> Result<(), Error> {
+		// actually saving meta
+		for (hash, meta) in self.meta.drain() {
+			self.db_transaction.put(Some(COL_TRANSACTIONS_META), &*hash, &meta.into_bytes());
+		}
+
+		try!(db.write(self.db_transaction));
+		Ok(())
+	}
+
+	pub fn restore_point(&mut self) {
+		// todo: optimize clone here
+		self.meta_snapshot = Some(self.meta.clone());
+		self.db_transaction.remember();
+	}
+
+	pub fn restore(&mut self) {
+		if let Some(meta_snapshot) = std::mem::replace(&mut self.meta_snapshot, None) {
+			self.meta = meta_snapshot;
+			self.db_transaction.rollback();
+		}
+	}
+}
diff --git a/pbtc/util.rs b/pbtc/util.rs
index 23d93bdf..79579999 100644
--- a/pbtc/util.rs
+++ b/pbtc/util.rs
@@ -5,7 +5,7 @@ use chain::RepresentH256;
 use {db, APP_INFO};
 use config::Config;
 
-pub fn open_db(_cfg: &Config) -> Arc<db::Store> {
+pub fn open_db(_cfg: &Config) -> db::SharedStore {
 	let db_path = app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir");
 	Arc::new(db::Storage::new(db_path).expect("Failed to open database"))
 }
@@ -16,7 +16,7 @@ pub fn node_table_path() -> PathBuf {
 	node_table
 }
 
-pub fn init_db(cfg: &Config, db: &Arc<db::Store>) -> Result<(), String> {
+pub fn init_db(cfg: &Config, db: &db::SharedStore) -> Result<(), String> {
 	// insert genesis block if db is empty
 	let genesis_block = cfg.magic.genesis_block();
 	match db.block_hash(0) {
diff --git a/sync/src/blocks_writer.rs b/sync/src/blocks_writer.rs
index f0cc0cb8..3f2f6816 100644
--- a/sync/src/blocks_writer.rs
+++ b/sync/src/blocks_writer.rs
@@ -10,7 +10,7 @@ pub struct BlocksWriter {
 }
 
 impl BlocksWriter {
-	pub fn new(storage: Arc<db::Store>) -> BlocksWriter {
+	pub fn new(storage: db::SharedStore) -> BlocksWriter {
 		BlocksWriter {
 			storage: storage.clone(),
 			verifier: ChainVerifier::new(storage),
diff --git a/sync/src/lib.rs b/sync/src/lib.rs
index 94f9f955..c03f214f 100644
--- a/sync/src/lib.rs
+++ b/sync/src/lib.rs
@@ -49,12 +49,12 @@
 }
 
 /// Create blocks writer.
-pub fn create_sync_blocks_writer(db: Arc<db::Store>) -> blocks_writer::BlocksWriter {
+pub fn create_sync_blocks_writer(db: db::SharedStore) -> blocks_writer::BlocksWriter {
 	blocks_writer::BlocksWriter::new(db)
 }
 
 /// Create inbound synchronization connections factory for given `db`.
-pub fn create_sync_connection_factory(handle: &Handle, consensus_params: ConsensusParams, db: Arc<db::Store>) -> p2p::LocalSyncNodeRef {
+pub fn create_sync_connection_factory(handle: &Handle, consensus_params: ConsensusParams, db: db::SharedStore) -> p2p::LocalSyncNodeRef {
 	use synchronization_chain::Chain as SyncChain;
 	use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;
 	use local_node::LocalNode as SyncNode;
diff --git a/sync/src/synchronization_chain.rs b/sync/src/synchronization_chain.rs
index c4e6e6d8..26c1bd78 100644
--- a/sync/src/synchronization_chain.rs
+++ b/sync/src/synchronization_chain.rs
@@ -101,7 +101,7 @@ pub struct Chain {
 	/// Best storage block (stored for optimizations)
 	best_storage_block: db::BestBlock,
 	/// Local blocks storage
-	storage: Arc<db::Store>,
+	storage: db::SharedStore,
 	/// In-memory queue of blocks hashes
 	hash_chain: HashQueueChain,
 	/// In-memory queue of blocks headers
@@ -134,7 +134,7 @@ impl BlockState {
 
 impl Chain {
 	/// Create new `Chain` with given storage
-	pub fn new(storage: Arc<db::Store>) -> Self {
+	pub fn new(storage: db::SharedStore) -> Self {
 		// we only work with storages with genesis block
 		let genesis_block_hash = storage.block_hash(0)
 			.expect("storage with genesis block is required");
@@ -166,7 +166,7 @@ impl Chain {
 	}
 
 	/// Get storage
-	pub fn storage(&self) -> Arc<db::Store> {
+	pub fn storage(&self) -> db::SharedStore {
 		self.storage.clone()
 	}
 
@@ -667,6 +667,7 @@ mod tests {
 	use primitives::hash::H256;
 	use devtools::RandomTempPath;
 	use test_data;
+	use db::BlockStapler;
 
 	#[test]
 	fn chain_empty() {
diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs
index d719b9cf..be4d8e4b 100644
--- a/sync/src/synchronization_client.rs
+++ b/sync/src/synchronization_client.rs
@@ -1114,12 +1114,12 @@ pub mod tests {
 	use db;
 	use devtools::RandomTempPath;
 
-	fn create_disk_storage() -> Arc<db::Store> {
+	fn create_disk_storage() -> db::SharedStore {
 		let path = RandomTempPath::create_dir();
 		Arc::new(db::Storage::new(path.as_path()).unwrap())
 	}
 
-	fn create_sync(storage: Option<Arc<db::Store>>) -> (Core, Handle, Arc<Mutex<DummyTaskExecutor>>, ChainRef, Arc<Mutex<SynchronizationClient<DummyTaskExecutor>>>) {
+	fn create_sync(storage: Option<db::SharedStore>) -> (Core, Handle, Arc<Mutex<DummyTaskExecutor>>, ChainRef, Arc<Mutex<SynchronizationClient<DummyTaskExecutor>>>) {
 		let event_loop = event_loop();
 		let handle = event_loop.handle();
 		let storage = match storage {
diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index c7009c6a..32e891d7 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -1,7 +1,5 @@
 //! Bitcoin chain verifier
 
-use std::sync::Arc;
-
 use db::{self, BlockRef, BlockLocation};
 use chain::{self, RepresentH256};
 use super::{Verify, VerificationResult, Chain, Error, TransactionError, ContinueVerify};
@@ -13,7 +11,7 @@ const MAX_BLOCK_SIGOPS: usize = 20000;
 const MAX_BLOCK_SIZE: usize = 1000000;
 
 pub struct ChainVerifier {
-	store: Arc<db::Store>,
+	store: db::SharedStore,
 	verify_p2sh: bool,
 	verify_clocktimeverify: bool,
 	skip_pow: bool,
@@ -21,7 +19,7 @@ pub struct ChainVerifier {
 }
 
 impl ChainVerifier {
-	pub fn new(store: Arc<db::Store>) -> Self {
+	pub fn new(store: db::SharedStore) -> Self {
 		ChainVerifier {
 			store: store,
 			verify_p2sh: false,
@@ -275,7 +273,7 @@ mod tests {
 
 	use super::ChainVerifier;
 	use super::super::{Verify, Chain, Error, TransactionError};
-	use db::{TestStorage, Storage, Store};
+	use db::{TestStorage, Storage, Store, BlockStapler};
 	use test_data;
 	use std::sync::Arc;
 	use devtools::RandomTempPath;
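
Usage sketch (not part of the patch): with the storage interface split into `BlockProvider`, `BlockStapler`, `TransactionProvider` and `TransactionMetaProvider`, callers can depend on only the capability they need, while `db::SharedStore` (an `Arc<Store>`) still satisfies all of them. The helper functions below (`genesis_hash`, `is_known_transaction`, `example`) are hypothetical names for illustration; they assume only the traits and alias introduced in this diff.

```rust
use db::{BlockProvider, SharedStore, TransactionProvider};
use primitives::hash::H256;

// Hypothetical helper: needs read access to blocks only, so it asks for
// BlockProvider instead of the full Store trait.
fn genesis_hash<T: BlockProvider + ?Sized>(store: &T) -> Option<H256> {
	// block_hash resolves a hash by block number (block_provider.rs)
	store.block_hash(0)
}

// Hypothetical helper: needs transaction lookups only.
fn is_known_transaction<T: TransactionProvider + ?Sized>(store: &T, hash: &H256) -> bool {
	// contains_transaction is a provided method of TransactionProvider
	store.contains_transaction(hash)
}

fn example(store: SharedStore, tx_hash: &H256) {
	// SharedStore = Arc<Store>, and Store implies all four provider traits,
	// so the same shared handle satisfies both helpers.
	let _genesis = genesis_hash(&*store);
	let _known = is_known_transaction(&*store, tx_hash);
}
```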