Merge pull request #491 from alekseysidorov/make-usable-for-spv

Make parity-bitcoin usable for spv validation
This commit is contained in:
Svyatoslav Nikolsky 2018-03-12 20:08:35 +03:00 committed by GitHub
commit 5483794b31
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
61 changed files with 234 additions and 127 deletions

43
Cargo.lock generated
View File

@ -74,6 +74,7 @@ dependencies = [
"db 0.1.0",
"network 0.1.0",
"primitives 0.1.0",
"storage 0.1.0",
"test-data 0.1.0",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"verification 0.1.0",
@ -212,10 +213,21 @@ dependencies = [
"primitives 0.1.0",
"rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",
"serialization 0.1.0",
"storage 0.1.0",
"tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"test-data 0.1.0",
]
[[package]]
name = "display_derive"
version = "0.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)",
"synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "domain"
version = "0.2.2"
@ -605,6 +617,7 @@ dependencies = [
"primitives 0.1.0",
"script 0.1.0",
"serialization 0.1.0",
"storage 0.1.0",
"test-data 0.1.0",
"verification 0.1.0",
]
@ -809,6 +822,7 @@ dependencies = [
"primitives 0.1.0",
"rpc 0.1.0",
"script 0.1.0",
"storage 0.1.0",
"sync 0.1.0",
"verification 0.1.0",
]
@ -964,6 +978,7 @@ dependencies = [
"serde_derive 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serialization 0.1.0",
"storage 0.1.0",
"sync 0.1.0",
"test-data 0.1.0",
"tokio-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1135,6 +1150,21 @@ name = "stable_deref_trait"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "storage"
version = "0.1.0"
dependencies = [
"bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"display_derive 0.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"serialization 0.1.0",
]
[[package]]
name = "strsim"
version = "0.6.0"
@ -1172,6 +1202,7 @@ dependencies = [
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",
"serialization 0.1.0",
"storage 0.1.0",
"test-data 0.1.0",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"verification 0.1.0",
@ -1185,6 +1216,15 @@ dependencies = [
"unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "synstructure"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "take"
version = "0.1.0"
@ -1356,6 +1396,7 @@ dependencies = [
"rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",
"serialization 0.1.0",
"storage 0.1.0",
"test-data 0.1.0",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -1448,6 +1489,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum clap 2.27.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b8c532887f1a292d17de05ae858a8fe50a301e196f9ef0ddb7ccd0d1d00f180"
"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
"checksum csv 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7ef22b37c7a51c564a365892c012dc0271221fdcc64c69b19ba4d6fa8bd96d9c"
"checksum display_derive 0.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4bba5dcd6d2855639fcf65a9af7bbad0bfb6dbf6fe68fba70bab39a6eb973ef4"
"checksum domain 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c1850bf2c3c3349e1dba2aa214d86cf9edaa057a09ce46b1a02d5c07d5da5e65"
"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
"checksum either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "740178ddf48b1a9e878e6d6509a1442a2d42fd2928aae8e7a6f8a36fb01981b3"
@ -1542,6 +1584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"
"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6"
"checksum synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a761d12e6d8dcb4dcf952a7a89b475e3a9d69e4a69307e01a470977642914bd"
"checksum take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b157868d8ac1f56b64604539990685fa7611d8fa9e5476cf0c02cf34d32917c5"
"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
"checksum termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9065bced9c3e43453aa3d56f1e98590b8455b341d2fa191a1090c0dd0b242c75"

View File

@ -18,6 +18,7 @@ network = { path = "network" }
miner = { path = "miner" }
p2p = { path = "p2p" }
script = { path = "script" }
storage = { path = "storage" }
db = { path = "db" }
verification = { path = "verification" }
sync = { path = "sync" }

View File

@ -6,6 +6,7 @@ authors = ["Ethcore <admin@ethcore.io>"]
description = "Parity bitcoin client."
[dependencies]
storage = { path = "../storage" }
db = { path = "../db" }
verification = { path = "../verification" }
network = { path = "../network" }

View File

@ -1,5 +1,6 @@
use chain::IndexedBlock;
use db::{BlockProvider, BlockRef, BlockChainDatabase, BlockOrigin, ForkChain};
use storage::{BlockProvider, BlockRef, BlockOrigin, ForkChain};
use db::BlockChainDatabase;
use test_data;
use super::Benchmark;

View File

@ -1,3 +1,4 @@
extern crate storage;
extern crate db;
extern crate chain;
extern crate test_data;

View File

@ -13,6 +13,7 @@ lru-cache = "0.1"
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
chain = { path = "../chain" }
storage = { path = "../storage" }
[dev-dependencies]
tempdir = "0.3"

View File

@ -19,11 +19,10 @@ use kv::{
COL_COUNT, COL_BLOCK_HASHES, COL_BLOCK_HEADERS, COL_BLOCK_TRANSACTIONS, COL_TRANSACTIONS,
COL_TRANSACTIONS_META, COL_BLOCK_NUMBERS
};
use best_block::BestBlock;
use {
use storage::{
BlockRef, Error, BlockHeaderProvider, BlockProvider, BlockOrigin, TransactionMeta, IndexedBlockProvider,
TransactionMetaProvider, TransactionProvider, TransactionOutputProvider, BlockChain, Store,
SideChainOrigin, ForkChain, Forkable, CanonStore, ConfigStore
SideChainOrigin, ForkChain, Forkable, CanonStore, ConfigStore, BestBlock
};
const KEY_BEST_BLOCK_NUMBER: &'static str = "best_block_number";

View File

@ -1,22 +0,0 @@
#[derive(Debug, PartialEq)]
pub enum Error {
/// Low level database error
DatabaseError(String),
/// Invalid block
CannotCanonize,
/// Unknown parent
UnknownParent,
/// Ancient fork
AncientFork,
}
impl From<Error> for String {
fn from(e: Error) -> String {
match e {
Error::DatabaseError(s) => format!("Database error: {}", s),
Error::CannotCanonize => "Cannot canonize block".into(),
Error::UnknownParent => "Block parent is unknown".into(),
Error::AncientFork => "Fork is too long to proceed".into(),
}
}
}

View File

@ -7,7 +7,7 @@ use bytes::Bytes;
use ser::List;
use chain::{Transaction as ChainTransaction, BlockHeader};
use kv::{Transaction, Key, KeyState, Operation, Value, KeyValueDatabase, KeyValue};
use {TransactionMeta};
use storage::{TransactionMeta};
#[derive(Default, Debug)]
struct InnerDatabase {

View File

@ -2,7 +2,7 @@ use bytes::Bytes;
use hash::H256;
use ser::{serialize, List, deserialize};
use chain::{Transaction as ChainTransaction, BlockHeader};
use {TransactionMeta};
use storage::{TransactionMeta};
pub const COL_COUNT: u32 = 10;
pub const COL_META: u32 = 0;

View File

@ -9,34 +9,10 @@ extern crate lru_cache;
extern crate primitives;
extern crate serialization as ser;
extern crate chain;
extern crate storage;
pub mod kv;
mod best_block;
mod block_ancestors;
mod block_chain;
mod block_chain_db;
mod block_impls;
mod block_iterator;
mod block_origin;
mod block_provider;
mod block_ref;
mod error;
mod store;
mod transaction_meta;
mod transaction_provider;
pub use primitives::{hash, bytes};
pub use best_block::BestBlock;
pub use block_ancestors::BlockAncestors;
pub use block_chain::{BlockChain, ForkChain, Forkable};
pub use block_chain_db::{BlockChainDatabase, ForkChainDatabase};
pub use block_iterator::BlockIterator;
pub use block_origin::{BlockOrigin, SideChainOrigin};
pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider};
pub use block_ref::BlockRef;
pub use error::Error;
pub use store::{AsSubstore, Store, SharedStore, CanonStore, ConfigStore};
pub use transaction_meta::TransactionMeta;
pub use transaction_provider::{TransactionProvider, TransactionOutputProvider, TransactionMetaProvider};
pub use primitives::{hash, bytes};

View File

@ -1,10 +1,12 @@
extern crate chain;
extern crate storage;
extern crate db;
extern crate test_data;
use chain::IndexedBlock;
use storage::{ForkChain, BlockProvider, SideChainOrigin};
use db::BlockChainDatabase;
use db::kv::{MemoryDatabase, SharedMemoryDatabase};
use db::{BlockChainDatabase, BlockProvider, SideChainOrigin, ForkChain};
#[test]
fn insert_block() {

View File

@ -8,6 +8,7 @@ byteorder = "1.0"
heapsize = "0.4"
bitcrypto = { path = "../crypto" }
chain = { path = "../chain" }
storage = { path = "../storage" }
db = { path = "../db" }
network = { path = "../network" }
primitives = { path = "../primitives" }

View File

@ -2,7 +2,7 @@ use std::collections::HashSet;
use primitives::hash::H256;
use primitives::compact::Compact;
use chain::{OutPoint, TransactionOutput, IndexedTransaction};
use db::{SharedStore, TransactionOutputProvider};
use storage::{SharedStore, TransactionOutputProvider};
use network::ConsensusParams;
use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops};

View File

@ -1,6 +1,6 @@
use chain::Transaction;
use ser::Serializable;
use db::TransactionProvider;
use storage::TransactionProvider;
pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) -> u64 {
let inputs_sum = transaction.inputs.iter().map(|input| {
@ -21,7 +21,8 @@ mod tests {
extern crate test_data;
use std::sync::Arc;
use db::{BlockChainDatabase, AsSubstore};
use storage::{AsSubstore};
use db::BlockChainDatabase;
use super::*;
#[test]

View File

@ -3,6 +3,7 @@ extern crate heapsize;
extern crate bitcrypto as crypto;
extern crate chain;
extern crate storage;
extern crate db;
extern crate keys;
extern crate script;

View File

@ -5,7 +5,7 @@
//! transactions.
//! It also guarantees that ancestor-descendant relation won't break during ordered removal (ancestors always removed
//! before descendants). Removal using `remove_by_hash` can break this rule.
use db::{TransactionProvider, TransactionOutputProvider};
use storage::{TransactionProvider, TransactionOutputProvider};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use chain::{IndexedTransaction, Transaction, OutPoint, TransactionOutput};

View File

@ -1,5 +1,5 @@
use clap::ArgMatches;
use db::BlockRef;
use storage::BlockRef;
use config::Config;
use primitives::hash::H256;
use util::init_db;

View File

@ -1,6 +1,6 @@
use std::net;
use clap;
use db;
use storage;
use message::Services;
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use p2p::InternetProtocol;
@ -31,7 +31,7 @@ pub struct Config {
pub rpc_config: RpcHttpConfig,
pub block_notify_command: Option<String>,
pub verification_params: VerificationParameters,
pub db: db::SharedStore,
pub db: storage::SharedStore,
}
pub const DEFAULT_DB_CACHE: usize = 512;
@ -167,7 +167,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
Ok(config)
}
fn parse_consensus_fork(network: Network, db: &db::SharedStore, matches: &clap::ArgMatches) -> Result<ConsensusFork, String> {
fn parse_consensus_fork(network: Network, db: &storage::SharedStore, matches: &clap::ArgMatches) -> Result<ConsensusFork, String> {
let old_consensus_fork = db.consensus_fork()?;
let new_consensus_fork = match (matches.is_present("segwit"), matches.is_present("bitcoin-cash")) {
(false, false) => match &old_consensus_fork {

View File

@ -8,6 +8,7 @@ extern crate env_logger;
extern crate app_dirs;
extern crate libc;
extern crate storage;
extern crate db;
extern crate chain;
extern crate keys;

View File

@ -5,13 +5,13 @@ use ethcore_rpc::{Server, start_http, MetaIoHandler, Compatibility, Remote};
use network::Network;
use std::io;
use sync;
use db;
use storage;
use p2p;
pub struct Dependencies {
pub network: Network,
pub local_sync_node: sync::LocalNodeRef,
pub storage: db::SharedStore,
pub storage: storage::SharedStore,
pub p2p_context: Arc<p2p::Context>,
pub remote: Remote,
}

View File

@ -2,11 +2,12 @@ use std::sync::Arc;
use std::path::PathBuf;
use std::fs::create_dir_all;
use app_dirs::{app_dir, AppDataType};
use {db, APP_INFO};
use {storage, APP_INFO};
use db;
use config::Config;
use chain::IndexedBlock;
pub fn open_db(data_dir: &Option<String>, db_cache: usize) -> db::SharedStore {
pub fn open_db(data_dir: &Option<String>, db_cache: usize) -> storage::SharedStore {
let db_path = match *data_dir {
Some(ref data_dir) => custom_path(&data_dir, "db"),
None => app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir"),

View File

@ -22,6 +22,7 @@ chain = { path = "../chain" }
primitives = { path = "../primitives" }
p2p = { path = "../p2p" }
network = { path = "../network" }
storage = { path = "../storage" }
db = { path = "../db" }
miner = { path = "../miner" }
verification = { path = "../verification" }

View File

@ -15,6 +15,7 @@ extern crate serialization as ser;
extern crate primitives;
extern crate p2p;
extern crate network;
extern crate storage;
extern crate db;
extern crate miner;
extern crate verification;

View File

@ -9,7 +9,7 @@ use v1::helpers::errors::{block_not_found, block_at_height_not_found, transactio
transaction_output_not_found, transaction_of_side_branch};
use jsonrpc_macros::Trailing;
use jsonrpc_core::Error;
use {db, chain};
use {storage, chain};
use global_script::Script;
use chain::OutPoint;
use verification;
@ -33,11 +33,11 @@ pub trait BlockChainClientCoreApi: Send + Sync + 'static {
pub struct BlockChainClientCore {
network: Network,
storage: db::SharedStore,
storage: storage::SharedStore,
}
impl BlockChainClientCore {
pub fn new(network: Network, storage: db::SharedStore) -> Self {
pub fn new(network: Network, storage: storage::SharedStore) -> Self {
BlockChainClientCore {
network: network,

15
storage/Cargo.toml Normal file
View File

@ -0,0 +1,15 @@
[package]
name = "storage"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
elastic-array = "0.6"
parking_lot = "0.4"
log = "0.4"
bit-vec = "0.4"
lru-cache = "0.1"
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
chain = { path = "../chain" }
display_derive = "0.0.0"

21
storage/src/error.rs Normal file
View File

@ -0,0 +1,21 @@
#[derive(Debug, PartialEq, Display)]
pub enum Error {
/// Low level database error
#[display(fmt = "Database error: {}", _0)]
DatabaseError(String),
/// Invalid block
#[display(fmt = "Cannot canonize block")]
CannotCanonize,
/// Unknown parent
#[display(fmt = "Block parent is unknown")]
UnknownParent,
/// Ancient fork
#[display(fmt = "Fork is too long to proceed")]
AncientFork,
}
impl From<Error> for String {
fn from(e: Error) -> String {
format!("{}", e)
}
}

40
storage/src/lib.rs Normal file
View File

@ -0,0 +1,40 @@
extern crate elastic_array;
extern crate parking_lot;
#[macro_use]
extern crate log;
extern crate bit_vec;
extern crate lru_cache;
#[macro_use]
extern crate display_derive;
extern crate primitives;
extern crate serialization as ser;
extern crate chain;
mod best_block;
mod block_ancestors;
mod block_chain;
mod block_impls;
mod block_iterator;
mod block_origin;
mod block_provider;
mod block_ref;
mod error;
mod store;
mod transaction_meta;
mod transaction_provider;
pub use primitives::{hash, bytes};
pub use best_block::BestBlock;
pub use block_ancestors::BlockAncestors;
pub use block_chain::{BlockChain, ForkChain, Forkable};
pub use block_iterator::BlockIterator;
pub use block_origin::{BlockOrigin, SideChainOrigin};
pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider};
pub use block_ref::BlockRef;
pub use error::Error;
pub use store::{AsSubstore, Store, SharedStore, CanonStore, ConfigStore};
pub use transaction_meta::TransactionMeta;
pub use transaction_provider::{TransactionProvider, TransactionOutputProvider, TransactionMetaProvider};

View File

@ -16,6 +16,7 @@ byteorder = "1.0"
chain = { path = "../chain" }
bitcrypto = { path = "../crypto" }
storage = { path = "../storage" }
db = { path = "../db" }
message = { path = "../message" }
miner = { path = "../miner" }

View File

@ -2,7 +2,7 @@ use std::collections::VecDeque;
use std::sync::Arc;
use parking_lot::Mutex;
use chain;
use db;
use storage;
use network::ConsensusParams;
use primitives::hash::H256;
use super::Error;
@ -58,12 +58,12 @@ impl BlocksWriter {
/// Append new block
pub fn append_block(&mut self, block: chain::IndexedBlock) -> Result<(), Error> {
// do not append block if it is already there
if self.storage.contains_block(db::BlockRef::Hash(block.hash().clone())) {
if self.storage.contains_block(storage::BlockRef::Hash(block.hash().clone())) {
return Ok(());
}
// verify && insert only if parent block is already in the storage
if !self.storage.contains_block(db::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) {
if !self.storage.contains_block(storage::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) {
self.orphaned_blocks_pool.insert_orphaned_block(block);
// we can't hold many orphaned blocks in memory during import
if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS {

View File

@ -1,6 +1,7 @@
extern crate bitcrypto;
extern crate byteorder;
extern crate chain;
extern crate storage;
extern crate db;
#[macro_use]
extern crate log;
@ -52,7 +53,7 @@ pub enum Error {
/// Too many orphan blocks.
TooManyOrphanBlocks,
/// Database error.
Database(db::Error),
Database(storage::Error),
/// Block verification error.
Verification(String),
}
@ -76,7 +77,7 @@ pub trait SyncListener: Send + 'static {
}
/// Create blocks writer.
pub fn create_sync_blocks_writer(db: db::SharedStore, consensus: ConsensusParams, verification_params: VerificationParameters) -> blocks_writer::BlocksWriter {
pub fn create_sync_blocks_writer(db: storage::SharedStore, consensus: ConsensusParams, verification_params: VerificationParameters) -> blocks_writer::BlocksWriter {
blocks_writer::BlocksWriter::new(db, consensus, verification_params)
}
@ -88,7 +89,7 @@ pub fn create_sync_peers() -> PeersRef {
}
/// Creates local sync node for given `db`
pub fn create_local_sync_node(consensus: ConsensusParams, db: db::SharedStore, peers: PeersRef, verification_params: VerificationParameters) -> LocalNodeRef {
pub fn create_local_sync_node(consensus: ConsensusParams, db: storage::SharedStore, peers: PeersRef, verification_params: VerificationParameters) -> LocalNodeRef {
use miner::MemoryPool;
use synchronization_chain::Chain as SyncChain;
use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;

View File

@ -2,7 +2,7 @@ use std::collections::{VecDeque, HashSet};
use std::fmt;
use linked_hash_map::LinkedHashMap;
use chain::{BlockHeader, Transaction, IndexedBlockHeader, IndexedBlock, IndexedTransaction};
use db;
use storage;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation};
use network::ConsensusParams;
use primitives::bytes::Bytes;
@ -103,7 +103,7 @@ pub struct Chain {
/// Genesis block hash (stored for optimizations)
genesis_block_hash: H256,
/// Best storage block (stored for optimizations)
best_storage_block: db::BestBlock,
best_storage_block: storage::BestBlock,
/// Local blocks storage
storage: StorageRef,
/// Consensus params.
@ -214,9 +214,9 @@ impl Chain {
}
/// Get best block
pub fn best_block(&self) -> db::BestBlock {
pub fn best_block(&self) -> storage::BestBlock {
match self.hash_chain.back() {
Some(hash) => db::BestBlock {
Some(hash) => storage::BestBlock {
number: self.best_storage_block.number + self.hash_chain.len(),
hash: hash.clone(),
},
@ -225,17 +225,17 @@ impl Chain {
}
/// Get best storage block
pub fn best_storage_block(&self) -> db::BestBlock {
pub fn best_storage_block(&self) -> storage::BestBlock {
self.best_storage_block.clone()
}
/// Get best block header
pub fn best_block_header(&self) -> db::BestBlock {
pub fn best_block_header(&self) -> storage::BestBlock {
let headers_chain_information = self.headers_chain.information();
if headers_chain_information.best == 0 {
return self.best_storage_block()
}
db::BestBlock {
storage::BestBlock {
number: self.best_storage_block.number + headers_chain_information.best,
hash: self.headers_chain.at(headers_chain_information.best - 1)
.expect("got this index above; qed")
@ -264,7 +264,7 @@ impl Chain {
/// Get block header by number
pub fn block_header_by_number(&self, number: BlockHeight) -> Option<IndexedBlockHeader> {
if number <= self.best_storage_block.number {
self.storage.block_header(db::BlockRef::Number(number)).map(Into::into)
self.storage.block_header(storage::BlockRef::Number(number)).map(Into::into)
} else {
self.headers_chain.at(number - self.best_storage_block.number)
}
@ -272,7 +272,7 @@ impl Chain {
/// Get block header by hash
pub fn block_header_by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
if let Some(block) = self.storage.block(db::BlockRef::Hash(hash.clone())) {
if let Some(block) = self.storage.block(storage::BlockRef::Hash(hash.clone())) {
return Some(block.block_header.into());
}
self.headers_chain.by_hash(hash)
@ -282,7 +282,7 @@ impl Chain {
pub fn block_state(&self, hash: &H256) -> BlockState {
match self.hash_chain.contains_in(hash) {
Some(queue_index) => BlockState::from_queue_index(queue_index),
None => if self.storage.contains_block(db::BlockRef::Hash(hash.clone())) {
None => if self.storage.contains_block(storage::BlockRef::Hash(hash.clone())) {
BlockState::Stored
} else if self.dead_end_blocks.contains(hash) {
BlockState::DeadEnd
@ -351,17 +351,17 @@ impl Chain {
}
/// Insert new best block to storage
pub fn insert_best_block(&mut self, block: IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
pub fn insert_best_block(&mut self, block: IndexedBlock) -> Result<BlockInsertionResult, storage::Error> {
assert_eq!(Some(self.storage.best_block().hash), self.storage.block_hash(self.storage.best_block().number));
let block_origin = self.storage.block_origin(&block.header)?;
trace!(target: "sync", "insert_best_block {:?} origin: {:?}", block.hash().reversed(), block_origin);
match block_origin {
db::BlockOrigin::KnownBlock => {
storage::BlockOrigin::KnownBlock => {
// there should be no known blocks at this point
unreachable!();
},
// case 1: block has been added to the main branch
db::BlockOrigin::CanonChain { .. } => {
storage::BlockOrigin::CanonChain { .. } => {
self.storage.insert(block.clone())?;
self.storage.canonize(block.hash())?;
@ -395,7 +395,7 @@ impl Chain {
})
},
// case 2: block has been added to the side branch with reorganization to this branch
db::BlockOrigin::SideChainBecomingCanonChain(origin) => {
storage::BlockOrigin::SideChainBecomingCanonChain(origin) => {
let fork = self.storage.fork(origin.clone())?;
fork.store().insert(block.clone())?;
fork.store().canonize(block.hash())?;
@ -463,7 +463,7 @@ impl Chain {
Ok(result)
},
// case 3: block has been added to the side branch without reorganization to this branch
db::BlockOrigin::SideChain(_origin) => {
storage::BlockOrigin::SideChain(_origin) => {
let block_hash = block.hash().clone();
self.storage.insert(block)?;
@ -673,7 +673,7 @@ impl Chain {
}
}
impl db::TransactionProvider for Chain {
impl storage::TransactionProvider for Chain {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
self.memory_pool.read().transaction_bytes(hash)
.or_else(|| self.storage.transaction_bytes(hash))
@ -685,16 +685,16 @@ impl db::TransactionProvider for Chain {
}
}
impl db::BlockHeaderProvider for Chain {
fn block_header_bytes(&self, block_ref: db::BlockRef) -> Option<Bytes> {
impl storage::BlockHeaderProvider for Chain {
fn block_header_bytes(&self, block_ref: storage::BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
}
fn block_header(&self, block_ref: db::BlockRef) -> Option<BlockHeader> {
fn block_header(&self, block_ref: storage::BlockRef) -> Option<BlockHeader> {
match block_ref {
db::BlockRef::Hash(hash) => self.block_header_by_hash(&hash).map(|h| h.raw),
db::BlockRef::Number(n) => self.block_header_by_number(n).map(|h| h.raw),
storage::BlockRef::Hash(hash) => self.block_header_by_hash(&hash).map(|h| h.raw),
storage::BlockRef::Number(n) => self.block_header_by_number(n).map(|h| h.raw),
}
}
}

View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use futures::Future;
use parking_lot::{Mutex, RwLock};
use db;
use storage;
use local_node::LocalNode;
use miner::MemoryPool;
use super::SyncListener;
@ -24,7 +24,7 @@ pub type PeerIndex = usize;
pub type EmptyBoxFuture = Box<Future<Item=(), Error=()> + Send>;
/// Reference to storage
pub type StorageRef = db::SharedStore;
pub type StorageRef = storage::SharedStore;
/// Reference to memory pool
pub type MemoryPoolRef = Arc<RwLock<MemoryPool>>;

View File

@ -1,6 +1,6 @@
use std::collections::HashMap;
use chain::{Transaction, TransactionOutput, OutPoint};
use db::TransactionOutputProvider;
use storage::TransactionOutputProvider;
use miner::{DoubleSpendCheckResult, HashedOutPoint, NonFinalDoubleSpendSet};
use verification::TransactionError;
use super::super::types::{MemoryPoolRef, StorageRef};
@ -96,7 +96,8 @@ mod tests {
use std::sync::Arc;
use parking_lot::RwLock;
use chain::OutPoint;
use db::{TransactionOutputProvider, BlockChainDatabase};
use storage::{TransactionOutputProvider};
use db::BlockChainDatabase;
use miner::MemoryPool;
use super::MemoryPoolTransactionOutputProvider;

View File

@ -1,6 +1,6 @@
use std::collections::HashMap;
use chain::BlockHeader;
use db::{BlockRef, BlockHeaderProvider};
use storage::{BlockRef, BlockHeaderProvider};
use primitives::bytes::Bytes;
use primitives::hash::H256;
@ -56,7 +56,8 @@ impl<'a> BlockHeaderProvider for MessageBlockHeadersProvider<'a> {
mod tests {
extern crate test_data;
use db::{AsSubstore, BlockHeaderProvider, BlockChainDatabase, BlockRef};
use storage::{AsSubstore, BlockHeaderProvider, BlockRef};
use db::BlockChainDatabase;
use primitives::hash::H256;
use super::MessageBlockHeadersProvider;

View File

@ -16,6 +16,7 @@ cargo test\
-p script\
-p serialization\
-p serialization_derive\
-p storage\
-p sync\
-p test-data\
-p verification

View File

@ -14,8 +14,9 @@ chain = { path = "../chain" }
serialization = { path = "../serialization" }
script = { path = "../script" }
network = { path = "../network" }
db = { path = "../db" }
storage = { path = "../storage" }
bitcrypto = { path = "../crypto" }
[dev-dependencies]
test-data = { path = "../test-data" }
db = { path = "../db" }

View File

@ -1,6 +1,6 @@
use network::{ConsensusParams, ConsensusFork};
use crypto::dhash256;
use db::{TransactionOutputProvider, BlockHeaderProvider};
use storage::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use ser::Stream;
use sigops::{transaction_sigops, transaction_sigops_cost} ;

View File

@ -1,5 +1,5 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use db::Store;
use storage::Store;
use network::ConsensusParams;
use error::Error;
use canon::CanonBlock;

View File

@ -1,10 +1,10 @@
use network::ConsensusParams;
use db::BlockHeaderProvider;
use storage::BlockHeaderProvider;
use canon::CanonHeader;
use error::Error;
use work::work_required;
use deployments::BlockDeployments;
use timestamp::median_timestamp;
use deployments::Deployments;
pub struct HeaderAcceptor<'a> {
pub version: HeaderVersion<'a>,
@ -13,16 +13,17 @@ pub struct HeaderAcceptor<'a> {
}
impl<'a> HeaderAcceptor<'a> {
pub fn new(
pub fn new<D: AsRef<Deployments>>(
store: &'a BlockHeaderProvider,
consensus: &'a ConsensusParams,
header: CanonHeader<'a>,
height: u32,
deployments: &'a BlockDeployments<'a>,
deployments: D,
) -> Self {
let csv_active = deployments.as_ref().csv(height, store, consensus);
HeaderAcceptor {
work: HeaderWork::new(header, store, height, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, deployments),
median_timestamp: HeaderMedianTimestamp::new(header, store, csv_active),
version: HeaderVersion::new(header, height, consensus),
}
}
@ -99,12 +100,11 @@ pub struct HeaderMedianTimestamp<'a> {
}
impl<'a> HeaderMedianTimestamp<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, deployments: &'a BlockDeployments<'a>) -> Self {
let active = deployments.csv();
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, csv_active: bool) -> Self {
HeaderMedianTimestamp {
header: header,
store: store,
active: active,
active: csv_active,
}
}

View File

@ -1,6 +1,6 @@
use primitives::hash::H256;
use primitives::bytes::Bytes;
use db::{TransactionMetaProvider, TransactionOutputProvider};
use storage::{TransactionMetaProvider, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::DuplexTransactionOutputProvider;

View File

@ -2,7 +2,7 @@
use hash::H256;
use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, Transaction};
use db::{SharedStore, TransactionOutputProvider, BlockHeaderProvider, BlockOrigin};
use storage::{SharedStore, TransactionOutputProvider, BlockHeaderProvider, BlockOrigin};
use network::ConsensusParams;
use error::{Error, TransactionError};
use canon::{CanonBlock, CanonTransaction};
@ -143,7 +143,8 @@ mod tests {
use std::sync::Arc;
use chain::IndexedBlock;
use db::{BlockChainDatabase, Error as DBError};
use storage::{Error as DBError};
use db::BlockChainDatabase;
use network::{Network, ConsensusParams, ConsensusFork};
use script;
use super::BackwardsCompatibleChainVerifier as ChainVerifier;

View File

@ -3,7 +3,7 @@ use std::collections::hash_map::Entry;
use parking_lot::Mutex;
use network::{ConsensusParams, Deployment};
use hash::H256;
use db::{BlockHeaderProvider, BlockRef, BlockAncestors, BlockIterator};
use storage::{BlockHeaderProvider, BlockRef, BlockAncestors, BlockIterator};
use timestamp::median_timestamp;
#[derive(Debug, PartialEq, Clone, Copy)]
@ -110,6 +110,18 @@ impl<'a> BlockDeployments<'a> {
}
}
impl AsRef<Deployments> for Deployments {
fn as_ref(&self) -> &Deployments {
&self
}
}
impl<'a> AsRef<Deployments> for BlockDeployments<'a> {
fn as_ref(&self) -> &Deployments {
self.deployments
}
}
/// Calculates threshold state of given deployment
fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, number: u32, headers: &BlockHeaderProvider, miner_confirmation_window: u32, rule_change_activation_threshold: u32) -> ThresholdState {
// deployments are checked using previous block index
@ -255,7 +267,7 @@ mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use std::collections::HashMap;
use chain::BlockHeader;
use db::{BlockHeaderProvider, BlockRef};
use storage::{BlockHeaderProvider, BlockRef};
use network::Deployment;
use hash::H256;
use primitives::bytes::Bytes;

View File

@ -2,7 +2,7 @@
//! require sophisticated (in more than one source) previous transaction lookups
use chain::{OutPoint, TransactionOutput};
use db::TransactionOutputProvider;
use storage::TransactionOutputProvider;
#[derive(Clone, Copy)]
pub struct DuplexTransactionOutputProvider<'a> {

View File

@ -1,6 +1,6 @@
use hash::H256;
use compact::Compact;
use db::Error as DBError;
use storage::Error as DBError;
use script::Error as SignatureError;
#[derive(Debug, PartialEq)]

View File

@ -59,13 +59,15 @@ extern crate log;
extern crate parking_lot;
extern crate rayon;
extern crate db;
extern crate storage;
extern crate chain;
extern crate network;
extern crate primitives;
extern crate serialization as ser;
extern crate script;
extern crate bitcrypto as crypto;
#[cfg(test)]
extern crate db;
pub mod constants;
mod canon;

View File

@ -1,6 +1,6 @@
use network::ConsensusFork;
use chain::Transaction;
use db::TransactionOutputProvider;
use storage::TransactionOutputProvider;
use script::{Script, ScriptWitness};
/// Counts signature operations in given transaction

View File

@ -1,5 +1,5 @@
use chain::BlockHeader;
use db::{BlockHeaderProvider, BlockAncestors};
use storage::{BlockHeaderProvider, BlockAncestors};
use primitives::hash::H256;
/// Returns median timestamp, of given header ancestors.

View File

@ -4,7 +4,7 @@ use primitives::hash::H256;
use primitives::bigint::U256;
use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, ConsensusFork};
use db::{BlockHeaderProvider, BlockRef};
use storage::{BlockHeaderProvider, BlockRef};
use work_bch::work_required_bitcoin_cash;
use constants::{

View File

@ -3,7 +3,7 @@ use primitives::hash::H256;
use primitives::bigint::{Uint, U256};
use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, BitcoinCashConsensusParams};
use db::BlockHeaderProvider;
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
use work::{is_retarget_height, work_required_testnet, work_required_retarget};
@ -178,7 +178,7 @@ mod tests {
use primitives::hash::H256;
use primitives::bigint::U256;
use network::{Network, ConsensusParams, BitcoinCashConsensusParams, ConsensusFork};
use db::{BlockHeaderProvider, BlockRef};
use storage::{BlockHeaderProvider, BlockRef};
use chain::BlockHeader;
use work::work_required;
use super::work_required_bitcoin_cash_adjusted;