propagate ConsensusParams with ConsensusFork

This commit is contained in:
Svyatoslav Nikolsky 2017-08-08 09:52:54 +03:00
parent 9b108c0789
commit e93e5ef35a
17 changed files with 151 additions and 117 deletions

View File

@ -1,9 +1,28 @@
use hash::H256; use hash::H256;
use {Magic, Deployment}; use {Magic, Deployment};
#[derive(Debug, Clone, Copy)]
/// Concurrent consensus rule forks (at most one may be selected at a time;
/// `ConsensusParams::fork` stores it as an `Option<ConsensusFork>`).
/// The `u32` payload of each variant is the block height at which the
/// fork's rules activate.
pub enum ConsensusFork {
/// SegWit2x (aka The New York Agreement).
/// Briefly: SegWit + blocks up to 2MB.
/// Technical specification:
/// Segregated Witness (Consensus layer) - https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki
/// Block size increase to 2MB - https://github.com/bitcoin/bips/blob/master/bip-0102.mediawiki
SegWit2x(u32),
/// Bitcoin Cash (aka UAHF).
/// Briefly: no SegWit + blocks up to 8MB + replay protection.
/// Technical specification:
/// UAHF Technical Specification - https://github.com/Bitcoin-UAHF/spec/blob/master/uahf-technical-spec.md
/// BUIP-HF Digest for replay protected signature verification across hard forks - https://github.com/Bitcoin-UAHF/spec/blob/master/replay-protected-sighash.md
BitcoinCash(u32),
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
/// Parameters that influence chain consensus. /// Parameters that influence chain consensus.
pub struct ConsensusParams { pub struct ConsensusParams {
/// Network.
pub magic: Magic,
/// Time when BIP16 becomes active. /// Time when BIP16 becomes active.
/// See https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki /// See https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki
pub bip16_time: u32, pub bip16_time: u32,
@ -16,6 +35,8 @@ pub struct ConsensusParams {
/// Block height at which BIP65 becomes active. /// Block height at which BIP65 becomes active.
/// See https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki /// See https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki
pub bip66_height: u32, pub bip66_height: u32,
/// Selected consensus fork.
pub fork: Option<ConsensusFork>,
/// Version bits activation /// Version bits activation
pub rule_change_activation_threshold: u32, pub rule_change_activation_threshold: u32,
/// Number of blocks with the same set of rules /// Number of blocks with the same set of rules
@ -27,13 +48,15 @@ pub struct ConsensusParams {
} }
impl ConsensusParams { impl ConsensusParams {
pub fn with_magic(magic: Magic) -> Self { pub fn new(magic: Magic, fork: Option<ConsensusFork>) -> Self {
match magic { match magic {
Magic::Mainnet | Magic::Other(_) => ConsensusParams { Magic::Mainnet | Magic::Other(_) => ConsensusParams {
magic: magic,
bip16_time: 1333238400, // Apr 1 2012 bip16_time: 1333238400, // Apr 1 2012
bip34_height: 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8 bip34_height: 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8
bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0 bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
bip66_height: 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931 bip66_height: 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931
fork: fork,
rule_change_activation_threshold: 1916, // 95% rule_change_activation_threshold: 1916, // 95%
miner_confirmation_window: 2016, miner_confirmation_window: 2016,
csv_deployment: Some(Deployment { csv_deployment: Some(Deployment {
@ -46,10 +69,12 @@ impl ConsensusParams {
segwit_deployment: None, segwit_deployment: None,
}, },
Magic::Testnet => ConsensusParams { Magic::Testnet => ConsensusParams {
magic: magic,
bip16_time: 1333238400, // Apr 1 2012 bip16_time: 1333238400, // Apr 1 2012
bip34_height: 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8 bip34_height: 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8
bip65_height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6 bip65_height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
bip66_height: 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182 bip66_height: 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
fork: fork,
rule_change_activation_threshold: 1512, // 75% rule_change_activation_threshold: 1512, // 75%
miner_confirmation_window: 2016, miner_confirmation_window: 2016,
csv_deployment: Some(Deployment { csv_deployment: Some(Deployment {
@ -62,10 +87,12 @@ impl ConsensusParams {
segwit_deployment: None, segwit_deployment: None,
}, },
Magic::Regtest | Magic::Unitest => ConsensusParams { Magic::Regtest | Magic::Unitest => ConsensusParams {
magic: magic,
bip16_time: 1333238400, // Apr 1 2012 bip16_time: 1333238400, // Apr 1 2012
bip34_height: 100000000, // not activated on regtest bip34_height: 100000000, // not activated on regtest
bip65_height: 1351, bip65_height: 1351,
bip66_height: 1251, // used only in rpc tests bip66_height: 1251, // used only in rpc tests
fork: fork,
rule_change_activation_threshold: 108, // 75% rule_change_activation_threshold: 108, // 75%
miner_confirmation_window: 144, miner_confirmation_window: 144,
csv_deployment: Some(Deployment { csv_deployment: Some(Deployment {
@ -93,36 +120,36 @@ mod tests {
#[test] #[test]
fn test_consensus_params_bip34_height() { fn test_consensus_params_bip34_height() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).bip34_height, 227931); assert_eq!(ConsensusParams::new(Magic::Mainnet, None).bip34_height, 227931);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).bip34_height, 21111); assert_eq!(ConsensusParams::new(Magic::Testnet, None).bip34_height, 21111);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).bip34_height, 100000000); assert_eq!(ConsensusParams::new(Magic::Regtest, None).bip34_height, 100000000);
} }
#[test] #[test]
fn test_consensus_params_bip65_height() { fn test_consensus_params_bip65_height() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).bip65_height, 388381); assert_eq!(ConsensusParams::new(Magic::Mainnet, None).bip65_height, 388381);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).bip65_height, 581885); assert_eq!(ConsensusParams::new(Magic::Testnet, None).bip65_height, 581885);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).bip65_height, 1351); assert_eq!(ConsensusParams::new(Magic::Regtest, None).bip65_height, 1351);
} }
#[test] #[test]
fn test_consensus_params_bip66_height() { fn test_consensus_params_bip66_height() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).bip66_height, 363725); assert_eq!(ConsensusParams::new(Magic::Mainnet, None).bip66_height, 363725);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).bip66_height, 330776); assert_eq!(ConsensusParams::new(Magic::Testnet, None).bip66_height, 330776);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).bip66_height, 1251); assert_eq!(ConsensusParams::new(Magic::Regtest, None).bip66_height, 1251);
} }
#[test] #[test]
fn test_consensus_activation_threshold() { fn test_consensus_activation_threshold() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).rule_change_activation_threshold, 1916); assert_eq!(ConsensusParams::new(Magic::Mainnet, None).rule_change_activation_threshold, 1916);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).rule_change_activation_threshold, 1512); assert_eq!(ConsensusParams::new(Magic::Testnet, None).rule_change_activation_threshold, 1512);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).rule_change_activation_threshold, 108); assert_eq!(ConsensusParams::new(Magic::Regtest, None).rule_change_activation_threshold, 108);
} }
#[test] #[test]
fn test_consensus_miner_confirmation_window() { fn test_consensus_miner_confirmation_window() {
assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).miner_confirmation_window, 2016); assert_eq!(ConsensusParams::new(Magic::Mainnet, None).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::with_magic(Magic::Testnet).miner_confirmation_window, 2016); assert_eq!(ConsensusParams::new(Magic::Testnet, None).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::with_magic(Magic::Regtest).miner_confirmation_window, 144); assert_eq!(ConsensusParams::new(Magic::Regtest, None).miner_confirmation_window, 144);
} }
} }

View File

@ -8,7 +8,7 @@ mod magic;
pub use primitives::{hash, compact}; pub use primitives::{hash, compact};
pub use consensus::ConsensusParams; pub use consensus::{ConsensusParams, ConsensusFork};
pub use deployments::Deployment; pub use deployments::Deployment;
pub use magic::Magic; pub use magic::Magic;

View File

@ -4,7 +4,6 @@
use compact::Compact; use compact::Compact;
use ser::{Stream, Serializable}; use ser::{Stream, Serializable};
use chain::Block; use chain::Block;
use super::ConsensusParams;
const MAGIC_MAINNET: u32 = 0xD9B4BEF9; const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B; const MAGIC_TESTNET: u32 = 0x0709110B;
@ -87,10 +86,6 @@ impl Magic {
Magic::Regtest | Magic::Unitest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(), Magic::Regtest | Magic::Unitest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
} }
} }
pub fn consensus_params(&self) -> ConsensusParams {
ConsensusParams::with_magic(*self)
}
} }
impl Serializable for Magic { impl Serializable for Magic {

View File

@ -9,6 +9,16 @@ args:
- regtest: - regtest:
long: regtest long: regtest
help: Use private network for regtest help: Use private network for regtest
- segwit2x:
long: segwit2x
value_name: BLOCK
help: Enable SegWit2x verification rules, starting from given block height
takes_value: true
- bitcoin-cash:
long: bitcoin-cash
value_name: BLOCK
help: Use Bitcoin Cash verification rules, starting from given block height
takes_value: true
- connect: - connect:
short: c short: c
long: connect long: connect

View File

@ -11,7 +11,7 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed"); let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed");
let skip_verification = matches.is_present("skip-verification"); let skip_verification = matches.is_present("skip-verification");
let mut writer = create_sync_blocks_writer(db, cfg.magic, !skip_verification); let mut writer = create_sync_blocks_writer(db, cfg.consensus, !skip_verification);
let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned())); let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned()));
let mut counter = 0; let mut counter = 0;

View File

@ -111,7 +111,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
}; };
let sync_peers = create_sync_peers(); let sync_peers = create_sync_peers();
let local_sync_node = create_local_sync_node(cfg.magic, db.clone(), sync_peers.clone()); let local_sync_node = create_local_sync_node(cfg.consensus, db.clone(), sync_peers.clone());
let sync_connection_factory = create_sync_connection_factory(sync_peers.clone(), local_sync_node.clone()); let sync_connection_factory = create_sync_connection_factory(sync_peers.clone(), local_sync_node.clone());
if let Some(block_notify_command) = cfg.block_notify_command { if let Some(block_notify_command) = cfg.block_notify_command {

View File

@ -1,6 +1,6 @@
use std::net; use std::net;
use clap; use clap;
use network::Magic; use network::{Magic, ConsensusParams, ConsensusFork};
use p2p::InternetProtocol; use p2p::InternetProtocol;
use seednodes::{mainnet_seednodes, testnet_seednodes}; use seednodes::{mainnet_seednodes, testnet_seednodes};
use rpc_apis::ApiSet; use rpc_apis::ApiSet;
@ -9,6 +9,7 @@ use rpc::HttpConfiguration as RpcHttpConfig;
pub struct Config { pub struct Config {
pub magic: Magic, pub magic: Magic,
pub consensus: ConsensusParams,
pub port: u16, pub port: u16,
pub connect: Option<net::SocketAddr>, pub connect: Option<net::SocketAddr>,
pub seednodes: Vec<String>, pub seednodes: Vec<String>,
@ -35,6 +36,13 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
(true, true) => return Err("Only one testnet option can be used".into()), (true, true) => return Err("Only one testnet option can be used".into()),
}; };
let consensus = ConsensusParams::new(magic, match (matches.value_of("segwit2x"), matches.value_of("bitcoin-cash")) {
(Some(block), None) => Some(ConsensusFork::SegWit2x(block.parse().map_err(|_| "Invalid block number".to_owned())?)),
(None, Some(block)) => Some(ConsensusFork::BitcoinCash(block.parse().map_err(|_| "Invalid block number".to_owned())?)),
(None, None) => None,
(Some(_), Some(_)) => return Err("Only one fork can be used".into()),
});
let (in_connections, out_connections) = match magic { let (in_connections, out_connections) = match magic {
Magic::Testnet | Magic::Mainnet | Magic::Other(_) => (10, 10), Magic::Testnet | Magic::Mainnet | Magic::Other(_) => (10, 10),
Magic::Regtest | Magic::Unitest => (1, 0), Magic::Regtest | Magic::Unitest => (1, 0),
@ -52,22 +60,22 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
}; };
let port = match matches.value_of("port") { let port = match matches.value_of("port") {
Some(port) => try!(port.parse().map_err(|_| "Invalid port".to_owned())), Some(port) => port.parse().map_err(|_| "Invalid port".to_owned())?,
None => magic.port(), None => magic.port(),
}; };
let connect = match matches.value_of("connect") { let connect = match matches.value_of("connect") {
Some(s) => Some(try!(match s.parse::<net::SocketAddr>() { Some(s) => Some(match s.parse::<net::SocketAddr>() {
Err(_) => s.parse::<net::IpAddr>() Err(_) => s.parse::<net::IpAddr>()
.map(|ip| net::SocketAddr::new(ip, magic.port())) .map(|ip| net::SocketAddr::new(ip, magic.port()))
.map_err(|_| "Invalid connect".to_owned()), .map_err(|_| "Invalid connect".to_owned()),
Ok(a) => Ok(a), Ok(a) => Ok(a),
})), }?),
None => None, None => None,
}; };
let seednodes = match matches.value_of("seednode") { let seednodes = match matches.value_of("seednode") {
Some(s) => vec![try!(s.parse().map_err(|_| "Invalid seednode".to_owned()))], Some(s) => vec![s.parse().map_err(|_| "Invalid seednode".to_owned())?],
None => match magic { None => match magic {
Magic::Mainnet => mainnet_seednodes().into_iter().map(Into::into).collect(), Magic::Mainnet => mainnet_seednodes().into_iter().map(Into::into).collect(),
Magic::Testnet => testnet_seednodes().into_iter().map(Into::into).collect(), Magic::Testnet => testnet_seednodes().into_iter().map(Into::into).collect(),
@ -76,30 +84,31 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
}; };
let db_cache = match matches.value_of("db-cache") { let db_cache = match matches.value_of("db-cache") {
Some(s) => try!(s.parse().map_err(|_| "Invalid cache size - should be number in MB".to_owned())), Some(s) => s.parse().map_err(|_| "Invalid cache size - should be number in MB".to_owned())?,
None => DEFAULT_DB_CACHE, None => DEFAULT_DB_CACHE,
}; };
let data_dir = match matches.value_of("data-dir") { let data_dir = match matches.value_of("data-dir") {
Some(s) => Some(try!(s.parse().map_err(|_| "Invalid data-dir".to_owned()))), Some(s) => Some(s.parse().map_err(|_| "Invalid data-dir".to_owned())?),
None => None, None => None,
}; };
let only_net = match matches.value_of("only-net") { let only_net = match matches.value_of("only-net") {
Some(s) => try!(s.parse()), Some(s) => s.parse()?,
None => InternetProtocol::default(), None => InternetProtocol::default(),
}; };
let rpc_config = try!(parse_rpc_config(magic, matches)); let rpc_config = parse_rpc_config(magic, matches)?;
let block_notify_command = match matches.value_of("blocknotify") { let block_notify_command = match matches.value_of("blocknotify") {
Some(s) => Some(try!(s.parse().map_err(|_| "Invalid blocknotify commmand".to_owned()))), Some(s) => Some(s.parse().map_err(|_| "Invalid blocknotify commmand".to_owned())?),
None => None, None => None,
}; };
let config = Config { let config = Config {
print_to_console: print_to_console, print_to_console: print_to_console,
magic: magic, magic: magic,
consensus: consensus,
port: port, port: port,
connect: connect, connect: connect,
seednodes: seednodes, seednodes: seednodes,
@ -125,19 +134,19 @@ fn parse_rpc_config(magic: Magic, matches: &clap::ArgMatches) -> Result<RpcHttpC
} }
if let Some(apis) = matches.value_of("jsonrpc-apis") { if let Some(apis) = matches.value_of("jsonrpc-apis") {
config.apis = ApiSet::List(vec![try!(apis.parse().map_err(|_| "Invalid APIs".to_owned()))].into_iter().collect()); config.apis = ApiSet::List(vec![apis.parse().map_err(|_| "Invalid APIs".to_owned())?].into_iter().collect());
} }
if let Some(port) = matches.value_of("jsonrpc-port") { if let Some(port) = matches.value_of("jsonrpc-port") {
config.port = try!(port.parse().map_err(|_| "Invalid JSON RPC port".to_owned())); config.port = port.parse().map_err(|_| "Invalid JSON RPC port".to_owned())?;
} }
if let Some(interface) = matches.value_of("jsonrpc-interface") { if let Some(interface) = matches.value_of("jsonrpc-interface") {
config.interface = interface.to_owned(); config.interface = interface.to_owned();
} }
if let Some(cors) = matches.value_of("jsonrpc-cors") { if let Some(cors) = matches.value_of("jsonrpc-cors") {
config.cors = Some(vec![try!(cors.parse().map_err(|_| "Invalid JSON RPC CORS".to_owned()))]); config.cors = Some(vec![cors.parse().map_err(|_| "Invalid JSON RPC CORS".to_owned())?]);
} }
if let Some(hosts) = matches.value_of("jsonrpc-hosts") { if let Some(hosts) = matches.value_of("jsonrpc-hosts") {
config.hosts = Some(vec![try!(hosts.parse().map_err(|_| "Invalid JSON RPC hosts".to_owned()))]); config.hosts = Some(vec![hosts.parse().map_err(|_| "Invalid JSON RPC hosts".to_owned())?]);
} }
Ok(config) Ok(config)

View File

@ -3,7 +3,7 @@ use std::sync::Arc;
use parking_lot::Mutex; use parking_lot::Mutex;
use chain; use chain;
use db; use db;
use network::Magic; use network::ConsensusParams;
use primitives::hash::H256; use primitives::hash::H256;
use super::Error; use super::Error;
use synchronization_verifier::{Verifier, SyncVerifier, VerificationTask, use synchronization_verifier::{Verifier, SyncVerifier, VerificationTask,
@ -44,10 +44,10 @@ struct BlocksWriterSinkData {
impl BlocksWriter { impl BlocksWriter {
/// Create new synchronous blocks writer /// Create new synchronous blocks writer
pub fn new(storage: StorageRef, network: Magic, verification: bool) -> BlocksWriter { pub fn new(storage: StorageRef, consensus: ConsensusParams, verification: bool) -> BlocksWriter {
let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone())); let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone()));
let sink = Arc::new(BlocksWriterSink::new(sink_data.clone())); let sink = Arc::new(BlocksWriterSink::new(sink_data.clone()));
let verifier = SyncVerifier::new(network, storage.clone(), sink); let verifier = SyncVerifier::new(consensus, storage.clone(), sink);
BlocksWriter { BlocksWriter {
storage: storage, storage: storage,
orphaned_blocks_pool: OrphanBlocksPool::new(), orphaned_blocks_pool: OrphanBlocksPool::new(),
@ -156,14 +156,14 @@ mod tests {
use std::sync::Arc; use std::sync::Arc;
use db::{BlockChainDatabase}; use db::{BlockChainDatabase};
use network::Magic; use network::{ConsensusParams, Magic};
use super::super::Error; use super::super::Error;
use super::{BlocksWriter, MAX_ORPHANED_BLOCKS}; use super::{BlocksWriter, MAX_ORPHANED_BLOCKS};
#[test] #[test]
fn blocks_writer_appends_blocks() { fn blocks_writer_appends_blocks() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()])); let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Magic::Testnet, None), true);
blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error"); blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error");
assert_eq!(db.best_block().number, 1); assert_eq!(db.best_block().number, 1);
} }
@ -172,7 +172,7 @@ mod tests {
fn blocks_writer_verification_error() { fn blocks_writer_verification_error() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()])); let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1); let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1);
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Magic::Testnet, None), true);
for (index, block) in blocks.into_iter().skip(1).enumerate() { for (index, block) in blocks.into_iter().skip(1).enumerate() {
match blocks_target.append_block(block.into()) { match blocks_target.append_block(block.into()) {
Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (), Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (),
@ -186,7 +186,7 @@ mod tests {
#[test] #[test]
fn blocks_writer_out_of_order_block() { fn blocks_writer_out_of_order_block() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()])); let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Magic::Testnet, None), true);
let wrong_block = test_data::block_builder() let wrong_block = test_data::block_builder()
.header().parent(test_data::genesis().hash()).build() .header().parent(test_data::genesis().hash()).build()
@ -201,7 +201,7 @@ mod tests {
#[test] #[test]
fn blocks_writer_append_to_existing_db() { fn blocks_writer_append_to_existing_db() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()])); let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Magic::Testnet, None), true);
assert!(blocks_target.append_block(test_data::genesis().into()).is_ok()); assert!(blocks_target.append_block(test_data::genesis().into()).is_ok());
assert_eq!(db.best_block().number, 0); assert_eq!(db.best_block().number, 0);

View File

@ -41,7 +41,7 @@ pub use types::PeersRef;
use std::sync::Arc; use std::sync::Arc;
use parking_lot::RwLock; use parking_lot::RwLock;
use network::Magic; use network::{Magic, ConsensusParams};
use primitives::hash::H256; use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier; use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
@ -65,8 +65,8 @@ pub trait SyncListener: Send + 'static {
} }
/// Create blocks writer. /// Create blocks writer.
pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic, verification: bool) -> blocks_writer::BlocksWriter { pub fn create_sync_blocks_writer(db: db::SharedStore, consensus: ConsensusParams, verification: bool) -> blocks_writer::BlocksWriter {
blocks_writer::BlocksWriter::new(db, network, verification) blocks_writer::BlocksWriter::new(db, consensus, verification)
} }
/// Create synchronization peers /// Create synchronization peers
@ -77,7 +77,7 @@ pub fn create_sync_peers() -> PeersRef {
} }
/// Creates local sync node for given `db` /// Creates local sync node for given `db`
pub fn create_local_sync_node(network: Magic, db: db::SharedStore, peers: PeersRef) -> LocalNodeRef { pub fn create_local_sync_node(consensus: ConsensusParams, db: db::SharedStore, peers: PeersRef) -> LocalNodeRef {
use miner::MemoryPool; use miner::MemoryPool;
use synchronization_chain::Chain as SyncChain; use synchronization_chain::Chain as SyncChain;
use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor; use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;
@ -89,8 +89,8 @@ pub fn create_local_sync_node(network: Magic, db: db::SharedStore, peers: PeersR
use utils::SynchronizationState; use utils::SynchronizationState;
use types::SynchronizationStateRef; use types::SynchronizationStateRef;
let network = consensus.magic;
let sync_client_config = SynchronizationConfig { let sync_client_config = SynchronizationConfig {
network: network,
// during regtests, peer is providing us with bad blocks => we shouldn't close connection because of this // during regtests, peer is providing us with bad blocks => we shouldn't close connection because of this
close_connection_on_bad_block: network != Magic::Regtest, close_connection_on_bad_block: network != Magic::Regtest,
}; };
@ -98,7 +98,7 @@ pub fn create_local_sync_node(network: Magic, db: db::SharedStore, peers: PeersR
let memory_pool = Arc::new(RwLock::new(MemoryPool::new())); let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone())); let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone()));
let sync_chain = SyncChain::new(db.clone(), memory_pool.clone()); let sync_chain = SyncChain::new(db.clone(), memory_pool.clone());
let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), network)); let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), consensus));
let sync_executor = SyncExecutor::new(peers.clone()); let sync_executor = SyncExecutor::new(peers.clone());
let sync_server = Arc::new(ServerImpl::new(peers.clone(), db.clone(), memory_pool.clone(), sync_executor.clone())); let sync_server = Arc::new(ServerImpl::new(peers.clone(), db.clone(), memory_pool.clone(), sync_executor.clone()));
let sync_client_core = SynchronizationClientCore::new(sync_client_config, sync_state.clone(), peers.clone(), sync_executor.clone(), sync_chain, chain_verifier.clone()); let sync_client_core = SynchronizationClientCore::new(sync_client_config, sync_state.clone(), peers.clone(), sync_executor.clone(), sync_chain, chain_verifier.clone());

View File

@ -335,7 +335,7 @@ pub mod tests {
use synchronization_chain::Chain; use synchronization_chain::Chain;
use message::types; use message::types;
use message::common::{InventoryVector, InventoryType}; use message::common::{InventoryVector, InventoryType};
use network::Magic; use network::{ConsensusParams, Magic};
use chain::Transaction; use chain::Transaction;
use db::{BlockChainDatabase}; use db::{BlockChainDatabase};
use miner::MemoryPool; use miner::MemoryPool;
@ -373,8 +373,8 @@ pub mod tests {
let sync_peers = Arc::new(PeersImpl::default()); let sync_peers = Arc::new(PeersImpl::default());
let executor = DummyTaskExecutor::new(); let executor = DummyTaskExecutor::new();
let server = Arc::new(DummyServer::new()); let server = Arc::new(DummyServer::new());
let config = Config { network: Magic::Mainnet, close_connection_on_bad_block: true }; let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), Magic::Mainnet)); let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Magic::Mainnet, None)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier); let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier);
let mut verifier = match verifier { let mut verifier = match verifier {
Some(verifier) => verifier, Some(verifier) => verifier,

View File

@ -9,7 +9,6 @@ use chain::{IndexedBlockHeader, IndexedTransaction, Transaction, IndexedBlock};
use message::types; use message::types;
use message::common::{InventoryType, InventoryVector}; use message::common::{InventoryType, InventoryVector};
use miner::transaction_fee_rate; use miner::transaction_fee_rate;
use network::Magic;
use primitives::hash::H256; use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier; use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use synchronization_chain::{Chain, BlockState, TransactionState, BlockInsertionResult}; use synchronization_chain::{Chain, BlockState, TransactionState, BlockInsertionResult};
@ -77,8 +76,6 @@ pub trait ClientCore {
/// Synchronization client configuration options. /// Synchronization client configuration options.
#[derive(Debug)] #[derive(Debug)]
pub struct Config { pub struct Config {
/// Network
pub network: Magic,
/// If true, connection to peer who has provided us with bad block is closed /// If true, connection to peer who has provided us with bad block is closed
pub close_connection_on_bad_block: bool, pub close_connection_on_bad_block: bool,
} }
@ -1183,7 +1180,7 @@ pub mod tests {
use message::common::InventoryVector; use message::common::InventoryVector;
use message::types; use message::types;
use miner::MemoryPool; use miner::MemoryPool;
use network::Magic; use network::{ConsensusParams, Magic};
use primitives::hash::H256; use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier; use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use inbound_connection::tests::DummyOutboundSyncConnection; use inbound_connection::tests::DummyOutboundSyncConnection;
@ -1236,9 +1233,9 @@ pub mod tests {
let memory_pool = Arc::new(RwLock::new(MemoryPool::new())); let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let chain = Chain::new(storage.clone(), memory_pool.clone()); let chain = Chain::new(storage.clone(), memory_pool.clone());
let executor = DummyTaskExecutor::new(); let executor = DummyTaskExecutor::new();
let config = Config { network: Magic::Mainnet, close_connection_on_bad_block: true }; let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), Magic::Unitest)); let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Magic::Unitest, None)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier.clone()); let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier.clone());
{ {
client_core.lock().set_verify_headers(false); client_core.lock().set_verify_headers(false);

View File

@ -5,7 +5,7 @@ use std::thread;
use parking_lot::Mutex; use parking_lot::Mutex;
use time::get_time; use time::get_time;
use chain::{IndexedBlock, IndexedTransaction}; use chain::{IndexedBlock, IndexedTransaction};
use network::Magic; use network::ConsensusParams;
use primitives::hash::H256; use primitives::hash::H256;
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify}; use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify};
use types::{BlockHeight, StorageRef, MemoryPoolRef}; use types::{BlockHeight, StorageRef, MemoryPoolRef};
@ -182,8 +182,8 @@ pub struct SyncVerifier<T: VerificationSink> {
impl<T> SyncVerifier<T> where T: VerificationSink { impl<T> SyncVerifier<T> where T: VerificationSink {
/// Create new sync verifier /// Create new sync verifier
pub fn new(network: Magic, storage: StorageRef, sink: Arc<T>) -> Self { pub fn new(consensus: ConsensusParams, storage: StorageRef, sink: Arc<T>) -> Self {
let verifier = ChainVerifier::new(storage.clone(), network); let verifier = ChainVerifier::new(storage.clone(), consensus);
SyncVerifier { SyncVerifier {
verifier: verifier, verifier: verifier,
sink: sink, sink: sink,

View File

@ -1,4 +1,4 @@
use network::{Magic, ConsensusParams}; use network::ConsensusParams;
use db::{TransactionOutputProvider, BlockHeaderProvider}; use db::{TransactionOutputProvider, BlockHeaderProvider};
use script; use script;
use sigops::transaction_sigops; use sigops::transaction_sigops;
@ -21,18 +21,17 @@ pub struct BlockAcceptor<'a> {
impl<'a> BlockAcceptor<'a> { impl<'a> BlockAcceptor<'a> {
pub fn new( pub fn new(
store: &'a TransactionOutputProvider, store: &'a TransactionOutputProvider,
network: Magic, consensus: &'a ConsensusParams,
block: CanonBlock<'a>, block: CanonBlock<'a>,
height: u32, height: u32,
deployments: &'a Deployments, deployments: &'a Deployments,
headers: &'a BlockHeaderProvider, headers: &'a BlockHeaderProvider,
) -> Self { ) -> Self {
let params = network.consensus_params();
BlockAcceptor { BlockAcceptor {
finality: BlockFinality::new(block, height, deployments, headers, &params), finality: BlockFinality::new(block, height, deployments, headers, consensus),
coinbase_script: BlockCoinbaseScript::new(block, &params, height), coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height), coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, params, MAX_BLOCK_SIGOPS), sigops: BlockSigops::new(block, store, consensus, MAX_BLOCK_SIGOPS),
} }
} }
@ -82,12 +81,12 @@ impl<'a> BlockFinality<'a> {
pub struct BlockSigops<'a> { pub struct BlockSigops<'a> {
block: CanonBlock<'a>, block: CanonBlock<'a>,
store: &'a TransactionOutputProvider, store: &'a TransactionOutputProvider,
consensus_params: ConsensusParams, consensus_params: &'a ConsensusParams,
max_sigops: usize, max_sigops: usize,
} }
impl<'a> BlockSigops<'a> { impl<'a> BlockSigops<'a> {
fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, consensus_params: ConsensusParams, max_sigops: usize) -> Self { fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, consensus_params: &'a ConsensusParams, max_sigops: usize) -> Self {
BlockSigops { BlockSigops {
block: block, block: block,
store: store, store: store,

View File

@ -1,6 +1,6 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator}; use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use db::Store; use db::Store;
use network::Magic; use network::ConsensusParams;
use error::Error; use error::Error;
use canon::CanonBlock; use canon::CanonBlock;
use accept_block::BlockAcceptor; use accept_block::BlockAcceptor;
@ -16,20 +16,20 @@ pub struct ChainAcceptor<'a> {
} }
impl<'a> ChainAcceptor<'a> { impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, network: Magic, block: CanonBlock<'a>, height: u32, deployments: &'a Deployments) -> Self { pub fn new(store: &'a Store, consensus: &'a ConsensusParams, block: CanonBlock<'a>, height: u32, deployments: &'a Deployments) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str()); trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw()); let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider(); let headers = store.as_block_header_provider();
ChainAcceptor { ChainAcceptor {
block: BlockAcceptor::new(store.as_transaction_output_provider(), network, block, height, deployments, headers), block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, deployments, headers),
header: HeaderAcceptor::new(headers, network, block.header(), height, deployments), header: HeaderAcceptor::new(headers, consensus, block.header(), height, deployments),
transactions: block.transactions() transactions: block.transactions()
.into_iter() .into_iter()
.enumerate() .enumerate()
.map(|(tx_index, tx)| TransactionAcceptor::new( .map(|(tx_index, tx)| TransactionAcceptor::new(
store.as_transaction_meta_provider(), store.as_transaction_meta_provider(),
output_store, output_store,
network, consensus,
tx, tx,
block.hash(), block.hash(),
height, height,

View File

@ -15,16 +15,15 @@ pub struct HeaderAcceptor<'a> {
impl<'a> HeaderAcceptor<'a> { impl<'a> HeaderAcceptor<'a> {
pub fn new( pub fn new(
store: &'a BlockHeaderProvider, store: &'a BlockHeaderProvider,
network: Magic, consensus: &'a ConsensusParams,
header: CanonHeader<'a>, header: CanonHeader<'a>,
height: u32, height: u32,
deployments: &'a Deployments, deployments: &'a Deployments,
) -> Self { ) -> Self {
let params = network.consensus_params();
HeaderAcceptor { HeaderAcceptor {
work: HeaderWork::new(header, store, height, network), work: HeaderWork::new(header, store, height, consensus.magic),
median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, &params), median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, consensus),
version: HeaderVersion::new(header, height, params), version: HeaderVersion::new(header, height, consensus),
} }
} }
@ -41,11 +40,11 @@ impl<'a> HeaderAcceptor<'a> {
pub struct HeaderVersion<'a> { pub struct HeaderVersion<'a> {
header: CanonHeader<'a>, header: CanonHeader<'a>,
height: u32, height: u32,
consensus_params: ConsensusParams, consensus_params: &'a ConsensusParams,
} }
impl<'a> HeaderVersion<'a> { impl<'a> HeaderVersion<'a> {
fn new(header: CanonHeader<'a>, height: u32, consensus_params: ConsensusParams) -> Self { fn new(header: CanonHeader<'a>, height: u32, consensus_params: &'a ConsensusParams) -> Self {
HeaderVersion { HeaderVersion {
header: header, header: header,
height: height, height: height,

View File

@ -1,6 +1,6 @@
use primitives::hash::H256; use primitives::hash::H256;
use db::{TransactionMetaProvider, TransactionOutputProvider, BlockHeaderProvider}; use db::{TransactionMetaProvider, TransactionOutputProvider, BlockHeaderProvider};
use network::{Magic, ConsensusParams}; use network::ConsensusParams;
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner}; use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner};
use duplex_store::DuplexTransactionOutputProvider; use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments; use deployments::Deployments;
@ -25,7 +25,7 @@ impl<'a> TransactionAcceptor<'a> {
// previous transaction outputs // previous transaction outputs
// in case of block validation, that's database and currently processed block // in case of block validation, that's database and currently processed block
output_store: DuplexTransactionOutputProvider<'a>, output_store: DuplexTransactionOutputProvider<'a>,
network: Magic, consensus: &'a ConsensusParams,
transaction: CanonTransaction<'a>, transaction: CanonTransaction<'a>,
block_hash: &'a H256, block_hash: &'a H256,
height: u32, height: u32,
@ -35,14 +35,13 @@ impl<'a> TransactionAcceptor<'a> {
headers: &'a BlockHeaderProvider, headers: &'a BlockHeaderProvider,
) -> Self { ) -> Self {
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str()); trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
let params = network.consensus_params();
TransactionAcceptor { TransactionAcceptor {
bip30: TransactionBip30::new_for_sync(transaction, meta_store, params.clone(), block_hash, height), bip30: TransactionBip30::new_for_sync(transaction, meta_store, consensus, block_hash, height),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index), missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height), maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store), overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store), double_spent: TransactionDoubleSpend::new(transaction, output_store),
eval: TransactionEval::new(transaction, output_store, &params, height, time, deployments, headers), eval: TransactionEval::new(transaction, output_store, consensus, height, time, deployments, headers),
} }
} }
@ -72,7 +71,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
meta_store: &'a TransactionMetaProvider, meta_store: &'a TransactionMetaProvider,
// in case of memory pool it should be db and memory pool // in case of memory pool it should be db and memory pool
output_store: DuplexTransactionOutputProvider<'a>, output_store: DuplexTransactionOutputProvider<'a>,
network: Magic, consensus: &'a ConsensusParams,
transaction: CanonTransaction<'a>, transaction: CanonTransaction<'a>,
height: u32, height: u32,
time: u32, time: u32,
@ -80,15 +79,14 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
headers: &'a BlockHeaderProvider, headers: &'a BlockHeaderProvider,
) -> Self { ) -> Self {
trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str()); trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
let params = network.consensus_params();
let transaction_index = 0; let transaction_index = 0;
MemoryPoolTransactionAcceptor { MemoryPoolTransactionAcceptor {
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index), missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height), maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store), overspent: TransactionOverspent::new(transaction, output_store),
sigops: TransactionSigops::new(transaction, output_store, params.clone(), MAX_BLOCK_SIGOPS, time), sigops: TransactionSigops::new(transaction, output_store, consensus, MAX_BLOCK_SIGOPS, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store), double_spent: TransactionDoubleSpend::new(transaction, output_store),
eval: TransactionEval::new(transaction, output_store, &params, height, time, deployments, headers), eval: TransactionEval::new(transaction, output_store, consensus, height, time, deployments, headers),
} }
} }
@ -124,7 +122,7 @@ impl<'a> TransactionBip30<'a> {
fn new_for_sync( fn new_for_sync(
transaction: CanonTransaction<'a>, transaction: CanonTransaction<'a>,
store: &'a TransactionMetaProvider, store: &'a TransactionMetaProvider,
consensus_params: ConsensusParams, consensus_params: &'a ConsensusParams,
block_hash: &'a H256, block_hash: &'a H256,
height: u32 height: u32
) -> Self { ) -> Self {
@ -243,13 +241,13 @@ impl<'a> TransactionOverspent<'a> {
pub struct TransactionSigops<'a> { pub struct TransactionSigops<'a> {
transaction: CanonTransaction<'a>, transaction: CanonTransaction<'a>,
store: DuplexTransactionOutputProvider<'a>, store: DuplexTransactionOutputProvider<'a>,
consensus_params: ConsensusParams, consensus_params: &'a ConsensusParams,
max_sigops: usize, max_sigops: usize,
time: u32, time: u32,
} }
impl<'a> TransactionSigops<'a> { impl<'a> TransactionSigops<'a> {
fn new(transaction: CanonTransaction<'a>, store: DuplexTransactionOutputProvider<'a>, consensus_params: ConsensusParams, max_sigops: usize, time: u32) -> Self { fn new(transaction: CanonTransaction<'a>, store: DuplexTransactionOutputProvider<'a>, consensus_params: &'a ConsensusParams, max_sigops: usize, time: u32) -> Self {
TransactionSigops { TransactionSigops {
transaction: transaction, transaction: transaction,
store: store, store: store,

View File

@ -3,7 +3,7 @@
use hash::H256; use hash::H256;
use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, Transaction}; use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, Transaction};
use db::{SharedStore, TransactionOutputProvider, BlockHeaderProvider, BlockOrigin}; use db::{SharedStore, TransactionOutputProvider, BlockHeaderProvider, BlockOrigin};
use network::Magic; use network::ConsensusParams;
use error::{Error, TransactionError}; use error::{Error, TransactionError};
use canon::{CanonBlock, CanonTransaction}; use canon::{CanonBlock, CanonTransaction};
use duplex_store::{DuplexTransactionOutputProvider, NoopStore}; use duplex_store::{DuplexTransactionOutputProvider, NoopStore};
@ -17,15 +17,15 @@ use Verify;
pub struct BackwardsCompatibleChainVerifier { pub struct BackwardsCompatibleChainVerifier {
store: SharedStore, store: SharedStore,
network: Magic, consensus: ConsensusParams,
deployments: Deployments, deployments: Deployments,
} }
impl BackwardsCompatibleChainVerifier { impl BackwardsCompatibleChainVerifier {
pub fn new(store: SharedStore, network: Magic) -> Self { pub fn new(store: SharedStore, consensus: ConsensusParams) -> Self {
BackwardsCompatibleChainVerifier { BackwardsCompatibleChainVerifier {
store: store, store: store,
network: network, consensus: consensus,
deployments: Deployments::new(), deployments: Deployments::new(),
} }
} }
@ -33,7 +33,7 @@ impl BackwardsCompatibleChainVerifier {
fn verify_block(&self, block: &IndexedBlock) -> Result<(), Error> { fn verify_block(&self, block: &IndexedBlock) -> Result<(), Error> {
let current_time = ::time::get_time().sec as u32; let current_time = ::time::get_time().sec as u32;
// first run pre-verification // first run pre-verification
let chain_verifier = ChainVerifier::new(block, self.network, current_time); let chain_verifier = ChainVerifier::new(block, self.consensus.magic, current_time);
chain_verifier.check()?; chain_verifier.check()?;
assert_eq!(Some(self.store.best_block().hash), self.store.block_hash(self.store.best_block().number)); assert_eq!(Some(self.store.best_block().hash), self.store.block_hash(self.store.best_block().number));
@ -46,21 +46,21 @@ impl BackwardsCompatibleChainVerifier {
}, },
BlockOrigin::CanonChain { block_number } => { BlockOrigin::CanonChain { block_number } => {
let canon_block = CanonBlock::new(block); let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), self.network, canon_block, block_number, &self.deployments); let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, canon_block, block_number, &self.deployments);
chain_acceptor.check()?; chain_acceptor.check()?;
}, },
BlockOrigin::SideChain(origin) => { BlockOrigin::SideChain(origin) => {
let block_number = origin.block_number; let block_number = origin.block_number;
let fork = self.store.fork(origin)?; let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block); let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), self.network, canon_block, block_number, &self.deployments); let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, canon_block, block_number, &self.deployments);
chain_acceptor.check()?; chain_acceptor.check()?;
}, },
BlockOrigin::SideChainBecomingCanonChain(origin) => { BlockOrigin::SideChainBecomingCanonChain(origin) => {
let block_number = origin.block_number; let block_number = origin.block_number;
let fork = self.store.fork(origin)?; let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block); let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), self.network, canon_block, block_number, &self.deployments); let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, canon_block, block_number, &self.deployments);
chain_acceptor.check()?; chain_acceptor.check()?;
}, },
} }
@ -79,7 +79,7 @@ impl BackwardsCompatibleChainVerifier {
// TODO: full verification // TODO: full verification
let current_time = ::time::get_time().sec as u32; let current_time = ::time::get_time().sec as u32;
let header = IndexedBlockHeader::new(hash.clone(), header.clone()); let header = IndexedBlockHeader::new(hash.clone(), header.clone());
let header_verifier = HeaderVerifier::new(&header, self.network, current_time); let header_verifier = HeaderVerifier::new(&header, self.consensus.magic, current_time);
header_verifier.check() header_verifier.check()
} }
@ -102,7 +102,7 @@ impl BackwardsCompatibleChainVerifier {
let tx_acceptor = MemoryPoolTransactionAcceptor::new( let tx_acceptor = MemoryPoolTransactionAcceptor::new(
self.store.as_transaction_meta_provider(), self.store.as_transaction_meta_provider(),
output_store, output_store,
self.network, &self.consensus,
canon_tx, canon_tx,
height, height,
time, time,
@ -133,7 +133,7 @@ mod tests {
use std::sync::Arc; use std::sync::Arc;
use chain::IndexedBlock; use chain::IndexedBlock;
use db::{BlockChainDatabase, Error as DBError}; use db::{BlockChainDatabase, Error as DBError};
use network::Magic; use network::{Magic, ConsensusParams};
use script; use script;
use super::BackwardsCompatibleChainVerifier as ChainVerifier; use super::BackwardsCompatibleChainVerifier as ChainVerifier;
use {Verify, Error, TransactionError}; use {Verify, Error, TransactionError};
@ -142,7 +142,7 @@ mod tests {
fn verify_orphan() { fn verify_orphan() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()])); let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b2 = test_data::block_h2().into(); let b2 = test_data::block_h2().into();
let verifier = ChainVerifier::new(storage, Magic::Unitest); let verifier = ChainVerifier::new(storage, ConsensusParams::new(Magic::Unitest, None));
assert_eq!(Err(Error::Database(DBError::UnknownParent)), verifier.verify(&b2)); assert_eq!(Err(Error::Database(DBError::UnknownParent)), verifier.verify(&b2));
} }
@ -150,7 +150,7 @@ mod tests {
fn verify_smoky() { fn verify_smoky() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()])); let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b1 = test_data::block_h1(); let b1 = test_data::block_h1();
let verifier = ChainVerifier::new(storage, Magic::Unitest); let verifier = ChainVerifier::new(storage, ConsensusParams::new(Magic::Unitest, None));
assert!(verifier.verify(&b1.into()).is_ok()); assert!(verifier.verify(&b1.into()).is_ok());
} }
@ -163,7 +163,7 @@ mod tests {
test_data::block_h1().into(), test_data::block_h1().into(),
]); ]);
let b1 = test_data::block_h2(); let b1 = test_data::block_h2();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
assert!(verifier.verify(&b1.into()).is_ok()); assert!(verifier.verify(&b1.into()).is_ok());
} }
@ -192,7 +192,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build() .merkled_header().parent(genesis.hash()).build()
.build(); .build();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
let expected = Err(Error::Transaction( let expected = Err(Error::Transaction(
1, 1,
@ -230,7 +230,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build() .merkled_header().parent(genesis.hash()).build()
.build(); .build();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
assert!(verifier.verify(&block.into()).is_ok()); assert!(verifier.verify(&block.into()).is_ok());
} }
@ -266,7 +266,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build() .merkled_header().parent(genesis.hash()).build()
.build(); .build();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
assert!(verifier.verify(&block.into()).is_ok()); assert!(verifier.verify(&block.into()).is_ok());
} }
@ -305,7 +305,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build() .merkled_header().parent(genesis.hash()).build()
.build(); .build();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
let expected = Err(Error::Transaction(2, TransactionError::Overspend)); let expected = Err(Error::Transaction(2, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(&block.into())); assert_eq!(expected, verifier.verify(&block.into()));
@ -347,7 +347,7 @@ mod tests {
.merkled_header().parent(best_hash).build() .merkled_header().parent(best_hash).build()
.build(); .build();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
assert!(verifier.verify(&block.into()).is_ok()); assert!(verifier.verify(&block.into()).is_ok());
} }
@ -394,7 +394,7 @@ mod tests {
.build() .build()
.into(); .into();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
let expected = Err(Error::MaximumSigops); let expected = Err(Error::MaximumSigops);
assert_eq!(expected, verifier.verify(&block.into())); assert_eq!(expected, verifier.verify(&block.into()));
} }
@ -416,7 +416,7 @@ mod tests {
.build() .build()
.into(); .into();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Unitest); let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Magic::Unitest, None));
let expected = Err(Error::CoinbaseOverspend { let expected = Err(Error::CoinbaseOverspend {
expected_max: 5000000000, expected_max: 5000000000,