This commit is contained in:
Svyatoslav Nikolsky 2018-11-13 15:21:56 +03:00
parent 3a1e6c9aa6
commit 1eb6cbe622
32 changed files with 258 additions and 1667 deletions

View File

@ -2,7 +2,7 @@ use std::sync::Arc;
use db::BlockChainDatabase;
use chain::IndexedBlock;
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify, VerificationLevel};
use network::{Network, ConsensusParams, ConsensusFork};
use network::{Network, ConsensusParams};
use test_data;
use byteorder::{LittleEndian, ByteOrder};
@ -96,7 +96,7 @@ pub fn main(benchmark: &mut Benchmark) {
assert_eq!(store.best_block().hash, rolling_hash);
let chain_verifier = ChainVerifier::new(store.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let chain_verifier = ChainVerifier::new(store.clone(), ConsensusParams::new(Network::Unitest));
// bench
benchmark.start();

View File

@ -62,14 +62,14 @@ impl Serializable for MessageHeader {
mod tests {
use bytes::Bytes;
use ser::serialize;
use network::{Network, ConsensusFork};
use network::Network;
use super::MessageHeader;
#[test]
fn test_message_header_serialization() {
let expected = "f9beb4d96164647200000000000000001f000000ed52399b".into();
let header = MessageHeader {
magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore),
magic: Network::Mainnet.magic(),
command: "addr".into(),
len: 0x1f,
checksum: "ed52399b".into(),
@ -82,12 +82,12 @@ mod tests {
fn test_message_header_deserialization() {
let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed52399b".into();
let expected = MessageHeader {
magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore),
magic: Network::Mainnet.magic(),
command: "addr".into(),
len: 0x1f,
checksum: "ed52399b".into(),
};
assert_eq!(expected, MessageHeader::deserialize(&raw, Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).unwrap());
assert_eq!(expected, MessageHeader::deserialize(&raw, Network::Mainnet.magic()).unwrap());
}
}

View File

@ -3,7 +3,7 @@ use primitives::hash::H256;
use primitives::compact::Compact;
use chain::{OutPoint, TransactionOutput, IndexedTransaction};
use storage::{SharedStore, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork, TransactionOrdering};
use network::ConsensusParams;
use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops, median_timestamp_inclusive};
@ -255,11 +255,6 @@ impl BlockAssembler {
let bits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), consensus);
let version = BLOCK_VERSION;
let checkdatasig_active = match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.magnetic_anomaly_time,
_ => false
};
let mut coinbase_value = block_reward_satoshi(height);
let mut transactions = Vec::new();
@ -271,7 +266,7 @@ impl BlockAssembler {
self.max_block_sigops,
height,
time,
checkdatasig_active);
false);
for entry in tx_iter {
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
@ -280,15 +275,6 @@ impl BlockAssembler {
transactions.push(tx);
}
// sort block transactions
let median_time_past = median_timestamp_inclusive(previous_header_hash.clone(), store.as_block_header_provider());
match consensus.fork.transaction_ordering(median_time_past) {
TransactionOrdering::Canonical => transactions.sort_unstable_by(|tx1, tx2|
tx1.hash.cmp(&tx2.hash)),
// memory pool iter returns transactions in topological order
TransactionOrdering::Topological => (),
}
BlockTemplate {
version: version,
previous_header_hash: previous_header_hash,
@ -311,7 +297,7 @@ mod tests {
use db::BlockChainDatabase;
use primitives::hash::H256;
use storage::SharedStore;
use network::{ConsensusParams, ConsensusFork, Network, BitcoinCashConsensusParams};
use network::{ConsensusParams, Network};
use memory_pool::MemoryPool;
use self::test_data::{ChainBuilder, TransactionBuilder};
use super::{BlockAssembler, SizePolicy, NextStep, BlockTemplate};
@ -382,19 +368,10 @@ mod tests {
}
// when topological consensus is used
let topological_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let topological_consensus = ConsensusParams::new(Network::Mainnet);
let (block, hash0, hash1) = construct_block(topological_consensus);
assert!(hash1 < hash0);
assert_eq!(block.transactions[0].hash, hash0);
assert_eq!(block.transactions[1].hash, hash1);
// when canonical consensus is used
let mut canonical_fork = BitcoinCashConsensusParams::new(Network::Mainnet);
canonical_fork.magnetic_anomaly_time = 0;
let canonical_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(canonical_fork));
let (block, hash0, hash1) = construct_block(canonical_consensus);
assert!(hash1 < hash0);
assert_eq!(block.transactions[0].hash, hash1);
assert_eq!(block.transactions[1].hash, hash0);
}
}

View File

@ -18,8 +18,6 @@ pub struct ConsensusParams {
/// Block height at which BIP66 becomes active.
/// See https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki
pub bip66_height: u32,
/// Selected consensus fork.
pub fork: ConsensusFork,
/// Version bits activation
pub rule_change_activation_threshold: u32,
/// Number of blocks with the same set of rules
@ -28,334 +26,59 @@ pub struct ConsensusParams {
pub csv_deployment: Option<Deployment>,
/// BIP141, BIP143, BIP147 deployment
pub segwit_deployment: Option<Deployment>,
}
#[derive(Debug, Clone)]
/// Bitcoin cash consensus parameters.
pub struct BitcoinCashConsensusParams {
/// Initial BCH hard fork height.
pub height: u32,
/// Height of difficulty adjustment hardfork.
/// https://reviews.bitcoinabc.org/D601
pub difficulty_adjustion_height: u32,
/// Time of monolith (aka May 2018) hardfork.
/// https://github.com/bitcoincashorg/spec/blob/4fbb0face661e293bcfafe1a2a4744dcca62e50d/may-2018-hardfork.md
pub monolith_time: u32,
/// Time of magnetic anomaly (aka Nov 2018) hardfork.
/// https://github.com/bitcoincashorg/bitcoincash.org/blob/f92f5412f2ed60273c229f68dd8703b6d5d09617/spec/2018-nov-upgrade.md
pub magnetic_anomaly_time: u32,
}
#[derive(Debug, Clone)]
/// ZCash consensus parameters.
pub struct ZCashConsensusParams {
pub pow_averaging_window: u32,
pub pow_max_adjust_down: u32,
pub pow_max_adjust_up: u32,
pub pow_target_spacing: u32,
}
#[derive(Debug, Clone)]
/// Concurrent consensus rule forks.
pub enum ConsensusFork {
/// No fork.
BitcoinCore,
/// Bitcoin Cash (aka UAHF).
/// `u32` is height of the first block, for which new consensus rules are applied.
/// Briefly: no SegWit + blocks up to 8MB + replay protection.
/// Technical specification:
/// UAHF Technical Specification - https://github.com/Bitcoin-UAHF/spec/blob/master/uahf-technical-spec.md
/// BUIP-HF Digest for replay protected signature verification across hard forks - https://github.com/Bitcoin-UAHF/spec/blob/master/replay-protected-sighash.md
BitcoinCash(BitcoinCashConsensusParams),
/// ZCash.
ZCash(ZCashConsensusParams),
}
#[derive(Debug, Clone, Copy)]
/// Describes the ordering of transactions within single block.
pub enum TransactionOrdering {
/// Topological transaction ordering: if tx TX2 depends on tx TX1,
/// it should come AFTER TX1 (not necessarily **right** after it).
Topological,
/// Canonical transaction ordering: transactions are ordered by their
/// hash (in ascending order).
Canonical,
}
impl ConsensusParams {
pub fn new(network: Network, fork: ConsensusFork) -> Self {
pub fn new(network: Network) -> Self {
match network {
Network::Mainnet | Network::Other(_) => ConsensusParams {
network: network,
bip16_time: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 1333238400, // Apr 1 2012
},
bip34_height: match fork {
ConsensusFork::ZCash(_) => 1,
_ => 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8
},
bip65_height: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
},
bip66_height: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931
},
segwit_deployment: match fork {
ConsensusFork::BitcoinCore => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1479168000,
timeout: 1510704000,
activation: Some(481824),
}),
ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => None,
},
fork: fork,
bip16_time: 0,
bip34_height: 1,
bip65_height: 0,
bip66_height: 0,
segwit_deployment: None,
rule_change_activation_threshold: 1916, // 95%
miner_confirmation_window: 2016,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 1462060800,
timeout: 1493596800,
activation: Some(419328),
}),
csv_deployment: None,
pow_averaging_window: 17,
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
},
Network::Testnet => ConsensusParams {
network: network,
bip16_time: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 1333238400, // Apr 1 2012
},
bip34_height: match fork {
ConsensusFork::ZCash(_) => 1,
_ => 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8
},
bip65_height: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
},
bip66_height: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
},
segwit_deployment: match fork {
ConsensusFork::BitcoinCore => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1462060800,
timeout: 1493596800,
activation: Some(834624),
}),
ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => None,
},
fork: fork,
bip16_time: 0,
bip34_height: 1,
bip65_height: 0,
bip66_height: 0,
segwit_deployment: None,
rule_change_activation_threshold: 1512, // 75%
miner_confirmation_window: 2016,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 1456790400,
timeout: 1493596800,
activation: Some(770112),
}),
csv_deployment: None,
pow_averaging_window: 17,
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
},
Network::Regtest | Network::Unitest => ConsensusParams {
network: network,
bip16_time: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 1333238400, // Apr 1 2012
},
bip34_height: match fork {
ConsensusFork::ZCash(_) => 1,
_ => 100000000, // not activated on regtest
},
bip65_height: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 1351,
},
bip66_height: match fork {
ConsensusFork::ZCash(_) => 0,
_ => 1251, // used only in rpc tests
},
segwit_deployment: match fork {
ConsensusFork::BitcoinCore => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 0,
timeout: ::std::u32::MAX,
activation: None,
}),
ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => None,
},
fork: fork,
bip16_time: 0,
bip34_height: 1,
bip65_height: 0,
bip66_height: 0,
segwit_deployment: None,
rule_change_activation_threshold: 108, // 75%
miner_confirmation_window: 144,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 0,
timeout: 0,
activation: Some(0),
}),
},
}
}
csv_deployment: None,
pub fn magic(&self) -> Magic {
self.network.magic(&self.fork)
}
pub fn is_bip30_exception(&self, hash: &H256, height: u32) -> bool {
(height == 91842 && hash == &H256::from_reversed_str("00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(height == 91880 && hash == &H256::from_reversed_str("00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))
}
/// Returns true if SegWit is possible on this chain.
pub fn is_segwit_possible(&self) -> bool {
match self.fork {
// SegWit is not supported in (our?) regtests
ConsensusFork::BitcoinCore if self.network != Network::Regtest => true,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => false,
}
}
}
impl ConsensusFork {
/// Absolute (across all forks) maximum block size. Currently is 8MB for post-HF BitcoinCash
pub fn absolute_maximum_block_size() -> usize {
32_000_000
}
/// Absolute (across all forks) maximum number of sigops in single block. Currently is max(sigops) for 8MB post-HF BitcoinCash block
pub fn absolute_maximum_block_sigops() -> usize {
160_000
}
/// Witness scale factor (equal among all forks)
pub fn witness_scale_factor() -> usize {
4
}
pub fn activation_height(&self) -> u32 {
match *self {
ConsensusFork::BitcoinCore => 0,
ConsensusFork::BitcoinCash(ref fork) => fork.height,
ConsensusFork::ZCash(_) => 0,
}
}
pub fn min_transaction_size(&self, median_time_past: u32) -> usize {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.magnetic_anomaly_time => 100,
_ => 0,
}
}
pub fn max_transaction_size(&self) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// SegWit: size * 4 <= 4_000_000 ===> max size of tx is still 1_000_000
1_000_000
}
pub fn min_block_size(&self, height: u32) -> usize {
match *self {
// size of first fork block must be larger than 1MB
ConsensusFork::BitcoinCash(ref fork) if height == fork.height => 1_000_001,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => 0,
ConsensusFork::ZCash(_) => 0,
}
}
pub fn max_block_size(&self, height: u32, median_time_past: u32) -> usize {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.monolith_time => 32_000_000,
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => 8_000_000,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => 1_000_000,
ConsensusFork::ZCash(_) => 2_000_000,
}
}
pub fn max_block_sigops(&self, height: u32, block_size: usize) -> usize {
match *self {
// according to REQ-5: max_block_sigops = 20000 * ceil((max(blocksize_bytes, 1000000) / 1000000))
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height =>
20_000 * (1 + (block_size - 1) / 1_000_000),
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => 20_000,
}
}
pub fn max_block_sigops_cost(&self, height: u32, block_size: usize) -> usize {
match *self {
ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) =>
self.max_block_sigops(height, block_size) * Self::witness_scale_factor(),
ConsensusFork::BitcoinCore =>
80_000,
}
}
pub fn max_block_weight(&self, _height: u32) -> usize {
match *self {
ConsensusFork::BitcoinCore | ConsensusFork::ZCash(_) =>
4_000_000,
ConsensusFork::BitcoinCash(_) =>
unreachable!("BitcoinCash has no SegWit; weight is only checked with SegWit activated; qed"),
}
}
pub fn transaction_ordering(&self, median_time_past: u32) -> TransactionOrdering {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.magnetic_anomaly_time
=> TransactionOrdering::Canonical,
_ => TransactionOrdering::Topological,
}
}
}
impl BitcoinCashConsensusParams {
pub fn new(network: Network) -> Self {
match network {
Network::Mainnet | Network::Other(_) => BitcoinCashConsensusParams {
height: 478559,
difficulty_adjustion_height: 504031,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
Network::Testnet => BitcoinCashConsensusParams {
height: 1155876,
difficulty_adjustion_height: 1188697,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
Network::Regtest | Network::Unitest => BitcoinCashConsensusParams {
height: 0,
difficulty_adjustion_height: 0,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
}
}
}
impl ZCashConsensusParams {
pub fn new(network: Network) -> Self {
match network {
Network::Mainnet | Network::Other(_) => ZCashConsensusParams {
pow_averaging_window: 17,
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
},
Network::Testnet => ZCashConsensusParams {
pow_averaging_window: 17,
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
},
Network::Regtest | Network::Unitest => ZCashConsensusParams {
pow_averaging_window: 17,
pow_max_adjust_down: 0,
pow_max_adjust_up: 0,
@ -364,6 +87,10 @@ impl ZCashConsensusParams {
}
}
pub fn magic(&self) -> Magic {
self.network.magic()
}
pub fn averaging_window_timespan(&self) -> u32 {
self.pow_averaging_window * self.pow_target_spacing
}
@ -375,76 +102,8 @@ impl ZCashConsensusParams {
pub fn max_actual_timespan(&self) -> u32 {
(self.averaging_window_timespan() * (100 + self.pow_max_adjust_down)) / 100
}
}
#[cfg(test)]
mod tests {
use super::super::Network;
use super::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
#[test]
fn test_consensus_params_bip34_height() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).bip34_height, 227931);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).bip34_height, 21111);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).bip34_height, 100000000);
}
#[test]
fn test_consensus_params_bip65_height() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).bip65_height, 388381);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).bip65_height, 581885);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).bip65_height, 1351);
}
#[test]
fn test_consensus_params_bip66_height() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).bip66_height, 363725);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).bip66_height, 330776);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).bip66_height, 1251);
}
#[test]
fn test_consensus_activation_threshold() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).rule_change_activation_threshold, 1916);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).rule_change_activation_threshold, 1512);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).rule_change_activation_threshold, 108);
}
#[test]
fn test_consensus_miner_confirmation_window() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).miner_confirmation_window, 144);
}
#[test]
fn test_consensus_fork_min_block_size() {
assert_eq!(ConsensusFork::BitcoinCore.min_block_size(0), 0);
let fork = ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet));
assert_eq!(fork.min_block_size(0), 0);
assert_eq!(fork.min_block_size(fork.activation_height()), 1_000_001);
}
#[test]
fn test_consensus_fork_max_transaction_size() {
assert_eq!(ConsensusFork::BitcoinCore.max_transaction_size(), 1_000_000);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).max_transaction_size(), 1_000_000);
}
#[test]
fn test_consensus_fork_min_transaction_size() {
assert_eq!(ConsensusFork::BitcoinCore.min_transaction_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCore.min_transaction_size(2000000000), 0);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).min_transaction_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).min_transaction_size(2000000000), 100);
}
#[test]
fn test_consensus_fork_max_block_sigops() {
assert_eq!(ConsensusFork::BitcoinCore.max_block_sigops(0, 1_000_000), 20_000);
let fork = ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet));
assert_eq!(fork.max_block_sigops(0, 1_000_000), 20_000);
assert_eq!(fork.max_block_sigops(fork.activation_height(), 2_000_000), 40_000);
assert_eq!(fork.max_block_sigops(fork.activation_height() + 100, 3_000_000), 60_000);
pub fn max_transaction_size(&self) -> usize {
100_000 // TODO: changed after sapling
}
}

View File

@ -12,6 +12,6 @@ mod network;
pub use primitives::{hash, compact};
pub use consensus::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams, ZCashConsensusParams, TransactionOrdering};
pub use consensus::ConsensusParams;
pub use deployments::Deployment;
pub use network::{Magic, Network};

View File

@ -5,29 +5,12 @@ use compact::Compact;
use chain::Block;
use primitives::hash::H256;
use primitives::bigint::U256;
use {ConsensusFork};
const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B;
const MAGIC_REGTEST: u32 = 0xDAB5BFFA;
const MAGIC_UNITEST: u32 = 0x00000000;
const BITCOIN_CASH_MAGIC_MAINNET: u32 = 0xE8F3E1E3;
const BITCOIN_CASH_MAGIC_TESTNET: u32 = 0xF4F3E5F4;
const BITCOIN_CASH_MAGIC_REGTEST: u32 = 0xFABFB5DA;
const ZCASH_MAGIC_MAINNET: u32 = 0x6427e924;
const ZCASH_MAGIC_TESTNET: u32 = 0xbff91afa;
const ZCASH_MAGIC_REGTEST: u32 = 0x5f3fe8aa;
lazy_static! {
static ref MAX_BITS_MAINNET: U256 = "00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
.expect("hardcoded value should parse without errors");
static ref MAX_BITS_TESTNET: U256 = "00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
.expect("hardcoded value should parse without errors");
static ref MAX_BITS_REGTEST: U256 = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
.expect("hardcoded value should parse without errors");
static ref ZCASH_MAX_BITS_MAINNET: U256 = "0007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
.expect("hardcoded value should parse without errors");
static ref ZCASH_MAX_BITS_TESTNET: U256 = "07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
@ -55,41 +38,28 @@ pub enum Network {
}
impl Network {
pub fn magic(&self, fork: &ConsensusFork) -> Magic {
match (fork, *self) {
(&ConsensusFork::BitcoinCash(_), Network::Mainnet) => BITCOIN_CASH_MAGIC_MAINNET,
(&ConsensusFork::BitcoinCash(_), Network::Testnet) => BITCOIN_CASH_MAGIC_TESTNET,
(&ConsensusFork::BitcoinCash(_), Network::Regtest) => BITCOIN_CASH_MAGIC_REGTEST,
(&ConsensusFork::ZCash(_), Network::Mainnet) => ZCASH_MAGIC_MAINNET,
(&ConsensusFork::ZCash(_), Network::Testnet) => ZCASH_MAGIC_TESTNET,
(&ConsensusFork::ZCash(_), Network::Regtest) => ZCASH_MAGIC_REGTEST,
(_, Network::Mainnet) => MAGIC_MAINNET,
(_, Network::Testnet) => MAGIC_TESTNET,
(_, Network::Regtest) => MAGIC_REGTEST,
(_, Network::Unitest) => MAGIC_UNITEST,
(_, Network::Other(value)) => value,
pub fn magic(&self) -> Magic {
match *self {
Network::Mainnet => ZCASH_MAGIC_MAINNET,
Network::Testnet => ZCASH_MAGIC_TESTNET,
Network::Regtest | Network::Unitest => ZCASH_MAGIC_REGTEST,
Network::Other(value) => value,
}
}
pub fn max_bits(&self, fork: &ConsensusFork) -> U256 {
match (fork, *self) {
(&ConsensusFork::ZCash(_), Network::Mainnet) => ZCASH_MAX_BITS_MAINNET.clone(),
(&ConsensusFork::ZCash(_), Network::Testnet) => ZCASH_MAX_BITS_TESTNET.clone(),
(_, Network::Mainnet) | (_, Network::Other(_)) => MAX_BITS_MAINNET.clone(),
(_, Network::Testnet) => MAX_BITS_TESTNET.clone(),
(_, Network::Regtest) => MAX_BITS_REGTEST.clone(),
(_, Network::Unitest) => Compact::max_value().into(),
pub fn max_bits(&self) -> U256 {
match *self {
Network::Mainnet => ZCASH_MAX_BITS_MAINNET.clone(),
Network::Testnet | Network::Unitest | Network::Regtest => ZCASH_MAX_BITS_TESTNET.clone(),
Network::Other(_) => Compact::max_value().into(),
}
}
pub fn port(&self, fork: &ConsensusFork) -> u16 {
match (fork, *self) {
(&ConsensusFork::ZCash(_), Network::Mainnet) | (&ConsensusFork::ZCash(_), Network::Other(_)) => 8233,
(&ConsensusFork::ZCash(_), Network::Testnet) => 18233,
(&ConsensusFork::ZCash(_), Network::Regtest) | (&ConsensusFork::ZCash(_), Network::Unitest) => 18344,
(_, Network::Mainnet) | (_, Network::Other(_)) => 8333,
(_, Network::Testnet) => 18333,
(_, Network::Regtest) | (_, Network::Unitest) => 18444,
pub fn port(&self) -> u16 {
match *self {
Network::Mainnet | Network::Other(_) => 8233,
Network::Testnet => 18233,
Network::Regtest | Network::Unitest => 18344,
}
}
@ -101,10 +71,10 @@ impl Network {
}
}
pub fn genesis_block(&self, fork: &ConsensusFork) -> Block {
match (fork, *self) {
// TODO
(&ConsensusFork::ZCash(_), Network::Mainnet) | (&ConsensusFork::ZCash(_), Network::Other(_)) => {
pub fn genesis_block(&self) -> Block {
match *self {
Network::Mainnet | Network::Other(_) => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe406
09a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
/*{
use serialization;
use chain;
use hex::FromHex;
@ -112,65 +82,18 @@ impl Network {
let origin = origin.from_hex::<Vec<u8>>().unwrap();
let genesis: chain::Block = serialization::deserialize(&origin as &[u8]).unwrap();
genesis
},
(&ConsensusFork::ZCash(_), Network::Testnet) =>
"".into(),
(&ConsensusFork::ZCash(_), Network::Regtest) | (&ConsensusFork::ZCash(_), Network::Unitest) =>
"".into(),
(_, Network::Mainnet) | (_, Network::Other(_)) => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
(_, Network::Testnet) => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff001d1aa4ae180101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
(_, Network::Regtest) | (_, Network::Unitest) => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
},*/
Network::Testnet => "TODO".into(),
Network::Regtest | Network::Unitest => "TODO".into(),
}
}
pub fn default_verification_edge(&self, fork: &ConsensusFork) -> H256 {
match *self {
Network::Mainnet => H256::from_reversed_str("0000000000000000030abc968e1bd635736e880b946085c93152969b9a81a6e2"),
Network::Testnet => H256::from_reversed_str("000000000871ee6842d3648317ccc8a435eb8cc3c2429aee94faff9ba26b05a0"),
_ => self.genesis_block(fork).hash(),
}
pub fn default_verification_edge(&self) -> H256 {
self.genesis_block().hash()
}
}
#[cfg(test)]
mod tests {
use compact::Compact;
use {ConsensusFork};
use super::{
Network, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST, MAGIC_UNITEST,
MAX_BITS_MAINNET, MAX_BITS_TESTNET, MAX_BITS_REGTEST,
};
#[test]
fn test_network_magic_number() {
assert_eq!(MAGIC_MAINNET, Network::Mainnet.magic(&ConsensusFork::BitcoinCore));
assert_eq!(MAGIC_TESTNET, Network::Testnet.magic(&ConsensusFork::BitcoinCore));
assert_eq!(MAGIC_REGTEST, Network::Regtest.magic(&ConsensusFork::BitcoinCore));
assert_eq!(MAGIC_UNITEST, Network::Unitest.magic(&ConsensusFork::BitcoinCore));
}
#[test]
fn test_network_max_bits() {
assert_eq!(Network::Mainnet.max_bits(&ConsensusFork::BitcoinCore), *MAX_BITS_MAINNET);
assert_eq!(Network::Testnet.max_bits(&ConsensusFork::BitcoinCore), *MAX_BITS_TESTNET);
assert_eq!(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore), *MAX_BITS_REGTEST);
assert_eq!(Network::Unitest.max_bits(&ConsensusFork::BitcoinCore), Compact::max_value().into());
}
#[test]
fn test_network_port() {
assert_eq!(Network::Mainnet.port(&ConsensusFork::BitcoinCore), 8333);
assert_eq!(Network::Testnet.port(&ConsensusFork::BitcoinCore), 18333);
assert_eq!(Network::Regtest.port(&ConsensusFork::BitcoinCore), 18444);
assert_eq!(Network::Unitest.port(&ConsensusFork::BitcoinCore), 18444);
}
#[test]
fn test_network_rpc_port() {
assert_eq!(Network::Mainnet.rpc_port(), 8332);
assert_eq!(Network::Testnet.rpc_port(), 18332);
assert_eq!(Network::Regtest.rpc_port(), 18443);
assert_eq!(Network::Unitest.rpc_port(), 18443);
}
// TODO: tests
}

View File

@ -210,7 +210,7 @@ mod tests {
use tokio_io::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use ser::Stream;
use network::{Network, ConsensusFork, BitcoinCashConsensusParams};
use network::{Network, ConsensusParams};
use message::{Message, Error};
use message::types::Verack;
use message::types::version::{Version, V0, V106, V70001};
@ -286,7 +286,7 @@ mod tests {
#[test]
fn test_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let local_version = local_version();
let remote_version = remote_version();
@ -316,7 +316,7 @@ mod tests {
#[test]
fn test_accept_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let local_version = local_version();
let remote_version = remote_version();
@ -345,7 +345,7 @@ mod tests {
#[test]
fn test_self_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let remote_version = local_version();
let local_version = local_version();
@ -366,7 +366,7 @@ mod tests {
#[test]
fn test_accept_self_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let remote_version = local_version();
let local_version = local_version();
@ -384,26 +384,4 @@ mod tests {
let hs = accept_handshake(test_io, magic, local_version, 0).wait().unwrap();
assert_eq!(hs.1.unwrap_err(), expected);
}
#[test]
fn test_fails_to_accept_other_fork_node() {
let magic1 = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic2 = Network::Mainnet.magic(&ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)));
let version = 70012;
let local_version = local_version();
let remote_version = remote_version();
let mut remote_stream = Stream::new();
remote_stream.append_slice(Message::new(magic2, version, &remote_version).unwrap().as_ref());
let test_io = TestIo {
read: io::Cursor::new(remote_stream.out()),
write: Bytes::default(),
};
let expected = Error::InvalidMagic;
let hs = accept_handshake(test_io, magic1, local_version, 0).wait().unwrap();
assert_eq!(hs.1.unwrap_err(), expected);
}
}

View File

@ -63,7 +63,7 @@ impl<A> Future for ReadAnyMessage<A> where A: AsyncRead {
mod tests {
use futures::Future;
use bytes::Bytes;
use network::{Network, ConsensusFork};
use network::{Network};
use message::Error;
use super::read_any_message;
@ -74,20 +74,20 @@ mod tests {
let nonce = "5845303b6da97786".into();
let expected = (name, nonce);
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap(), Ok(expected));
assert_eq!(read_any_message(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap(), Err(Error::InvalidMagic));
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic()).wait().unwrap(), Ok(expected));
assert_eq!(read_any_message(raw.as_ref(), Network::Testnet.magic()).wait().unwrap(), Err(Error::InvalidMagic));
}
#[test]
fn test_read_too_short_any_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da977".into();
assert!(read_any_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().is_err());
assert!(read_any_message(raw.as_ref(), Network::Mainnet.magic()).wait().is_err());
}
#[test]
fn test_read_any_message_with_invalid_checksum() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c01c765845303b6da97786".into();
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap(), Err(Error::InvalidChecksum));
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic()).wait().unwrap(), Err(Error::InvalidChecksum));
}
}

View File

@ -32,7 +32,7 @@ impl<A> Future for ReadHeader<A> where A: AsyncRead {
mod tests {
use futures::Future;
use bytes::Bytes;
use network::{Network, ConsensusFork};
use network::{Network};
use message::{MessageHeader, Error};
use super::read_header;
@ -40,25 +40,25 @@ mod tests {
fn test_read_header() {
let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed52399b".into();
let expected = MessageHeader {
magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore),
magic: Network::Mainnet.magic(),
command: "addr".into(),
len: 0x1f,
checksum: "ed52399b".into(),
};
assert_eq!(read_header(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Ok(expected));
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_header(raw.as_ref(), Network::Mainnet.magic()).wait().unwrap().1, Ok(expected));
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic()).wait().unwrap().1, Err(Error::InvalidMagic));
}
#[test]
fn test_read_header_with_invalid_magic() {
let raw: Bytes = "f9beb4d86164647200000000000000001f000000ed52399b".into();
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic()).wait().unwrap().1, Err(Error::InvalidMagic));
}
#[test]
fn test_read_too_short_header() {
let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed5239".into();
assert!(read_header(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().is_err());
assert!(read_header(raw.as_ref(), Network::Mainnet.magic()).wait().is_err());
}
}

View File

@ -70,7 +70,7 @@ impl<M, A> Future for ReadMessage<M, A> where A: AsyncRead, M: Payload {
mod tests {
use futures::Future;
use bytes::Bytes;
use network::{Network, ConsensusFork};
use network::{Network};
use message::Error;
use message::types::{Ping, Pong};
use super::read_message;
@ -79,21 +79,21 @@ mod tests {
fn test_read_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da97786".into();
let ping = Ping::new(u64::from_str_radix("8677a96d3b304558", 16).unwrap());
assert_eq!(read_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Ok(ping));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_message::<Pong, _>(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Err(Error::InvalidCommand));
assert_eq!(read_message(raw.as_ref(), Network::Mainnet.magic(), 0).wait().unwrap().1, Ok(ping));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Testnet.magic(), 0).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_message::<Pong, _>(raw.as_ref(), Network::Mainnet.magic(), 0).wait().unwrap().1, Err(Error::InvalidCommand));
}
#[test]
fn test_read_too_short_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da977".into();
assert!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().is_err());
assert!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(), 0).wait().is_err());
}
#[test]
fn test_read_message_with_invalid_checksum() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c01c765845303b6da97786".into();
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Err(Error::InvalidChecksum));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(), 0).wait().unwrap().1, Err(Error::InvalidChecksum));
}
}

View File

@ -18,7 +18,7 @@ pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
};
let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash();
let genesis_hash = cfg.network.genesis_block(&cfg.consensus.fork).hash();
let genesis_hash = cfg.network.genesis_block().hash();
let mut best_block_hash = cfg.db.best_block().hash;
debug_assert!(best_block_hash != H256::default()); // genesis inserted in init_db

View File

@ -4,7 +4,6 @@ use std::sync::Arc;
use std::sync::mpsc::{channel, Sender, Receiver};
use std::sync::atomic::{AtomicBool, Ordering};
use sync::{create_sync_peers, create_local_sync_node, create_sync_connection_factory, SyncListener};
use network::ConsensusFork;
use primitives::hash::H256;
use util::{init_db, node_table_path};
use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM, ZCASH_PROTOCOL_VERSION, ZCASH_PROTOCOL_MINIMUM};
@ -94,14 +93,8 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
inbound_connections: cfg.inbound_connections,
outbound_connections: cfg.outbound_connections,
connection: p2p::NetConfig {
protocol_version: match &cfg.consensus.fork {
&ConsensusFork::ZCash(_) => ZCASH_PROTOCOL_VERSION,
_ => PROTOCOL_VERSION,
},
protocol_minimum: match &cfg.consensus.fork {
&ConsensusFork::ZCash(_) => ZCASH_PROTOCOL_MINIMUM,
_ => PROTOCOL_MINIMUM,
},
protocol_version: ZCASH_PROTOCOL_VERSION,
protocol_minimum: ZCASH_PROTOCOL_MINIMUM,
magic: cfg.consensus.magic(),
local_address: SocketAddr::new(cfg.host, cfg.port),
services: cfg.services,

View File

@ -2,7 +2,7 @@ use std::net;
use clap;
use storage;
use message::Services;
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams, ZCashConsensusParams};
use network::{Network, ConsensusParams};
use p2p::InternetProtocol;
use seednodes::{mainnet_seednodes, testnet_seednodes, bitcoin_cash_seednodes,
bitcoin_cash_testnet_seednodes, zcash_seednodes};
@ -59,8 +59,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
(true, true) => return Err("Only one testnet option can be used".into()),
};
let consensus_fork = parse_consensus_fork(network, &db, &matches)?;
let consensus = ConsensusParams::new(network, consensus_fork);
let consensus = ConsensusParams::new(network);
let (in_connections, out_connections) = match network {
Network::Testnet | Network::Mainnet | Network::Other(_) => (10, 10),
@ -73,11 +72,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
};
// to skip idiotic 30 seconds delay in test-scripts
let user_agent_suffix = match consensus.fork {
ConsensusFork::BitcoinCore => "",
ConsensusFork::BitcoinCash(_) => "/UAHF",
ConsensusFork::ZCash(_) => "",
};
let user_agent_suffix = "";
let user_agent = match network {
Network::Testnet | Network::Mainnet | Network::Unitest | Network::Other(_) => format!("{}{}", USER_AGENT, user_agent_suffix),
Network::Regtest => REGTEST_USER_AGENT.into(),
@ -85,13 +80,13 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let port = match matches.value_of("port") {
Some(port) => port.parse().map_err(|_| "Invalid port".to_owned())?,
None => network.port(&consensus.fork),
None => network.port(),
};
let connect = match matches.value_of("connect") {
Some(s) => Some(match s.parse::<net::SocketAddr>() {
Err(_) => s.parse::<net::IpAddr>()
.map(|ip| net::SocketAddr::new(ip, network.port(&consensus.fork)))
.map(|ip| net::SocketAddr::new(ip, network.port()))
.map_err(|_| "Invalid connect".to_owned()),
Ok(a) => Ok(a),
}?),
@ -100,13 +95,9 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let seednodes: Vec<String> = match matches.value_of("seednode") {
Some(s) => vec![s.parse().map_err(|_| "Invalid seednode".to_owned())?],
None => match (network, &consensus.fork) {
(Network::Mainnet, &ConsensusFork::ZCash(_)) => zcash_seednodes().into_iter().map(Into::into).collect(),
(Network::Mainnet, &ConsensusFork::BitcoinCash(_)) => bitcoin_cash_seednodes().into_iter().map(Into::into).collect(),
(Network::Testnet, &ConsensusFork::BitcoinCash(_)) => bitcoin_cash_testnet_seednodes().into_iter().map(Into::into).collect(),
(Network::Mainnet, _) => mainnet_seednodes().into_iter().map(Into::into).collect(),
(Network::Testnet, _) => testnet_seednodes().into_iter().map(Into::into).collect(),
(Network::Other(_), _) | (Network::Regtest, _) | (Network::Unitest, _) => Vec::new(),
None => match network {
Network::Mainnet => zcash_seednodes().into_iter().map(Into::into).collect(),
Network::Other(_) | Network::Testnet | Network::Regtest | Network::Unitest => Vec::new(),
},
};
@ -131,11 +122,6 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
};
let services = Services::default().with_network(true);
let services = match &consensus.fork {
&ConsensusFork::BitcoinCash(_) => services.with_bitcoin_cash(true),
&ConsensusFork::BitcoinCore => services.with_witness(true),
&ConsensusFork::ZCash(_) => services,
};
let verification_level = match matches.value_of("verification-level") {
Some(s) if s == "full" => VerificationLevel::Full,
@ -150,7 +136,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let edge: H256 = s.parse().map_err(|_| "Invalid verification edge".to_owned())?;
edge.reversed()
},
_ => network.default_verification_edge(&consensus.fork),
_ => network.default_verification_edge(),
};
let config = Config {
@ -181,34 +167,6 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
Ok(config)
}
fn parse_consensus_fork(network: Network, db: &storage::SharedStore, matches: &clap::ArgMatches) -> Result<ConsensusFork, String> {
let old_consensus_fork = db.consensus_fork()?;
let new_consensus_fork = match (matches.is_present("btc"), matches.is_present("bch"), matches.is_present("zcash")) {
(false, false, false) => match &old_consensus_fork {
&Some(ref old_consensus_fork) => old_consensus_fork,
&None => return Err("You must select fork on first run: --btc, --bch or --zcash".into()),
},
(true, false, false) => "btc",
(false, true, false) => "bch",
(false, false, true) => "zcash",
_ => return Err("You can only pass single fork argument: --btc, --bch or --zcash".into()),
};
match &old_consensus_fork {
&None => db.set_consensus_fork(new_consensus_fork)?,
&Some(ref old_consensus_fork) if old_consensus_fork == new_consensus_fork => (),
&Some(ref old_consensus_fork) =>
return Err(format!("Cannot select '{}' fork with non-empty database of '{}' fork", new_consensus_fork, old_consensus_fork)),
}
match new_consensus_fork {
"btc" => Ok(ConsensusFork::BitcoinCore),
"bch" => Ok(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(network))),
"zcash" => Ok(ConsensusFork::ZCash(ZCashConsensusParams::new(network))),
_ => Err(String::from("Fork mandatory")),
}
}
fn parse_rpc_config(network: Network, matches: &clap::ArgMatches) -> Result<RpcHttpConfig, String> {
let mut config = RpcHttpConfig::with_port(network.rpc_port());
config.enabled = !matches.is_present("no-jsonrpc");

View File

@ -26,7 +26,7 @@ pub fn node_table_path(cfg: &Config) -> PathBuf {
pub fn init_db(cfg: &Config) -> Result<(), String> {
// insert genesis block if db is empty
let genesis_block: IndexedBlock = cfg.network.genesis_block(&cfg.consensus.fork).into();
let genesis_block: IndexedBlock = cfg.network.genesis_block().into();
match cfg.db.block_hash(0) {
Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()),
Some(_) => Ok(()),

View File

@ -14,7 +14,7 @@ use global_script::Script;
use chain::OutPoint;
use verification;
use ser::serialize;
use network::{Network, ConsensusFork, ZCashConsensusParams};
use network::{Network};
use primitives::hash::H256 as GlobalH256;
pub struct BlockChainClient<T: BlockChainClientCoreApi> {
@ -33,16 +33,13 @@ pub trait BlockChainClientCoreApi: Send + Sync + 'static {
pub struct BlockChainClientCore {
network: Network,
fork: ConsensusFork,
storage: storage::SharedStore,
}
impl BlockChainClientCore {
pub fn new(network: Network, storage: storage::SharedStore) -> Self {
let fork = ConsensusFork::ZCash(ZCashConsensusParams::new(network));
BlockChainClientCore {
network: network,
fork,
storage: storage,
}
}
@ -62,7 +59,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
}
fn difficulty(&self) -> f64 {
self.storage.difficulty(self.network.max_bits(&self.fork).into())
self.storage.difficulty(self.network.max_bits().into())
}
fn raw_block(&self, hash: GlobalH256) -> Option<RawBlock> {
@ -94,7 +91,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
weight: block_size as u32, // TODO: segwit
height: height,
mediantime: Some(median_time),
difficulty: block.header.raw.bits.to_f64(self.network.max_bits(&self.fork).into()),
difficulty: block.header.raw.bits.to_f64(self.network.max_bits().into()),
chainwork: U256::default(), // TODO: read from storage
previousblockhash: Some(block.header.raw.previous_header_hash.clone().into()),
nextblockhash: height.and_then(|h| self.storage.block_hash(h + 1).map(|h| h.into())),

View File

@ -147,7 +147,7 @@ mod tests {
use std::sync::Arc;
use db::{BlockChainDatabase};
use network::{ConsensusParams, ConsensusFork, Network};
use network::{ConsensusParams, Network};
use verification::VerificationLevel;
use super::super::Error;
use super::{BlocksWriter, MAX_ORPHANED_BLOCKS};
@ -163,7 +163,7 @@ mod tests {
#[test]
fn blocks_writer_appends_blocks() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet), default_verification_params());
blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error");
assert_eq!(db.best_block().number, 1);
}
@ -172,7 +172,7 @@ mod tests {
fn blocks_writer_verification_error() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1);
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet), default_verification_params());
for (index, block) in blocks.into_iter().skip(1).enumerate() {
match blocks_target.append_block(block.into()) {
Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (),
@ -186,7 +186,7 @@ mod tests {
#[test]
fn blocks_writer_out_of_order_block() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet), default_verification_params());
let wrong_block = test_data::block_builder()
.header().parent(test_data::genesis().hash()).build()
@ -201,11 +201,8 @@ mod tests {
#[test]
fn blocks_writer_append_to_existing_db() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Mainnet), default_verification_params());
assert!(blocks_target.append_block(test_data::genesis().into()).is_ok());
assert_eq!(db.best_block().number, 0);
println!("=== XXX: {:?}", blocks_target.append_block(test_data::block_h1().into()));
assert!(blocks_target.append_block(test_data::block_h1().into()).is_ok());
assert_eq!(db.best_block().number, 1);
}

View File

@ -279,10 +279,10 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
let previous_block_header = self.storage.block_header(previous_block_height.into()).expect("best block is in db; qed");
let median_timestamp = median_timestamp_inclusive(previous_block_header.hash(), self.storage.as_block_header_provider());
let new_block_height = previous_block_height + 1;
let max_block_size = self.consensus.fork.max_block_size(new_block_height, median_timestamp);
let max_block_size = 2_000_000;
let block_assembler = BlockAssembler {
max_block_size: max_block_size as u32,
max_block_sigops: self.consensus.fork.max_block_sigops(new_block_height, max_block_size) as u32,
max_block_sigops: 20_000,
};
let memory_pool = &*self.memory_pool.read();
block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, median_timestamp, &self.consensus)
@ -343,7 +343,7 @@ pub mod tests {
use synchronization_chain::Chain;
use message::types;
use message::common::{InventoryVector, InventoryType};
use network::{ConsensusParams, ConsensusFork, Network};
use network::{ConsensusParams, Network};
use chain::Transaction;
use db::{BlockChainDatabase};
use miner::MemoryPool;
@ -377,12 +377,12 @@ pub mod tests {
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
let chain = Chain::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), memory_pool.clone());
let chain = Chain::new(storage.clone(), ConsensusParams::new(Network::Unitest), memory_pool.clone());
let sync_peers = Arc::new(PeersImpl::default());
let executor = DummyTaskExecutor::new();
let server = Arc::new(DummyServer::new());
let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore)));
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Mainnet)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier);
let mut verifier = match verifier {
Some(verifier) => verifier,
@ -390,7 +390,7 @@ pub mod tests {
};
verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
let client = SynchronizationClient::new(sync_state.clone(), client_core, verifier);
let local_node = LocalNode::new(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore), storage, memory_pool, sync_peers, sync_state, executor.clone(), client, server.clone());
let local_node = LocalNode::new(ConsensusParams::new(Network::Mainnet), storage, memory_pool, sync_peers, sync_state, executor.clone(), client, server.clone());
(executor, server, local_node)
}

View File

@ -115,9 +115,6 @@ pub struct Chain {
memory_pool: MemoryPoolRef,
/// Blocks that have been marked as dead-ends
dead_end_blocks: HashSet<H256>,
/// Is SegWit is possible on this chain? SegWit inventory types are used when block/tx-es are
/// requested and this flag is true.
is_segwit_possible: bool,
}
impl BlockState {
@ -148,7 +145,6 @@ impl Chain {
.expect("storage with genesis block is required");
let best_storage_block = storage.best_block();
let best_storage_block_hash = best_storage_block.hash.clone();
let is_segwit_possible = consensus.is_segwit_possible();
Chain {
genesis_block_hash: genesis_block_hash,
@ -159,7 +155,6 @@ impl Chain {
verifying_transactions: LinkedHashMap::new(),
memory_pool: memory_pool,
dead_end_blocks: HashSet::new(),
is_segwit_possible,
}
}
@ -185,11 +180,6 @@ impl Chain {
self.memory_pool.clone()
}
/// Is segwit active
pub fn is_segwit_possible(&self) -> bool {
self.is_segwit_possible
}
/// Get number of blocks in given state
pub fn length_of_blocks_state(&self, state: BlockState) -> BlockHeight {
match state {
@ -734,7 +724,7 @@ mod tests {
use chain::{Transaction, IndexedBlockHeader};
use db::BlockChainDatabase;
use miner::MemoryPool;
use network::{Network, ConsensusParams, ConsensusFork};
use network::{Network, ConsensusParams};
use primitives::hash::H256;
use super::{Chain, BlockState, TransactionState, BlockInsertionResult};
use utils::HashPosition;
@ -743,7 +733,7 @@ mod tests {
fn chain_empty() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let db_best_block = db.best_block();
let chain = Chain::new(db.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let chain = Chain::new(db.clone(), ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
assert_eq!(chain.information().scheduled, 0);
assert_eq!(chain.information().requested, 0);
assert_eq!(chain.information().verifying, 0);
@ -760,7 +750,7 @@ mod tests {
#[test]
fn chain_block_path() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db.clone(), ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
// add 6 blocks to scheduled queue
let blocks = test_data::build_n_empty_blocks_from_genesis(6, 0);
@ -812,7 +802,7 @@ mod tests {
#[test]
fn chain_block_locator_hashes() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
let genesis_hash = chain.best_block().hash;
assert_eq!(chain.block_locator_hashes(), vec![genesis_hash.clone()]);
@ -897,7 +887,7 @@ mod tests {
#[test]
fn chain_transaction_state() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
let genesis_block = test_data::genesis();
let block1 = test_data::block_h1();
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
@ -934,7 +924,7 @@ mod tests {
let tx2_hash = tx2.hash();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2.into());
@ -958,7 +948,7 @@ mod tests {
.set_default_input(0).set_output(400).store(test_chain); // t4
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(test_chain.at(0).into());
chain.verify_transaction(test_chain.at(1).into());
chain.verify_transaction(test_chain.at(2).into());
@ -980,7 +970,7 @@ mod tests {
.set_default_input(0).set_output(400).store(test_chain); // t4
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(test_chain.at(0).into());
chain.insert_verified_transaction(test_chain.at(1).into());
chain.insert_verified_transaction(test_chain.at(2).into());
@ -1006,7 +996,7 @@ mod tests {
let tx2_hash = tx2.hash();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2.into());
@ -1054,7 +1044,7 @@ mod tests {
let tx5 = b5.transactions[0].clone();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![genesis.into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx3.into());
chain.insert_verified_transaction(tx4.into());
@ -1098,7 +1088,7 @@ mod tests {
// insert tx2 to memory pool
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx2.clone().into());
chain.insert_verified_transaction(tx3.clone().into());
// insert verified block with tx1
@ -1117,7 +1107,7 @@ mod tests {
.reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(data_chain.at(1).into());
assert_eq!(chain.information().transactions.transactions_count, 1);
chain.insert_verified_transaction(data_chain.at(2).into());

View File

@ -1235,7 +1235,7 @@ pub mod tests {
use message::common::InventoryVector;
use message::{Services, types};
use miner::MemoryPool;
use network::{ConsensusParams, ConsensusFork, Network};
use network::{ConsensusParams, Network};
use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use inbound_connection::tests::DummyOutboundSyncConnection;
@ -1286,11 +1286,11 @@ pub mod tests {
};
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let chain = Chain::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), memory_pool.clone());
let chain = Chain::new(storage.clone(), ConsensusParams::new(Network::Unitest), memory_pool.clone());
let executor = DummyTaskExecutor::new();
let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier.clone());
{
client_core.lock().set_verify_headers(false);

View File

@ -263,7 +263,7 @@ pub mod tests {
use std::sync::atomic::Ordering;
use std::collections::{HashSet, HashMap};
use db::BlockChainDatabase;
use network::{Network, ConsensusParams, ConsensusFork};
use network::{Network, ConsensusParams};
use verification::{VerificationLevel, BackwardsCompatibleChainVerifier as ChainVerifier, Error as VerificationError, TransactionError};
use synchronization_client_core::CoreVerificationSink;
use synchronization_executor::tests::DummyTaskExecutor;
@ -351,7 +351,7 @@ pub mod tests {
#[test]
fn verifier_wrapper_switches_to_full_mode() {
let storage: StorageRef = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
// switching to full verification when block is already in db
assert_eq!(ChainVerifierWrapper::new(verifier.clone(), &storage, VerificationParameters {
@ -382,7 +382,7 @@ pub mod tests {
.build()
.merkled_header()
.parent(rolling_hash.clone())
.bits(Network::Unitest.max_bits(&ConsensusFork::BitcoinCore).into())
.bits(Network::Unitest.max_bits().into())
.build()
.build();
rolling_hash = next_block.hash();
@ -392,7 +392,7 @@ pub mod tests {
let coinbase_transaction_hash = blocks[0].transactions[0].hash.clone();
let last_block_hash = blocks[blocks.len() - 1].hash().clone();
let storage: StorageRef = Arc::new(BlockChainDatabase::init_test_chain(blocks));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let bad_transaction_block: IndexedBlock = test_data::block_builder()
.transaction().coinbase().output().value(50).build().build()
.transaction()
@ -401,7 +401,7 @@ pub mod tests {
.build()
.merkled_header()
.parent(last_block_hash)
.bits(Network::Unitest.max_bits(&ConsensusFork::BitcoinCore).into())
.bits(Network::Unitest.max_bits().into())
.build()
.build().into();
@ -424,7 +424,7 @@ pub mod tests {
#[test]
fn verification_level_none_accept_incorrect_block() {
let storage: StorageRef = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let bad_block: IndexedBlock = test_data::block_builder().header().build().build().into();
// Ok(()) when nothing is verified

View File

@ -1,9 +1,9 @@
use network::{ConsensusParams, ConsensusFork, TransactionOrdering};
use network::{ConsensusParams};
use storage::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use sigops::{transaction_sigops};
use work::block_reward_satoshi;
use duplex_store::{transaction_index_for_output_check, DuplexTransactionOutputProvider};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::BlockDeployments;
use canon::CanonBlock;
use error::{Error, TransactionError};
@ -16,7 +16,6 @@ pub struct BlockAcceptor<'a> {
pub sigops: BlockSigops<'a>,
pub coinbase_claim: BlockCoinbaseClaim<'a>,
pub coinbase_script: BlockCoinbaseScript<'a>,
pub ordering: BlockTransactionOrdering<'a>,
}
impl<'a> BlockAcceptor<'a> {
@ -35,7 +34,6 @@ impl<'a> BlockAcceptor<'a> {
coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, consensus, store, height, median_time_past),
sigops: BlockSigops::new(block, store, consensus, height, median_time_past),
ordering: BlockTransactionOrdering::new(block, consensus, median_time_past),
}
}
@ -45,7 +43,6 @@ impl<'a> BlockAcceptor<'a> {
self.serialized_size.check()?;
self.coinbase_claim.check()?;
self.coinbase_script.check()?;
self.ordering.check()?;
Ok(())
}
}
@ -104,12 +101,7 @@ impl<'a> BlockSerializedSize<'a> {
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
// block size (without witness) is valid for all forks:
// before SegWit: it is main check for size
// after SegWit: without witness data, block size should be <= 1_000_000
// after BitcoinCash fork: block size is increased to 8_000_000
if size < self.consensus.fork.min_block_size(self.height) ||
size > self.consensus.fork.max_block_size(self.height, self.median_time_past) {
if size > 2_000_000 {
return Err(Error::Size(size));
}
@ -135,10 +127,7 @@ impl<'a> BlockSigops<'a> {
median_time_past: u32,
) -> Self {
let bip16_active = block.header.raw.time >= consensus.bip16_time;
let checkdatasig_active = match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) => median_time_past >= fork.magnetic_anomaly_time,
_ => false,
};
let checkdatasig_active = false;
BlockSigops {
block: block,
@ -157,7 +146,7 @@ impl<'a> BlockSigops<'a> {
.fold(0, |acc, tx_sigops| (acc + tx_sigops));
let size = self.block.size();
if sigops > self.consensus.fork.max_block_sigops(self.height, size) {
if sigops > 20_000 {
return Err(Error::MaximumSigops);
}
@ -169,7 +158,6 @@ pub struct BlockCoinbaseClaim<'a> {
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
height: u32,
transaction_ordering: TransactionOrdering,
}
impl<'a> BlockCoinbaseClaim<'a> {
@ -184,7 +172,6 @@ impl<'a> BlockCoinbaseClaim<'a> {
block: block,
store: store,
height: height,
transaction_ordering: consensus_params.fork.transaction_ordering(median_time_past),
}
}
@ -197,8 +184,7 @@ impl<'a> BlockCoinbaseClaim<'a> {
// (1) Total sum of all referenced outputs
let mut incoming: u64 = 0;
for input in tx.raw.inputs.iter() {
let prevout_tx_idx = transaction_index_for_output_check(self.transaction_ordering, tx_idx);
let prevout = store.transaction_output(&input.previous_output, prevout_tx_idx);
let prevout = store.transaction_output(&input.previous_output, tx_idx);
let (sum, overflow) = incoming.overflowing_add(prevout.map(|o| o.value).unwrap_or(0));
if overflow {
return Err(Error::ReferencedInputsSumOverflow);
@ -276,43 +262,14 @@ impl<'a> BlockCoinbaseScript<'a> {
}
}
pub struct BlockTransactionOrdering<'a> {
block: CanonBlock<'a>,
transaction_ordering: TransactionOrdering,
}
impl<'a> BlockTransactionOrdering<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, median_time_past: u32) -> Self {
BlockTransactionOrdering {
block,
transaction_ordering: consensus.fork.transaction_ordering(median_time_past),
}
}
fn check(&self) -> Result<(), Error> {
match self.transaction_ordering {
// topological transaction ordering is checked in TransactionMissingInputs
TransactionOrdering::Topological => Ok(()),
// canonical transaction ordering means that transactions are ordered by
// their id (i.e. hash) in ascending order
TransactionOrdering::Canonical =>
if self.block.transactions.windows(2).skip(1).all(|w| w[0].hash < w[1].hash) {
Ok(())
} else {
Err(Error::NonCanonicalTransactionOrdering)
},
}
}
}
#[cfg(test)]
mod tests {
extern crate test_data;
use chain::{IndexedBlock, Transaction};
use network::{Network, ConsensusFork, ConsensusParams, BitcoinCashConsensusParams};
use network::{Network, ConsensusParams};
use {Error, CanonBlock};
use super::{BlockCoinbaseScript, BlockTransactionOrdering};
use super::{BlockCoinbaseScript};
#[test]
fn test_block_coinbase_script() {
@ -342,42 +299,4 @@ mod tests {
assert_eq!(coinbase_script_validator2.check(), Err(Error::CoinbaseScript));
}
#[test]
fn block_transaction_ordering_works() {
let tx1: Transaction = test_data::TransactionBuilder::with_output(1).into();
let tx2: Transaction = test_data::TransactionBuilder::with_output(2).into();
let tx3: Transaction = test_data::TransactionBuilder::with_output(3).into();
let bad_block: IndexedBlock = test_data::block_builder()
.with_transaction(tx1.clone())
.with_transaction(tx2.clone())
.with_transaction(tx3.clone())
.header().build()
.build()
.into();
let good_block: IndexedBlock = test_data::block_builder()
.with_transaction(tx1)
.with_transaction(tx3)
.with_transaction(tx2)
.header().build()
.build()
.into();
let bad_block = CanonBlock::new(&bad_block);
let good_block = CanonBlock::new(&good_block);
// when topological ordering is used => we don't care about tx ordering
let consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let checker = BlockTransactionOrdering::new(bad_block, &consensus, 0);
assert_eq!(checker.check(), Ok(()));
// when topological ordering is used => we care about tx ordering
let mut bch = BitcoinCashConsensusParams::new(Network::Unitest);
bch.magnetic_anomaly_time = 0;
let consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(bch));
let checker = BlockTransactionOrdering::new(bad_block, &consensus, 0);
assert_eq!(checker.check(), Err(Error::NonCanonicalTransactionOrdering));
let checker = BlockTransactionOrdering::new(good_block, &consensus, 0);
assert_eq!(checker.check(), Ok(()));
}
}

View File

@ -1,4 +1,4 @@
use network::{ConsensusParams, ConsensusFork};
use network::{ConsensusParams};
use storage::BlockHeaderProvider;
use canon::CanonHeader;
use error::Error;
@ -81,11 +81,6 @@ impl<'a> HeaderEquihashSolution<'a> {
}
fn check(&self) -> Result<(), Error> {
match self.consensus.fork {
ConsensusFork::ZCash(_) => (),
_ => return Ok(()),
}
use equihash;
let is_solution_correct = equihash::verify_block_equihash_solution(&equihash::EquihashParams {
N: 200,

View File

@ -2,9 +2,9 @@ use primitives::hash::H256;
use primitives::bytes::Bytes;
use ser::Serializable;
use storage::{TransactionMetaProvider, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork};
use network::{ConsensusParams};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::{DuplexTransactionOutputProvider, transaction_index_for_output_check};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::BlockDeployments;
use script::Builder;
use sigops::transaction_sigops;
@ -14,13 +14,11 @@ use error::TransactionError;
use VerificationLevel;
pub struct TransactionAcceptor<'a> {
pub size: TransactionSize<'a>,
pub bip30: TransactionBip30<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
pub maturity: TransactionMaturity<'a>,
pub overspent: TransactionOverspent<'a>,
pub double_spent: TransactionDoubleSpend<'a>,
pub return_replay_protection: TransactionReturnReplayProtection<'a>,
pub eval: TransactionEval<'a>,
}
@ -42,41 +40,33 @@ impl<'a> TransactionAcceptor<'a> {
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
let tx_ordering = consensus.fork.transaction_ordering(median_time_past);
let missing_input_tx_index = transaction_index_for_output_check(tx_ordering,transaction_index);
TransactionAcceptor {
size: TransactionSize::new(transaction, consensus, median_time_past),
bip30: TransactionBip30::new_for_sync(transaction, meta_store, consensus, block_hash, height),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, missing_input_tx_index),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, median_time_past, deployments),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.size.check());
try!(self.bip30.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
try!(self.overspent.check());
try!(self.double_spent.check());
try!(self.return_replay_protection.check());
try!(self.eval.check());
Ok(())
}
}
pub struct MemoryPoolTransactionAcceptor<'a> {
pub size: TransactionSize<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
pub maturity: TransactionMaturity<'a>,
pub overspent: TransactionOverspent<'a>,
pub sigops: TransactionSigops<'a>,
pub double_spent: TransactionDoubleSpend<'a>,
pub return_replay_protection: TransactionReturnReplayProtection<'a>,
pub eval: TransactionEval<'a>,
}
@ -95,15 +85,13 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
) -> Self {
trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
let transaction_index = 0;
let max_block_sigops = consensus.fork.max_block_sigops(height, consensus.fork.max_block_size(height, median_time_past));
let max_block_sigops = 20_000;
MemoryPoolTransactionAcceptor {
size: TransactionSize::new(transaction, consensus, median_time_past),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
sigops: TransactionSigops::new(transaction, output_store, consensus, max_block_sigops, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, median_time_past, deployments),
}
}
@ -111,13 +99,11 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
pub fn check(&self) -> Result<(), TransactionError> {
// Bip30 is not checked because we don't need to allow tx pool acceptance of an unspent duplicate.
// Tx pool validation is not strinctly a matter of consensus.
try!(self.size.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
try!(self.overspent.check());
try!(self.sigops.check());
try!(self.double_spent.check());
try!(self.return_replay_protection.check());
try!(self.eval.check());
Ok(())
}
@ -135,7 +121,6 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
pub struct TransactionBip30<'a> {
transaction: CanonTransaction<'a>,
store: &'a TransactionMetaProvider,
exception: bool,
}
impl<'a> TransactionBip30<'a> {
@ -146,18 +131,15 @@ impl<'a> TransactionBip30<'a> {
block_hash: &'a H256,
height: u32
) -> Self {
let exception = consensus_params.is_bip30_exception(block_hash, height);
TransactionBip30 {
transaction: transaction,
store: store,
exception: exception,
}
}
fn check(&self) -> Result<(), TransactionError> {
match self.store.transaction_meta(&self.transaction.hash) {
Some(ref meta) if !meta.is_fully_spent() && !self.exception => {
Some(ref meta) if !meta.is_fully_spent() => {
Err(TransactionError::UnspentTransactionWithTheSameHash)
},
_ => Ok(())
@ -279,10 +261,7 @@ impl<'a> TransactionSigops<'a> {
fn check(&self) -> Result<(), TransactionError> {
let bip16_active = self.time >= self.consensus_params.bip16_time;
let checkdatasig_active = match self.consensus_params.fork {
ConsensusFork::BitcoinCash(ref fork) => self.time >= fork.magnetic_anomaly_time,
_ => false
};
let checkdatasig_active = false;
let sigops = transaction_sigops(&self.transaction.raw, &self.store, bip16_active, checkdatasig_active);
if sigops > self.max_sigops {
Err(TransactionError::MaxSigops)
@ -321,24 +300,12 @@ impl<'a> TransactionEval<'a> {
deployments: &'a BlockDeployments,
) -> Self {
let verify_p2sh = time >= params.bip16_time;
let verify_strictenc = match params.fork {
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => true,
_ => false,
};
let verify_strictenc = false;
let verify_locktime = height >= params.bip65_height;
let verify_dersig = height >= params.bip66_height;
let verify_monolith_opcodes = match params.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.monolith_time,
_ => false,
};
let verify_magnetic_anomaly_opcodes = match params.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.magnetic_anomaly_time,
_ => false,
};
let signature_version = match params.fork {
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => SignatureVersion::ForkId,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => SignatureVersion::Base,
};
let verify_monolith_opcodes = false;
let verify_magnetic_anomaly_opcodes = false;
let signature_version = SignatureVersion::Base;
let verify_checksequence = deployments.csv();
let verify_sigpushonly = verify_magnetic_anomaly_opcodes;
@ -443,125 +410,3 @@ impl<'a> TransactionDoubleSpend<'a> {
Ok(())
}
}
pub struct TransactionReturnReplayProtection<'a> {
transaction: CanonTransaction<'a>,
consensus: &'a ConsensusParams,
height: u32,
}
lazy_static! {
pub static ref BITCOIN_CASH_RETURN_REPLAY_PROTECTION_SCRIPT: Bytes = Builder::default()
.return_bytes(b"Bitcoin: A Peer-to-Peer Electronic Cash System")
.into_bytes();
}
impl<'a> TransactionReturnReplayProtection<'a> {
fn new(transaction: CanonTransaction<'a>, consensus: &'a ConsensusParams, height: u32) -> Self {
TransactionReturnReplayProtection {
transaction: transaction,
consensus: consensus,
height: height,
}
}
fn check(&self) -> Result<(), TransactionError> {
if let ConsensusFork::BitcoinCash(ref fork) = self.consensus.fork {
// Transactions with such OP_RETURNs shall be considered valid again for block 530,001 and onwards
if self.height >= fork.height && self.height <= 530_000 {
if (*self.transaction).raw.outputs.iter()
.any(|out| out.script_pubkey == *BITCOIN_CASH_RETURN_REPLAY_PROTECTION_SCRIPT) {
return Err(TransactionError::ReturnReplayProtection)
}
}
}
Ok(())
}
}
pub struct TransactionSize<'a> {
transaction: CanonTransaction<'a>,
min_transaction_size: usize,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: CanonTransaction<'a>, consensus: &'a ConsensusParams, median_time_past: u32) -> Self {
let min_transaction_size = consensus.fork.min_transaction_size(median_time_past);
TransactionSize {
transaction: transaction,
min_transaction_size,
}
}
fn check(&self) -> Result<(), TransactionError> {
if self.min_transaction_size != 0 && self.transaction.raw.serialized_size() < self.min_transaction_size {
Err(TransactionError::MinSize)
} else {
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use chain::{IndexedTransaction, Transaction, TransactionOutput};
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use script::Builder;
use canon::CanonTransaction;
use error::TransactionError;
use super::{TransactionReturnReplayProtection, TransactionSize};
#[test]
fn return_replay_protection_works() {
let transaction: IndexedTransaction = Transaction {
version: 1,
inputs: vec![],
outputs: vec![TransactionOutput {
value: 0,
script_pubkey: Builder::default()
.return_bytes(b"Bitcoin: A Peer-to-Peer Electronic Cash System")
.into_bytes(),
}],
lock_time: 0xffffffff,
joint_split: None,
}.into();
assert_eq!(transaction.raw.outputs[0].script_pubkey.len(), 46 + 2);
let consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)));
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, consensus.fork.activation_height());
assert_eq!(checker.check(), Err(TransactionError::ReturnReplayProtection));
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, consensus.fork.activation_height() - 1);
assert_eq!(checker.check(), Ok(()));
let consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, 100);
assert_eq!(checker.check(), Ok(()));
}
#[test]
fn transaction_size_works() {
let small_tx = Transaction::default();
let big_tx: Transaction = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000".into();
let small_tx = IndexedTransaction::new(small_tx.hash(), small_tx);
let big_tx = IndexedTransaction::new(big_tx.hash(), big_tx);
let small_tx = CanonTransaction::new(&small_tx);
let big_tx = CanonTransaction::new(&big_tx);
let unrestricted_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let restricted_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Unitest)));
// no restrictions
let checker = TransactionSize::new(small_tx, &unrestricted_consensus, 10000000);
assert_eq!(checker.check(), Ok(()));
// big + restricted
let checker = TransactionSize::new(big_tx, &restricted_consensus, 2000000000);
assert_eq!(checker.check(), Ok(()));
// small + restricted
let checker = TransactionSize::new(small_tx, &restricted_consensus, 2000000000);
assert_eq!(checker.check(), Err(TransactionError::MinSize));
}
}

View File

@ -157,7 +157,7 @@ mod tests {
use chain::{IndexedBlock, Transaction, Block};
use storage::Error as DBError;
use db::BlockChainDatabase;
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use network::{Network, ConsensusParams};
use script;
use constants::DOUBLE_SPACING_SECONDS;
use super::BackwardsCompatibleChainVerifier as ChainVerifier;
@ -167,7 +167,7 @@ mod tests {
fn verify_orphan() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b2 = test_data::block_h2().into();
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest));
assert_eq!(Err(Error::Database(DBError::UnknownParent)), verifier.verify(VerificationLevel::Full, &b2));
}
@ -175,7 +175,7 @@ mod tests {
fn verify_smoky() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b1 = test_data::block_h1();
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &b1.into()).is_ok());
}
@ -187,7 +187,7 @@ mod tests {
test_data::block_h1().into(),
]);
let b1 = test_data::block_h2();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &b1.into()).is_ok());
}
@ -216,7 +216,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::Transaction(
1,
@ -254,7 +254,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
}
@ -290,7 +290,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
}
@ -329,76 +329,12 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::Transaction(2, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
}
#[test]
fn transaction_references_same_block_and_goes_before_previous() {
let mut blocks = vec![test_data::block_builder()
.transaction()
.coinbase()
.output().value(50).build()
.build()
.merkled_header().build()
.build()];
let input_tx = blocks[0].transactions()[0].clone();
let mut parent_hash = blocks[0].hash();
// waiting 100 blocks for genesis coinbase to become valid
for _ in 0..100 {
let block: Block = test_data::block_builder()
.transaction().coinbase().build()
.merkled_header().parent(parent_hash).build()
.build()
.into();
parent_hash = block.hash();
blocks.push(block);
}
let storage = Arc::new(BlockChainDatabase::init_test_chain(blocks.into_iter().map(Into::into).collect()));
let tx1: Transaction = test_data::TransactionBuilder::with_version(4)
.add_input(&input_tx, 0)
.add_output(10).add_output(10).add_output(10)
.add_output(5).add_output(5).add_output(5)
.into();
let tx2: Transaction = test_data::TransactionBuilder::with_version(1)
.add_input(&tx1, 0)
.add_output(1).add_output(1).add_output(1)
.add_output(2).add_output(2).add_output(2)
.into();
assert!(tx1.hash() > tx2.hash());
let block = test_data::block_builder()
.transaction()
.coinbase()
.output().value(2).script_pubkey_with_sigops(100).build()
.build()
.with_transaction(tx2)
.with_transaction(tx1)
.merkled_header()
.time(DOUBLE_SPACING_SECONDS + 101) // to pass BCH work check
.parent(parent_hash)
.build()
.build();
// when topological order is required
let topological_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let verifier = ChainVerifier::new(storage.clone(), topological_consensus);
let expected = Err(Error::Transaction(1, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(VerificationLevel::Header, &block.clone().into()));
// when canonical order is required
let mut canonical_params = BitcoinCashConsensusParams::new(Network::Unitest);
canonical_params.magnetic_anomaly_time = 0;
let canonical_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(canonical_params));
let verifier = ChainVerifier::new(storage, canonical_consensus);
let expected = Ok(());
assert_eq!(expected, verifier.verify(VerificationLevel::Header, &block.into()));
}
#[test]
#[ignore]
fn coinbase_happy() {
@ -435,7 +371,7 @@ mod tests {
.merkled_header().parent(best_hash).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
}
@ -482,7 +418,7 @@ mod tests {
.build()
.into();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::MaximumSigops);
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
}
@ -504,7 +440,7 @@ mod tests {
.build()
.into();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::CoinbaseOverspend {
expected_max: 5000000000,

View File

@ -2,7 +2,6 @@
//! require sophisticated (in more than one source) previous transaction lookups
use chain::{OutPoint, TransactionOutput};
use network::TransactionOrdering;
use storage::TransactionOutputProvider;
#[derive(Clone, Copy)]
@ -42,19 +41,3 @@ impl TransactionOutputProvider for NoopStore {
false
}
}
/// Converts actual transaction index into transaction index to use in
/// TransactionOutputProvider::transaction_output call.
/// When topological ordering is used, we expect ascendant transaction (TX1)
/// to come BEFORE descendant transaction (TX2) in the block, like this:
/// [ ... TX1 ... TX2 ... ]
/// When canonical ordering is used, transactions order within block is not
/// relevant for this check and ascendant transaction (TX1) can come AFTER
/// descendant, like this:
/// [ ... TX2 ... TX1 ... ]
pub fn transaction_index_for_output_check(ordering: TransactionOrdering, tx_idx: usize) -> usize {
match ordering {
TransactionOrdering::Topological => tx_idx,
TransactionOrdering::Canonical => ::std::usize::MAX,
}
}

View File

@ -82,7 +82,6 @@ mod error;
mod sigops;
mod timestamp;
mod work;
mod work_bch;
mod work_zcash;
// pre-verification

View File

@ -1,6 +1,5 @@
use std::collections::HashSet;
use chain::IndexedBlock;
use network::ConsensusFork;
use sigops::transaction_sigops;
use duplex_store::NoopStore;
use error::{Error, TransactionError};
@ -20,10 +19,10 @@ impl<'a> BlockVerifier<'a> {
BlockVerifier {
empty: BlockEmpty::new(block),
coinbase: BlockCoinbase::new(block),
serialized_size: BlockSerializedSize::new(block, ConsensusFork::absolute_maximum_block_size()),
serialized_size: BlockSerializedSize::new(block, 2_000_000),
extra_coinbases: BlockExtraCoinbases::new(block),
transactions_uniqueness: BlockTransactionsUniqueness::new(block),
sigops: BlockSigops::new(block, ConsensusFork::absolute_maximum_block_sigops()),
sigops: BlockSigops::new(block, 20_000),
merkle_root: BlockMerkleRoot::new(block),
}
}

View File

@ -34,7 +34,7 @@ impl<'a> HeaderProofOfWork<'a> {
fn new(header: &'a IndexedBlockHeader, consensus: &ConsensusParams) -> Self {
HeaderProofOfWork {
header: header,
max_work_bits: consensus.network.max_bits(&consensus.fork).into(),
max_work_bits: consensus.network.max_bits().into(),
}
}

View File

@ -1,7 +1,7 @@
use std::ops;
use ser::Serializable;
use chain::IndexedTransaction;
use network::{ConsensusParams, ConsensusFork};
use network::{ConsensusParams};
use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;
@ -47,7 +47,7 @@ impl<'a> MemoryPoolTransactionVerifier<'a> {
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
size: TransactionSize::new(transaction, consensus),
sigops: TransactionSigops::new(transaction, ConsensusFork::absolute_maximum_block_sigops()),
sigops: TransactionSigops::new(transaction, 20_000),
}
}
@ -160,7 +160,7 @@ impl<'a> TransactionSize<'a> {
fn check(&self) -> Result<(), TransactionError> {
let size = self.transaction.raw.serialized_size();
if size > self.consensus.fork.max_transaction_size() {
if size > self.consensus.max_transaction_size() {
Err(TransactionError::MaxSize)
} else {
Ok(())

View File

@ -3,9 +3,8 @@ use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::bigint::U256;
use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, ConsensusFork};
use network::{Network, ConsensusParams};
use storage::{BlockHeaderProvider, BlockRef};
use work_bch::work_required_bitcoin_cash;
use work_zcash::work_required_zcash;
use constants::{
@ -59,93 +58,17 @@ pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
/// Returns work required for given header
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
let max_bits = consensus.network.max_bits(&consensus.fork).into();
let max_bits = consensus.network.max_bits().into();
if height == 0 {
return max_bits;
}
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
match consensus.fork {
ConsensusFork::ZCash(ref fork) =>
return work_required_zcash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, store, fork, max_bits),
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height =>
return work_required_bitcoin_cash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, time, height, store, consensus, fork, max_bits),
_ => (),
}
if is_retarget_height(height) {
return work_required_retarget(parent_header, height, store, max_bits);
}
if consensus.network == Network::Testnet {
return work_required_testnet(parent_hash, time, height, store, consensus)
}
parent_header.bits
}
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
assert!(height != 0, "cannot calculate required work for genesis block");
let mut bits = Vec::new();
let mut block_ref: BlockRef = parent_hash.into();
let parent_header = store.block_header(block_ref.clone()).expect("height != 0; qed");
let max_time_gap = parent_header.time + DOUBLE_SPACING_SECONDS;
let max_bits = consensus.network.max_bits(&consensus.fork).into();
if time > max_time_gap {
return max_bits;
}
// TODO: optimize it, so it does not make 2016!!! redundant queries each time
for _ in 0..RETARGETING_INTERVAL {
let previous_header = match store.block_header(block_ref) {
Some(h) => h,
None => { break; }
};
bits.push(previous_header.bits);
block_ref = previous_header.previous_header_hash.into();
}
for (index, bit) in bits.into_iter().enumerate() {
if bit != max_bits || is_retarget_height(height - index as u32 - 1) {
return bit;
}
}
max_bits
}
/// Algorithm used for retargeting work every 2 weeks
pub fn work_required_retarget(parent_header: BlockHeader, height: u32, store: &BlockHeaderProvider, max_work_bits: Compact) -> Compact {
let retarget_ref = (height - RETARGETING_INTERVAL).into();
let retarget_header = store.block_header(retarget_ref).expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
// timestamp of block(height - RETARGETING_INTERVAL)
let retarget_timestamp = retarget_header.time;
// timestamp of parent block
let last_timestamp = parent_header.time;
// bits of last block
let last_bits = parent_header.bits;
let mut retarget: U256 = last_bits.into();
let maximum: U256 = max_work_bits.into();
retarget = retarget * retarget_timespan(retarget_timestamp, last_timestamp).into();
retarget = retarget / TARGET_TIMESPAN_SECONDS.into();
if retarget > maximum {
max_work_bits
} else {
retarget.into()
}
work_required_zcash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, store, consensus, max_bits)
}
pub fn block_reward_satoshi(block_height: u32) -> u64 {
@ -158,7 +81,7 @@ pub fn block_reward_satoshi(block_height: u32) -> u64 {
mod tests {
use primitives::hash::H256;
use primitives::compact::Compact;
use network::{Network, ConsensusFork};
use network::{Network};
use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work, block_reward_satoshi};
fn is_valid_pow(max: Compact, bits: u32, hash: &'static str) -> bool {
@ -166,19 +89,6 @@ mod tests {
is_valid_proof_of_work(max.into(), bits.into(), &H256::from_reversed_str(hash))
}
#[test]
fn test_is_valid_proof_of_work() {
// block 2
assert!(is_valid_pow(Network::Mainnet.max_bits(&ConsensusFork::BitcoinCore).into(), 486604799u32, "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"));
// block 400_000
assert!(is_valid_pow(Network::Mainnet.max_bits(&ConsensusFork::BitcoinCore).into(), 403093919u32, "000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f"));
// other random tests
assert!(is_valid_pow(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore).into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000000"));
assert!(!is_valid_pow(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore).into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000001"));
assert!(!is_valid_pow(Network::Regtest.max_bits(&ConsensusFork::BitcoinCore).into(), 0x181bc330u32, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
}
#[test]
fn reward() {
assert_eq!(block_reward_satoshi(0), 5000000000);

View File

@ -1,507 +0,0 @@
use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::bigint::{Uint, U256};
use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, BitcoinCashConsensusParams};
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
use work::{is_retarget_height, work_required_testnet, work_required_retarget};
use constants::{
DOUBLE_SPACING_SECONDS, TARGET_SPACING_SECONDS, RETARGETING_INTERVAL
};
/// Returns work required for given header for the post-HF Bitcoin Cash block
pub fn work_required_bitcoin_cash(parent_header: IndexedBlockHeader, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams, fork: &BitcoinCashConsensusParams, max_bits: Compact) -> Compact {
// special processing of Bitcoin Cash difficulty adjustment hardfork (Nov 2017), where difficulty is adjusted after each block
// `height` is the height of the new block => comparison is shifted by one
if height.saturating_sub(1) >= fork.difficulty_adjustion_height {
return work_required_bitcoin_cash_adjusted(parent_header, time, height, store, consensus);
}
if is_retarget_height(height) {
return work_required_retarget(parent_header.raw, height, store, max_bits);
}
if consensus.network == Network::Testnet {
return work_required_testnet(parent_header.hash, time, height, store, consensus)
}
if parent_header.raw.bits == max_bits {
return parent_header.raw.bits;
}
// REQ-7 Difficulty adjustement in case of hashrate drop
// In case the MTP of the tip of the chain is 12h or more after the MTP 6 block before the tip,
// the proof of work target is increased by a quarter, or 25%, which corresponds to a difficulty
// reduction of 20%.
let ancient_block_ref = (height - 6 - 1).into();
let ancient_header = store.block_header(ancient_block_ref)
.expect("parent_header.bits != max_bits; difficulty is max_bits for first RETARGETING_INTERVAL height; RETARGETING_INTERVAL > 7; qed");
let ancient_timestamp = median_timestamp_inclusive(ancient_header.hash(), store);
let parent_timestamp = median_timestamp_inclusive(parent_header.hash.clone(), store);
let timestamp_diff = parent_timestamp.checked_sub(ancient_timestamp).unwrap_or_default();
if timestamp_diff < 43_200 {
// less than 12h => no difficulty change needed
return parent_header.raw.bits;
}
let mut new_bits: U256 = parent_header.raw.bits.into();
let max_bits: U256 = max_bits.into();
new_bits = new_bits + (new_bits >> 2);
if new_bits > max_bits {
new_bits = max_bits
}
new_bits.into()
}
/// Algorithm to adjust difficulty after each block. Implementation is based on Bitcoin ABC commit:
/// https://github.com/Bitcoin-ABC/bitcoin-abc/commit/be51cf295c239ff6395a0aa67a3e13906aca9cb2
fn work_required_bitcoin_cash_adjusted(parent_header: IndexedBlockHeader, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
/// To reduce the impact of timestamp manipulation, we select the block we are
/// basing our computation on via a median of 3.
fn suitable_block(mut header2: BlockHeader, store: &BlockHeaderProvider) -> BlockHeader {
let reason = "header.height >= RETARGETNG_INTERVAL; RETARGETING_INTERVAL > 2; qed";
let mut header1 = store.block_header(header2.previous_header_hash.clone().into()).expect(reason);
let mut header0 = store.block_header(header1.previous_header_hash.clone().into()).expect(reason);
if header0.time > header2.time {
::std::mem::swap(&mut header0, &mut header2);
}
if header0.time > header1.time {
::std::mem::swap(&mut header0, &mut header1);
}
if header1.time > header2.time {
::std::mem::swap(&mut header1, &mut header2);
}
header1
}
/// Get block proof.
fn block_proof(header: &BlockHeader) -> U256 {
let proof: U256 = header.bits.into();
// We need to compute 2**256 / (bnTarget+1), but we can't represent 2**256
// as it's too large for a arith_uint256. However, as 2**256 is at least as
// large as bnTarget+1, it is equal to ((2**256 - bnTarget - 1) /
// (bnTarget+1)) + 1, or ~bnTarget / (nTarget+1) + 1.
(!proof / (proof + U256::one())) + U256::one()
}
/// Compute chain work between two blocks. Last block work is included. First block work is excluded.
fn compute_work_between_blocks(first: H256, last: &BlockHeader, store: &BlockHeaderProvider) -> U256 {
debug_assert!(last.hash() != first);
let mut chain_work: U256 = block_proof(last);
let mut prev_hash = last.previous_header_hash.clone();
loop {
let header = store.block_header(prev_hash.into())
.expect("last header is on main chain; first is at height last.height - 144; it is on main chain; qed");
chain_work = chain_work + block_proof(&header);
prev_hash = header.previous_header_hash;
if prev_hash == first {
return chain_work;
}
}
}
/// Compute the a target based on the work done between 2 blocks and the time
/// required to produce that work.
fn compute_target(first_header: BlockHeader, last_header: BlockHeader, store: &BlockHeaderProvider) -> U256 {
// From the total work done and the time it took to produce that much work,
// we can deduce how much work we expect to be produced in the targeted time
// between blocks.
let mut work = compute_work_between_blocks(first_header.hash(), &last_header, store);
work = work * TARGET_SPACING_SECONDS.into();
// In order to avoid difficulty cliffs, we bound the amplitude of the
// adjustement we are going to do.
debug_assert!(last_header.time > first_header.time);
let mut actual_timespan = last_header.time - first_header.time;
if actual_timespan > 288 * TARGET_SPACING_SECONDS {
actual_timespan = 288 * TARGET_SPACING_SECONDS;
} else if actual_timespan < 72 * TARGET_SPACING_SECONDS {
actual_timespan = 72 * TARGET_SPACING_SECONDS;
}
let work = work / actual_timespan.into();
// We need to compute T = (2^256 / W) - 1 but 2^256 doesn't fit in 256 bits.
// By expressing 1 as W / W, we get (2^256 - W) / W, and we can compute
// 2^256 - W as the complement of W.
(!work) / work
}
// This cannot handle the genesis block and early blocks in general.
debug_assert!(height > 0);
// Special difficulty rule for testnet:
// If the new block's timestamp is more than 2 * 10 minutes then allow
// mining of a min-difficulty block.
let max_bits = consensus.network.max_bits(&consensus.fork);
if consensus.network == Network::Testnet || consensus.network == Network::Unitest {
let max_time_gap = parent_header.raw.time + DOUBLE_SPACING_SECONDS;
if time > max_time_gap {
return max_bits.into();
}
}
// Compute the difficulty based on the full adjustement interval.
let last_height = height - 1;
debug_assert!(last_height >= RETARGETING_INTERVAL);
// Get the last suitable block of the difficulty interval.
let last_header = suitable_block(parent_header.raw, store);
// Get the first suitable block of the difficulty interval.
let first_height = last_height - 144;
let first_header = store.block_header(first_height.into())
.expect("last_height >= RETARGETING_INTERVAL; RETARGETING_INTERVAL - 144 > 0; qed");
let first_header = suitable_block(first_header, store);
// Compute the target based on time and work done during the interval.
let next_target = compute_target(first_header, last_header, store);
let max_bits = consensus.network.max_bits(&consensus.fork);
if next_target > max_bits {
return max_bits.into();
}
next_target.into()
}
#[cfg(test)]
pub mod tests {
use std::collections::HashMap;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use primitives::bigint::U256;
use network::{Network, ConsensusParams, BitcoinCashConsensusParams, ConsensusFork};
use storage::{BlockHeaderProvider, BlockRef};
use chain::BlockHeader;
use work::work_required;
use super::work_required_bitcoin_cash_adjusted;
#[derive(Default)]
pub struct MemoryBlockHeaderProvider {
pub by_height: Vec<BlockHeader>,
pub by_hash: HashMap<H256, usize>,
}
impl MemoryBlockHeaderProvider {
pub fn last(&self) -> &BlockHeader {
self.by_height.last().unwrap()
}
pub fn insert(&mut self, header: BlockHeader) {
self.by_hash.insert(header.hash(), self.by_height.len());
self.by_height.push(header);
}
pub fn replace_last(&mut self, header: BlockHeader) {
let idx = self.by_height.len() - 1;
self.by_hash.remove(&self.by_height[idx].hash());
self.by_hash.insert(header.hash(), idx);
self.by_height[idx] = header;
}
}
impl BlockHeaderProvider for MemoryBlockHeaderProvider {
fn block_header_bytes(&self, _block_ref: BlockRef) -> Option<Bytes> {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}
}
// original test link:
// https://github.com/bitcoinclassic/bitcoinclassic/blob/8bf1fb856df44d1b790b0b835e4c1969be736e25/src/test/pow_tests.cpp#L108
#[test]
fn bitcoin_cash_req7() {
let main_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let uahf_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams {
height: 1000,
difficulty_adjustion_height: 0xffffffff,
monolith_time: 0xffffffff,
magnetic_anomaly_time: 0xffffffff,
}));
let mut header_provider = MemoryBlockHeaderProvider::default();
header_provider.insert(BlockHeader {
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
time: 1269211443,
bits: 0x207fffff.into(),
nonce: 0.into(),
reserved_hash: Default::default(),
solution: Default::default(),
});
// create x100 pre-HF blocks
for height in 1..1000 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 10 * 60;
header_provider.insert(header);
}
// create x10 post-HF blocks every 2 hours
// MTP still less than 12h
for height in 1000..1010 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 2 * 60 * 60;
header_provider.insert(header.clone());
let main_bits: u32 = work_required(header.hash(), 0, height as u32, &header_provider, &main_consensus).into();
assert_eq!(main_bits, 0x207fffff_u32);
let uahf_bits: u32 = work_required(header.hash(), 0, height as u32, &header_provider, &uahf_consensus).into();
assert_eq!(uahf_bits, 0x207fffff_u32);
}
// MTP becames greater than 12h
let mut header = header_provider.block_header(1009.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 2 * 60 * 60;
header_provider.insert(header.clone());
let main_bits: u32 = work_required(header.hash(), 0, 1010, &header_provider, &main_consensus).into();
assert_eq!(main_bits, 0x207fffff_u32);
let uahf_bits: u32 = work_required(header.hash(), 0, 1010, &header_provider, &uahf_consensus).into();
assert_eq!(uahf_bits, 0x1d00ffff_u32);
}
// original test link:
// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
#[test]
fn bitcoin_cash_adjusted_difficulty() {
let uahf_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams {
height: 1000,
difficulty_adjustion_height: 0xffffffff,
monolith_time: 0xffffffff,
magnetic_anomaly_time: 0xffffffff,
}));
let limit_bits = uahf_consensus.network.max_bits(&ConsensusFork::BitcoinCore);
let initial_bits = limit_bits >> 4;
let mut header_provider = MemoryBlockHeaderProvider::default();
// Genesis block.
header_provider.insert(BlockHeader {
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
time: 1269211443,
bits: initial_bits.into(),
nonce: 0.into(),
reserved_hash: Default::default(),
solution: Default::default(),
});
// Pile up some blocks every 10 mins to establish some history.
for height in 1..2050 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 600;
header_provider.insert(header);
}
// Difficulty stays the same as long as we produce a block every 10 mins.
let current_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2049.into()).unwrap().into(),
0, 2050, &header_provider, &uahf_consensus);
for height in 2050..2060 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 600;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
}
// Make sure we skip over blocks that are out of wack. To do so, we produce
// a block that is far in the future
let mut header = header_provider.block_header(2059.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2060.into()).unwrap().into(),
0, 2061, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
// .. and then produce a block with the expected timestamp.
let mut header = header_provider.block_header(2060.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 2 * 600 - 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2061.into()).unwrap().into(),
0, 2062, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
// The system should continue unaffected by the block with a bogous timestamps.
for height in 2062..2082 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 600;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
}
// We start emitting blocks slightly faster. The first block has no impact.
let mut header = header_provider.block_header(2081.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 550;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2082.into()).unwrap().into(),
0, 2083, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
// Now we should see difficulty increase slowly.
let mut current_bits = current_bits;
for height in 2083..2093 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 550;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work < current_work);
debug_assert!((current_work - calculated_work) < (current_work >> 10));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c0fe7b1.into());
// If we dramatically shorten block production, difficulty increases faster.
for height in 2093..2113 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 10;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work < current_work);
debug_assert!((current_work - calculated_work) < (current_work >> 4));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c0db19f.into());
// We start to emit blocks significantly slower. The first block has no
// impact.
let mut header = header_provider.block_header(2112.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let mut current_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2113.into()).unwrap().into(),
0, 2114, &header_provider, &uahf_consensus);
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c0d9222.into());
// If we dramatically slow down block production, difficulty decreases.
for height in 2114..2207 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work < limit_bits);
debug_assert!(calculated_work > current_work);
debug_assert!((calculated_work - current_work) < (current_work >> 3));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c2f13b9.into());
// Due to the window of time being bounded, next block's difficulty actually
// gets harder.
let mut header = header_provider.block_header(2206.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let mut current_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2207.into()).unwrap().into(),
0, 2208, &header_provider, &uahf_consensus);
debug_assert_eq!(current_bits, 0x1c2ee9bf.into());
// And goes down again. It takes a while due to the window being bounded and
// the skewed block causes 2 blocks to get out of the window.
for height in 2208..2400 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work <= limit_bits);
debug_assert!(calculated_work > current_work);
debug_assert!((calculated_work - current_work) < (current_work >> 3));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1d00ffff.into());
// Once the difficulty reached the minimum allowed level, it doesn't get any
// easier.
for height in 2400..2405 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, limit_bits.into());
current_bits = calculated_bits;
}
}
}

View File

@ -1,17 +1,17 @@
use primitives::compact::Compact;
use primitives::bigint::U256;
use chain::IndexedBlockHeader;
use network::ZCashConsensusParams;
use network::ConsensusParams;
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
/// Returns work required for given header for the ZCash block
pub fn work_required_zcash(parent_header: IndexedBlockHeader, store: &BlockHeaderProvider, fork: &ZCashConsensusParams, max_bits: Compact) -> Compact {
pub fn work_required_zcash(parent_header: IndexedBlockHeader, store: &BlockHeaderProvider, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// Find the first block in the averaging interval
let parent_hash = parent_header.hash.clone();
let mut oldest_hash = parent_header.raw.previous_header_hash;
let mut bits_total: U256 = parent_header.raw.bits.into();
for _ in 1..fork.pow_averaging_window {
for _ in 1..consensus.pow_averaging_window {
let previous_header = match store.block_header(oldest_hash.into()) {
Some(previous_header) => previous_header,
None => return max_bits,
@ -21,31 +21,31 @@ pub fn work_required_zcash(parent_header: IndexedBlockHeader, store: &BlockHeade
oldest_hash = previous_header.previous_header_hash;
}
let bits_avg = bits_total / fork.pow_averaging_window.into();
let bits_avg = bits_total / consensus.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_hash, store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, fork, max_bits)
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, consensus, max_bits)
}
fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, fork: &ZCashConsensusParams, max_bits: Compact) -> Compact {
fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// Limit adjustment step
// Use medians to prevent time-warp attacks
let actual_timespan = parent_mtp - oldest_mtp;
let mut actual_timespan = fork.averaging_window_timespan() as i64 +
(actual_timespan as i64 - fork.averaging_window_timespan() as i64) / 4;
let mut actual_timespan = consensus.averaging_window_timespan() as i64 +
(actual_timespan as i64 - consensus.averaging_window_timespan() as i64) / 4;
if actual_timespan < fork.min_actual_timespan() as i64 {
actual_timespan = fork.min_actual_timespan() as i64;
if actual_timespan < consensus.min_actual_timespan() as i64 {
actual_timespan = consensus.min_actual_timespan() as i64;
}
if actual_timespan > fork.max_actual_timespan() as i64 {
actual_timespan = fork.max_actual_timespan() as i64;
if actual_timespan > consensus.max_actual_timespan() as i64 {
actual_timespan = consensus.max_actual_timespan() as i64;
}
// Retarget
let actual_timespan = actual_timespan as u32;
let mut bits_new = bits_avg / fork.averaging_window_timespan().into();
let mut bits_new = bits_avg / consensus.averaging_window_timespan().into();
bits_new = bits_new * actual_timespan.into();
if bits_new > max_bits.into() {
@ -57,23 +57,63 @@ fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, for
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use primitives::bytes::Bytes;
use primitives::compact::Compact;
use primitives::bigint::U256;
use network::{Network, ZCashConsensusParams, ConsensusFork};
use primitives::hash::H256;
use network::{Network, ConsensusParams};
use chain::BlockHeader;
use storage::{BlockHeaderProvider, BlockRef};
use timestamp::median_timestamp_inclusive;
use work_bch::tests::MemoryBlockHeaderProvider;
use super::{work_required_zcash, calculate_work_required};
#[derive(Default)]
pub struct MemoryBlockHeaderProvider {
pub by_height: Vec<BlockHeader>,
pub by_hash: HashMap<H256, usize>,
}
impl MemoryBlockHeaderProvider {
pub fn last(&self) -> &BlockHeader {
self.by_height.last().unwrap()
}
pub fn insert(&mut self, header: BlockHeader) {
self.by_hash.insert(header.hash(), self.by_height.len());
self.by_height.push(header);
}
pub fn replace_last(&mut self, header: BlockHeader) {
let idx = self.by_height.len() - 1;
self.by_hash.remove(&self.by_height[idx].hash());
self.by_hash.insert(header.hash(), idx);
self.by_height[idx] = header;
}
}
impl BlockHeaderProvider for MemoryBlockHeaderProvider {
fn block_header_bytes(&self, _block_ref: BlockRef) -> Option<Bytes> {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}
}
// original test link:
// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
#[test]
fn zcash_work_required_works() {
let fork = ZCashConsensusParams::new(Network::Mainnet);
let max_bits = Network::Mainnet.max_bits(&ConsensusFork::ZCash(fork.clone()));
let consensus = ConsensusParams::new(Network::Mainnet);
let max_bits = Network::Mainnet.max_bits();
let last_block = 2 * fork.pow_averaging_window;
let first_block = last_block - fork.pow_averaging_window;
let last_block = 2 * consensus.pow_averaging_window;
let first_block = last_block - consensus.pow_averaging_window;
// insert genesis block
let mut header_provider = MemoryBlockHeaderProvider::default();
@ -91,7 +131,7 @@ mod tests {
// Start with blocks evenly-spaced and equal difficulty
for i in 1..last_block+1 {
let header = BlockHeader {
time: header_provider.last().time + fork.pow_target_spacing,
time: header_provider.last().time + consensus.pow_target_spacing,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: header_provider.by_height[i as usize - 1].hash(),
@ -108,23 +148,23 @@ mod tests {
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
&header_provider, &consensus, max_bits.into());
assert_eq!(actual, expected);
// Result should be unchanged, modulo integer division precision loss
let mut bits_expected: U256 = Compact::new(0x1e7fffff).into();
bits_expected = bits_expected / fork.averaging_window_timespan().into();
bits_expected = bits_expected * fork.averaging_window_timespan().into();
bits_expected = bits_expected / consensus.averaging_window_timespan().into();
bits_expected = bits_expected * consensus.averaging_window_timespan().into();
assert_eq!(work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into()),
&header_provider, &consensus, max_bits.into()),
bits_expected.into());
// Randomise the final block time (plus 1 to ensure it is always different)
use rand::{thread_rng, Rng};
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.time += thread_rng().gen_range(1, fork.pow_target_spacing / 2);
last_header.time += thread_rng().gen_range(1, consensus.pow_target_spacing / 2);
header_provider.replace_last(last_header);
// Result should be the same as if last difficulty was used
@ -132,15 +172,15 @@ mod tests {
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
&header_provider, &consensus, max_bits.into());
assert_eq!(actual, expected);
// Result should not be unchanged
let bits_expected = Compact::new(0x1e7fffff);
assert!(work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into()) != bits_expected);
&header_provider, &consensus, max_bits.into()) != bits_expected);
// Change the final block difficulty
let mut last_header = header_provider.by_height[last_block as usize].clone();
@ -152,9 +192,9 @@ mod tests {
let expected = calculate_work_required(bits_avg.into(),
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
&header_provider, &consensus, max_bits.into());
assert!(actual != expected);
// Result should be the same as if the average difficulty was used
@ -162,9 +202,9 @@ mod tests {
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&fork, max_bits.into());
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &fork, max_bits.into());
&header_provider, &consensus, max_bits.into());
assert_eq!(actual, expected);
}
}