From 34a582b8e9dd5ac3eee2097be5159b814627efbe Mon Sep 17 00:00:00 2001
From: NikVolf
Date: Tue, 22 Nov 2016 18:26:00 +0300
Subject: [PATCH 01/26] expected nbits method & support

---
 db/src/block_provider.rs           |  3 +++
 db/src/storage.rs                  | 16 +++++++++++++++-
 db/src/test_storage.rs             | 13 +++++++++++++
 verification/src/chain_verifier.rs | 22 ++++++++++++++++++++++
 4 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/db/src/block_provider.rs b/db/src/block_provider.rs
index 3afb6bc3..f455ef12 100644
--- a/db/src/block_provider.rs
+++ b/db/src/block_provider.rs
@@ -14,6 +14,9 @@ pub trait BlockProvider {
 	/// resolves header bytes by block reference (number/hash)
 	fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes>;
 
+	/// resolves deserialized header by block reference (number/hash)
+	fn block_header(&self, block_ref: BlockRef) -> Option<chain::BlockHeader>;
+
 	/// resolves deserialized block body by block reference (number/hash)
 	fn block(&self, block_ref: BlockRef) -> Option<chain::Block>;
 
diff --git a/db/src/storage.rs b/db/src/storage.rs
index bf749d8f..43858f11 100644
--- a/db/src/storage.rs
+++ b/db/src/storage.rs
@@ -40,6 +40,9 @@ const MAX_FORK_ROUTE_PRESET: usize = 128;
 pub trait Store : BlockProvider + BlockStapler + TransactionProvider + TransactionMetaProvider {
 	/// get best block
 	fn best_block(&self) -> Option<BestBlock>;
+
+	/// get best header
+	fn best_header(&self) -> Option<chain::BlockHeader>;
 }
 
 /// Blockchain storage with rocksdb database
@@ -165,7 +168,6 @@ impl Storage {
 		})
 	}
 
-
 	/// update transactions metadata in the specified database transaction
 	fn update_transactions_meta(&self, context: &mut UpdateContext, number: u32, accepted_txs: &[chain::Transaction])
 		-> Result<(), Error>
@@ -385,6 +387,12 @@ impl BlockProvider for Storage {
 		self.resolve_hash(block_ref).and_then(|h| self.get(COL_BLOCK_HEADERS, &*h))
 	}
 
+	fn block_header(&self, block_ref: BlockRef) -> Option<chain::BlockHeader> {
+		self.block_header_bytes(block_ref).map(
+			|bytes| deserialize::<_, chain::BlockHeader>(bytes.as_ref())
+				.expect("Error deserializing header, possible db corruption"))
+	}
+
 	fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256> {
 		self.resolve_hash(block_ref)
 			.map(|h| self.block_transaction_hashes_by_hash(&h))
@@ -596,6 +604,12 @@ impl Store for Storage {
 	fn best_block(&self) -> Option<BestBlock> {
 		self.best_block.read().clone()
 	}
+
+	fn best_header(&self) -> Option<chain::BlockHeader> {
+		self.best_block.read().as_ref().and_then(
+			|bb| Some(self.block_header_by_hash(&bb.hash).expect("Best block exists but no such header. Race condition?")),
+		)
+	}
 }
 
 #[cfg(test)]
diff --git a/db/src/test_storage.rs b/db/src/test_storage.rs
index 2c2bd514..b138e698 100644
--- a/db/src/test_storage.rs
+++ b/db/src/test_storage.rs
@@ -81,6 +81,13 @@ impl BlockProvider for TestStorage {
 			.map(|ref block| serialization::serialize(block.header()))
 	}
 
+	fn block_header(&self, block_ref: BlockRef) -> Option<chain::BlockHeader> {
+		let data = self.data.read();
+		self.resolve_hash(block_ref)
+			.and_then(|ref h| data.blocks.get(h))
+			.map(|ref block| block.header().clone())
+	}
+
 	fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256> {
 		let data = self.data.read();
 		self.resolve_hash(block_ref)
@@ -174,5 +181,11 @@ impl Store for TestStorage {
 	fn best_block(&self) -> Option<BestBlock> {
 		self.data.read().best_block.clone()
 	}
+
+	fn best_header(&self) -> Option<chain::BlockHeader> {
+		self.data.read().best_block.as_ref().and_then(
+			|bb| Some(self.block_header(BlockRef::Hash(bb.hash.clone())).expect("Best block exists but no such header. Race condition?"))
+		)
+	}
 }
 
diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index 25fb7bc6..3d1b722f 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -54,6 +54,12 @@ impl ChainVerifier {
 	}
 
 	fn ordered_verify(&self, block: &chain::Block, at_height: u32) -> Result<(), Error> {
+		// check that difficulty matches the adjusted level
+		if let Some(expected_nbits) = self.expected_nbits() {
+			if !self.skip_pow && expected_nbits != block.header().nbits {
+				return Err(Error::Difficulty);
+			}
+		}
 
 		let coinbase_spends = block.transactions()[0].total_spends();
 
@@ -258,6 +264,22 @@ impl ChainVerifier {
 			},
 		}
 	}
+
+	fn expected_nbits(&self) -> Option<u32> {
+
+		let best_header = match self.store.best_header() {
+			Some(bb) => bb,
+			None => { return None; }
+		};
+
+		if self.store.best_block().expect("At least genesis should exist at this point").number < 2016 {
+			return Some(best_header.nbits);
+		}
+
+		// todo: calculate difficulty adjustment
+
+		Some(best_header.nbits)
+	}
 }
 
 impl Verify for ChainVerifier {
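Patch 01 compares `nbits` values directly without spelling out what the compact encoding means. The sketch below is an illustration only, not part of the patch, and deliberately simplified (overflow beyond 32 target bytes is silently dropped); patch 03 below formalizes this properly as the `NBits` type.

```rust
// Sketch: how an nbits value relates to the 256-bit proof-of-work target.
// nbits packs a 1-byte exponent (number of significant target bytes) and a
// 23-bit mantissa; the sign bit 0x00800000 is never valid for a target.
fn nbits_to_target_bytes(nbits: u32) -> ([u8; 32], bool) {
	let exponent = (nbits >> 24) as usize;   // number of significant bytes
	let mantissa = nbits & 0x007f_ffff;      // 23-bit mantissa
	let negative = nbits & 0x0080_0000 != 0; // sign bit, invalid for targets

	let mut target = [0u8; 32]; // big-endian target bytes
	for i in 0..3 {
		let byte = ((mantissa >> (8 * (2 - i))) & 0xff) as u8;
		// the most significant mantissa byte lands at position 32 - exponent
		if let Some(pos) = (32 + i).checked_sub(exponent) {
			if pos < 32 {
				target[pos] = byte;
			}
		}
	}
	(target, negative)
}
```

A block hash, interpreted as a big-endian 256-bit number, must be less than or equal to this target for the proof of work to be valid, which is what `expected_nbits` is ultimately guarding.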
From 7f5e5fb0367030da49b0b01d5515847266a9e43f Mon Sep 17 00:00:00 2001
From: NikVolf
Date: Wed, 23 Nov 2016 00:13:51 +0300
Subject: [PATCH 02/26] threshold to nbits

---
 verification/src/utils.rs | 34 +++++++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/verification/src/utils.rs b/verification/src/utils.rs
index 484673a6..bf2b930f 100644
--- a/verification/src/utils.rs
+++ b/verification/src/utils.rs
@@ -1,11 +1,13 @@
 //! Verification utilities
-use primitives::hash::H256;
+use primitives::{H256, U256};
+use primitives::uint::Uint;
 use byteorder::{BigEndian, ByteOrder};
 use chain;
 use script::{self, Script};
 
 const MAX_NBITS: u32 = 0x207fffff;
 
+/// Simple nbits check that does not require 256-bit arithmetic
 pub fn check_nbits(hash: &H256, n_bits: u32) -> bool {
 	if n_bits > MAX_NBITS { return false; }
 
@@ -82,11 +84,28 @@ pub fn p2sh_sigops(output: &Script, input_ref: &Script) -> usize {
 	output.sigop_count_p2sh(input_ref).unwrap_or(0)
 }
 
+/// Converts difficulty threshold to the compact representation (nbits)
+pub fn threshold_to_nbits(val: U256) -> u32 {
+	let mut nb = [0u8; 4];
+	let bits = val.bits() as u8;
+	nb[0] = (bits + 7) / 8;
+	if val.byte(nb[0] as usize - 1) > 0x7f { nb[0] += 1 }
+
+	nb[1] = val.byte((nb[0]-1) as usize);
+	nb[2] = val.byte((nb[0]-2) as usize);
+	if nb[0] > 2 {
+		nb[3] = val.byte((nb[0]-3) as usize);
+	}
+
+	BigEndian::read_u32(&nb)
+}
+
 #[cfg(test)]
 mod tests {
-	use super::{block_reward_satoshi, check_nbits};
-	use primitives::hash::H256;
+	use super::{block_reward_satoshi, check_nbits, threshold_to_nbits};
+	use primitives::{H256, U256};
+	use primitives::uint::Uint;
 
 	#[test]
 	fn reward() {
@@ -127,4 +146,13 @@ mod tests {
 		let nbits = 404129525;
 		assert!(check_nbits(&hash, nbits));
 	}
+
+	#[test]
+	fn threshold() {
+		let test1 = U256::from(1000u64);
+		assert_eq!(0x0203e800, threshold_to_nbits(test1));
+
+		let test2 = U256::from(2).pow(U256::from(256-32)) - U256::from(1);
+		assert_eq!(0x1d00ffff, threshold_to_nbits(test2));
+	}
 }
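Worked numbers for the first test case above (illustration only, not part of the patch), tracing the byte assembly in `threshold_to_nbits`:

```rust
// Encoding the threshold 1000 = 0x03e8:
// val.bits() == 10, so nb[0] = (10 + 7) / 8 = 2 significant bytes.
// The top byte 0x03 is <= 0x7f, so no sign-avoiding padding byte is needed.
// nb becomes [0x02, 0x03, 0xe8, 0x00]; read big-endian that is the exponent
// 0x02 in the high byte followed by the mantissa 0x03e8:
assert_eq!(0x0203e800u32, 0x02u32 << 24 | 0x03 << 16 | 0xe8 << 8);
```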
From 961e4361cf88994a34da0acb9211b514a478436c Mon Sep 17 00:00:00 2001
From: debris
Date: Wed, 23 Nov 2016 04:10:11 +0100
Subject: [PATCH 03/26] pow validation in progress

---
 Cargo.lock                         |  1 +
 chain/src/block_header.rs          | 40 ++++++++++++++++++++++
 chain/src/lib.rs                   |  3 +-
 chain/src/nbits.rs                 | 55 ++++++++++++++++++++++++++++++
 primitives/Cargo.toml              |  1 +
 primitives/src/hash.rs             |  9 +++++
 primitives/src/lib.rs              |  3 +-
 primitives/src/uint.rs             | 23 +++++++++++++
 verification/src/chain_verifier.rs |  3 +-
 9 files changed, 135 insertions(+), 3 deletions(-)
 create mode 100644 chain/src/nbits.rs

diff --git a/Cargo.lock b/Cargo.lock
index 993b20fd..a64f8600 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -493,6 +493,7 @@
 name = "primitives"
 version = "0.1.0"
 dependencies = [
+ "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/chain/src/block_header.rs b/chain/src/block_header.rs
index 64aaa07a..98cc44ac 100644
--- a/chain/src/block_header.rs
+++ b/chain/src/block_header.rs
@@ -5,6 +5,8 @@ use ser::{
 };
 use crypto::dhash256;
 use hash::H256;
+use uint::U256;
+use nbits::{NBits, MAX_NBITS_MAINNET, MAX_NBITS_REGTEST};
 
 #[derive(PartialEq, Clone)]
 pub struct BlockHeader {
@@ -20,6 +22,36 @@ impl BlockHeader {
 	pub fn hash(&self) -> H256 {
 		dhash256(&serialize(self))
 	}
+
+	/// Returns the total work of the block
+	//pub fn work(&self) -> U256 {
+	//// 2**256 / (target + 1) == ~target / (target+1) + 1 (eqn shamelessly stolen from bitcoind)
+	//let mut ret = !self.target();
+	//let mut ret1 = self.target();
+	//ret1 = ret1 + 1.into();
+	//ret = ret / ret1;
+	//ret = ret + 1.into();
+	//ret
+	//}
+
+	pub fn is_valid_proof_of_work(&self) -> bool {
+		let max = match NBits::new(MAX_NBITS_REGTEST).target() {
+			Some(max) => max,
+			None => return false,
+		};
+
+		let target = match NBits::new(self.nbits).target() {
+			Some(target) => target,
+			None => return false,
+		};
+
+		if target > max {
+			return false;
+		}
+
+		let target = H256::from(target.to_little_endian());
+		self.hash() <= target
+	}
 }
 
 impl fmt::Debug for BlockHeader {
@@ -66,6 +98,7 @@ impl Deserializable for BlockHeader {
 mod tests {
 	use ser::{Reader, Error as ReaderError, Stream};
 	use super::BlockHeader;
+	use nbits::MAX_NBITS_REGTEST;
 
 	#[test]
 	fn test_block_header_stream() {
@@ -118,4 +151,11 @@ mod tests {
 		assert_eq!(expected, reader.read().unwrap());
 		assert_eq!(ReaderError::UnexpectedEnd, reader.read::<BlockHeader>().unwrap_err());
 	}
+
+	#[test]
+	fn test_is_valid_proof_of_work() {
+		let mut header = BlockHeader::default();
+		header.nbits = MAX_NBITS_REGTEST;
+		assert!(header.is_valid_proof_of_work());
+	}
 }
diff --git a/chain/src/lib.rs b/chain/src/lib.rs
index d0bd9799..cc14aba3 100644
--- a/chain/src/lib.rs
+++ b/chain/src/lib.rs
@@ -7,6 +7,7 @@ extern crate serialization as ser;
 mod block;
 mod block_header;
 mod merkle_root;
+mod nbits;
 mod transaction;
 
 pub trait RepresentH256 {
@@ -14,7 +15,7 @@
 }
 
 pub use rustc_serialize::hex;
-pub use primitives::{hash, bytes};
+pub use primitives::{hash, bytes, uint};
 
 pub use self::block::Block;
 pub use self::block_header::BlockHeader;
diff --git a/chain/src/nbits.rs b/chain/src/nbits.rs
new file mode 100644
index 00000000..14ff7580
--- /dev/null
+++ b/chain/src/nbits.rs
@@ -0,0 +1,55 @@
+use uint::U256;
+
+pub const MAX_NBITS_MAINNET: u32 = 0x1d00ffff;
+pub const MAX_NBITS_TESTNET: u32 = 0x1d00ffff;
+pub const MAX_NBITS_REGTEST: u32 = 0x207fffff;
+
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub struct NBits(u32);
+
+impl NBits {
+	pub fn new(u: u32) -> Self {
+		NBits(u)
+	}
+
+	/// Computes the target [0, T] that a blockhash must land in to be valid
+	/// Returns None if there is an overflow or the value is negative
+	pub fn target(&self) -> Option<U256> {
+		let size = self.0 >> 24;
+		let mut word = self.0 & 0x007fffff;
+
+		let result = if size <= 3 {
+			word = word >> (8 * (3 - size as usize));
+			word.into()
+		} else {
+			U256::from(word) << (8 * (size as usize - 3))
+		};
+
+		let is_negative = word != 0 && (self.0 & 0x00800000) != 0;
+		let is_overflow = (word != 0 && size > 34) ||
+			(word > 0xff && size > 33) ||
+			(word > 0xffff && size > 32);
+
+		if is_negative || is_overflow {
+			None
+		} else {
+			Some(result)
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::NBits;
+
+	#[test]
+	fn test_basic_nbits_target() {
+		assert_eq!(NBits::new(0x01003456).target(), Some(0.into()));
+		assert_eq!(NBits::new(0x01123456).target(), Some(0x12.into()));
+		assert_eq!(NBits::new(0x02008000).target(), Some(0x80.into()));
+		assert_eq!(NBits::new(0x05009234).target(), Some(0x92340000u64.into()));
+		// negative -0x12345600
+		assert_eq!(NBits::new(0x04923456).target(), None);
+		assert_eq!(NBits::new(0x04123456).target(), Some(0x12345600u64.into()));
+	}
+}
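The commented-out `work()` above leans on an identity from bitcoind, because 2**256 itself does not fit in a `U256`. A minimal sketch of that computation, assuming the `U256` type above (not part of the patch):

```rust
// 2**256 / (target + 1) rewritten to stay within 256 bits:
// !target == 2**256 - 1 - target, and for 2**256 == q*(target + 1) + r
// with 0 <= r <= target, (2**256 - 1 - target) / (target + 1) == q - 1,
// so adding 1 recovers q exactly under integer division.
// Assumes target < U256::max_value(), which holds for any valid target.
fn block_work(target: U256) -> U256 {
	let one = U256::from(1u64);
	(!target / (target + one)) + one
}
```

Summing this quantity over headers is how the heaviest (most-work) chain is selected, which is presumably why the helper was drafted here even though it is still disabled.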
diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml
index faf0e094..c8f33616 100644
--- a/primitives/Cargo.toml
+++ b/primitives/Cargo.toml
@@ -6,6 +6,7 @@ build = "build.rs"
 
 [dependencies]
 heapsize = "0.3"
+byteorder = "0.5"
 rustc-serialize = "0.3"
 
 [build-dependencies]
diff --git a/primitives/src/hash.rs b/primitives/src/hash.rs
index 5a31d685..17999dd2 100644
--- a/primitives/src/hash.rs
+++ b/primitives/src/hash.rs
@@ -105,6 +105,15 @@ macro_rules! impl_hash {
 			}
 		}
 
+		impl cmp::PartialOrd for $name {
+			fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+				let self_ref: &[u8] = &self.0;
+				let other_ref: &[u8] = &other.0;
+				self_ref.partial_cmp(other_ref)
+			}
+		}
+
+
 		impl Hash for $name {
 			fn hash(&self, state: &mut H) where H: Hasher {
 				state.write(&self.0);
diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs
index eafddb14..c119fe84 100644
--- a/primitives/src/lib.rs
+++ b/primitives/src/lib.rs
@@ -1,7 +1,8 @@
 #![cfg_attr(asm_available, feature(asm))]
 
-extern crate rustc_serialize;
+extern crate byteorder;
 #[macro_use] extern crate heapsize;
+extern crate rustc_serialize;
 
 pub mod bytes;
 pub mod hash;
diff --git a/primitives/src/uint.rs b/primitives/src/uint.rs
index 9ae77bc8..ea0730f5 100644
--- a/primitives/src/uint.rs
+++ b/primitives/src/uint.rs
@@ -8,6 +8,7 @@
 use std::{str, fmt};
 use std::ops::{Shr, Shl, BitAnd, BitOr, BitXor, Not, Div, Rem, Mul, Add, Sub};
 use std::cmp::Ordering;
+use byteorder::{WriteBytesExt, LittleEndian, BigEndian};
 use hex::{FromHex, FromHexError};
 
 /// Conversion from decimal string error
@@ -393,6 +394,28 @@ macro_rules! construct_uint {
 				Ok(res)
 			}
 
+			pub fn to_little_endian(&self) -> [u8; $n_words * 8] {
+				let mut result = [0u8; $n_words * 8];
+				{
+					let mut result_ref: &mut [u8] = &mut result;
+					for word in self.0.into_iter() {
+						result_ref.write_u64::<LittleEndian>(*word).expect("sizeof($n_words * u8 * 8) == sizeof($n_words * u64); qed");
+					}
+				}
+				result
+			}
+
+			pub fn to_big_endian(&self) -> [u8; $n_words * 8] {
+				let mut result = [0u8; $n_words * 8];
+				{
+					let mut result_ref: &mut [u8] = &mut result;
+					for word in self.0.into_iter().rev() {
+						result_ref.write_u64::<BigEndian>(*word).expect("sizeof($n_words * u8 * 8) == sizeof($n_words * u64); qed");
+					}
+				}
+				result
+			}
+
 			#[inline]
 			pub fn low_u32(&self) -> u32 {
 				let &$name(ref arr) = self;
diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index 25fb7bc6..d423ae82 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -194,7 +194,8 @@ impl ChainVerifier {
 		}
 
 		// target difficulty threshold
-		if !self.skip_pow && !utils::check_nbits(&hash, block.header().nbits) {
+		//if !self.skip_pow && !utils::check_nbits(&hash, block.header().nbits) {
+		if !self.skip_pow && !block.header().is_valid_proof_of_work() {
 			return Err(Error::Pow);
 		}
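A short usage sketch (not part of the patch) of the byte-order helpers just added to `construct_uint`, showing that the two representations are mirror images of each other:

```rust
#[test]
fn u256_byte_order_round_trip() {
	let x = U256::from(0x1122u64);
	let le = x.to_little_endian(); // [0x22, 0x11, 0, 0, ...]
	let be = x.to_big_endian();    // [..., 0, 0, 0x11, 0x22]
	assert_eq!(le[0], 0x22);
	assert_eq!(be[31], 0x22);
	// one order is exactly the reverse of the other
	assert!(le.iter().rev().eq(be.iter()));
}
```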
From 355306e747f284ffc92f1a5c230b8911b598787e Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 24 Nov 2016 23:33:51 +0100
Subject: [PATCH 04/26] retarget

---
 Cargo.lock                         |   1 +
 chain/Cargo.toml                   |   1 +
 chain/src/block_header.rs          |  40 -----------
 chain/src/lib.rs                   |   1 -
 chain/src/nbits.rs                 |  55 ---------------
 db/src/lib.rs                      |  12 ++++
 p2p/src/net/connection_counter.rs  |   5 --
 p2p/src/util/nonce.rs              |  14 ----
 verification/src/chain_verifier.rs |  45 ++++++++-----
 verification/src/compact.rs        | 105 +++++++++++++++++++++++++++++
 verification/src/lib.rs            |   5 +-
 verification/src/utils.rs          |  88 +++++++++++++++---------
 12 files changed, 209 insertions(+), 163 deletions(-)
 delete mode 100644 chain/src/nbits.rs
 create mode 100644 verification/src/compact.rs

diff --git a/Cargo.lock b/Cargo.lock
index a64f8600..4832cc5e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -102,6 +102,7 @@
 name = "chain"
 version = "0.1.0"
 dependencies = [
 "bitcrypto 0.1.0",
+ "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "primitives 0.1.0",
 "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/chain/Cargo.toml b/chain/Cargo.toml
index ca16d59f..485af24e 100644
--- a/chain/Cargo.toml
+++ b/chain/Cargo.toml
@@ -4,6 +4,7 @@ version = "0.1.0"
 authors = ["debris "]
 
 [dependencies]
+byteorder = "0.5"
 rustc-serialize = "0.3"
 heapsize = "0.3"
 bitcrypto = { path = "../crypto" }
diff --git a/chain/src/block_header.rs b/chain/src/block_header.rs
index 98cc44ac..64aaa07a 100644
--- a/chain/src/block_header.rs
+++ b/chain/src/block_header.rs
@@ -5,8 +5,6 @@ use ser::{
 };
 use crypto::dhash256;
 use hash::H256;
-use uint::U256;
-use nbits::{NBits, MAX_NBITS_MAINNET, MAX_NBITS_REGTEST};
 
 #[derive(PartialEq, Clone)]
 pub struct BlockHeader {
@@ -22,36 +20,6 @@ impl BlockHeader {
 	pub fn hash(&self) -> H256 {
 		dhash256(&serialize(self))
 	}
-
-	/// Returns the total work of the block
-	//pub fn work(&self) -> U256 {
-	//// 2**256 / (target + 1) == ~target / (target+1) + 1 (eqn shamelessly stolen from bitcoind)
-	//let mut ret = !self.target();
-	//let mut ret1 = self.target();
-	//ret1 = ret1 + 1.into();
-	//ret = ret / ret1;
-	//ret = ret + 1.into();
-	//ret
-	//}
-
-	pub fn is_valid_proof_of_work(&self) -> bool {
-		let max = match NBits::new(MAX_NBITS_REGTEST).target() {
-			Some(max) => max,
-			None => return false,
-		};
-
-		let target = match NBits::new(self.nbits).target() {
-			Some(target) => target,
-			None => return false,
-		};
-
-		if target > max {
-			return false;
-		}
-
-		let target = H256::from(target.to_little_endian());
-		self.hash() <= target
-	}
 }
 
 impl fmt::Debug for BlockHeader {
@@ -98,7 +66,6 @@ impl Deserializable for BlockHeader {
 mod tests {
 	use ser::{Reader, Error as ReaderError, Stream};
 	use super::BlockHeader;
-	use nbits::MAX_NBITS_REGTEST;
 
 	#[test]
 	fn test_block_header_stream() {
@@ -151,11 +118,4 @@ mod tests {
 		assert_eq!(expected, reader.read().unwrap());
 		assert_eq!(ReaderError::UnexpectedEnd, reader.read::<BlockHeader>().unwrap_err());
 	}
-
-	#[test]
-	fn test_is_valid_proof_of_work() {
-		let mut header = BlockHeader::default();
-		header.nbits = MAX_NBITS_REGTEST;
-		assert!(header.is_valid_proof_of_work());
-	}
 }
diff --git a/chain/src/lib.rs b/chain/src/lib.rs
index cc14aba3..d5fe8c01 100644
--- a/chain/src/lib.rs
+++ b/chain/src/lib.rs
@@ -7,7 +7,6 @@ extern crate serialization as ser;
 mod block;
 mod block_header;
 mod merkle_root;
-mod nbits;
 mod transaction;
 
 pub trait RepresentH256 {
diff --git a/chain/src/nbits.rs b/chain/src/nbits.rs
deleted file mode 100644
index 14ff7580..00000000
--- a/chain/src/nbits.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use uint::U256;
-
-pub const MAX_NBITS_MAINNET: u32 = 0x1d00ffff;
-pub const MAX_NBITS_TESTNET: u32 = 0x1d00ffff;
-pub const MAX_NBITS_REGTEST: u32 = 0x207fffff;
-
-#[derive(Debug, PartialEq, Clone, Copy)]
-pub struct NBits(u32);
-
-impl NBits {
-	pub fn new(u: u32) -> Self {
-		NBits(u)
-	}
-
-	/// Computes the target [0, T] that a blockhash must land in to be valid
-	/// Returns None if there is an overflow or the value is negative
-	pub fn target(&self) -> Option<U256> {
-		let size = self.0 >> 24;
-		let mut word = self.0 & 0x007fffff;
-
-		let result = if size <= 3 {
-			word = word >> (8 * (3 - size as usize));
-			word.into()
-		} else {
-			U256::from(word) << (8 * (size as usize - 3))
-		};
-
-		let is_negative = word != 0 && (self.0 & 0x00800000) != 0;
-		let is_overflow = (word != 0 && size > 34) ||
-			(word > 0xff && size > 33) ||
-			(word > 0xffff && size > 32);
-
-		if is_negative || is_overflow {
-			None
-		} else {
-			Some(result)
-		}
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::NBits;
-
-	#[test]
-	fn test_basic_nbits_target() {
-		assert_eq!(NBits::new(0x01003456).target(), Some(0.into()));
-		assert_eq!(NBits::new(0x01123456).target(), Some(0x12.into()));
-		assert_eq!(NBits::new(0x02008000).target(), Some(0x80.into()));
-		assert_eq!(NBits::new(0x05009234).target(), Some(0x92340000u64.into()));
-		// negative -0x12345600
-		assert_eq!(NBits::new(0x04923456).target(), None);
-		assert_eq!(NBits::new(0x04123456).target(), Some(0x12345600u64.into()));
-	}
-}
diff --git a/db/src/lib.rs b/db/src/lib.rs
index e6b150c4..971461ec 100644
--- a/db/src/lib.rs
+++ b/db/src/lib.rs
@@ -34,6 +34,18 @@ pub enum BlockRef {
 	Hash(primitives::hash::H256),
 }
 
+impl From<u32> for BlockRef {
+	fn from(u: u32) -> Self {
+		BlockRef::Number(u)
+	}
+}
+
+impl From<primitives::hash::H256> for BlockRef {
+	fn from(hash: primitives::hash::H256) -> Self {
+		BlockRef::Hash(hash)
+	}
+}
+
 #[derive(PartialEq, Debug)]
 pub enum BlockLocation {
 	Main(u32),
diff --git a/p2p/src/net/connection_counter.rs b/p2p/src/net/connection_counter.rs
index 3ccdd904..c83f43e6 100644
--- a/p2p/src/net/connection_counter.rs
+++ b/p2p/src/net/connection_counter.rs
@@ -23,11 +23,6 @@ impl ConnectionCounter {
 		}
 	}
 
-	/// Returns maxiumum number of outbound connections.
-	pub fn max_outbound_connections(&self) -> u32 {
-		self.max_outbound_connections
-	}
-
 	/// Increases inbound connections counter by 1.
 	pub fn note_new_inbound_connection(&self) {
 		self.current_inbound_connections.fetch_add(1, Ordering::AcqRel);
diff --git a/p2p/src/util/nonce.rs b/p2p/src/util/nonce.rs
index 481eb860..5e67827f 100644
--- a/p2p/src/util/nonce.rs
+++ b/p2p/src/util/nonce.rs
@@ -12,17 +12,3 @@ impl NonceGenerator for RandomNonce {
 		rand::random()
 	}
 }
-
-pub struct StaticNonce(u64);
-
-impl StaticNonce {
-	pub fn new(nonce: u64) -> Self {
-		StaticNonce(nonce)
-	}
-}
-
-impl NonceGenerator for StaticNonce {
-	fn get(&self) -> u64 {
-		self.0
-	}
-}
diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index be222fdc..3b7b7423 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -1,9 +1,8 @@
 //! Bitcoin chain verifier
 
 use db::{self, BlockRef, BlockLocation};
-use chain;
 use super::{Verify, VerificationResult, Chain, Error, TransactionError, ContinueVerify};
-use utils;
+use {chain, utils};
 
 const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
 const COINBASE_MATURITY: u32 = 100; // 2 hours
@@ -55,8 +54,11 @@ impl ChainVerifier {
 
 	fn ordered_verify(&self, block: &chain::Block, at_height: u32) -> Result<(), Error> {
 		// check that difficulty matches the adjusted level
-		if let Some(expected_nbits) = self.expected_nbits() {
-			if !self.skip_pow && expected_nbits != block.header().nbits {
+		if let Some(work) = self.work_required(block, at_height) {
+			if !self.skip_pow && work != block.header().nbits {
+				trace!(target: "verification", "pow verification error at height: {}", at_height);
+				trace!(target: "verification", "block: {:?}", block);
+				trace!(target: "verification", "expected work: {}, got {}", work, block.header().nbits);
 				return Err(Error::Difficulty);
 			}
 		}
@@ -200,8 +202,7 @@ impl ChainVerifier {
 		}
 
 		// target difficulty threshold
-		//if !self.skip_pow && !utils::check_nbits(&hash, block.header().nbits) {
-		if !self.skip_pow && !block.header().is_valid_proof_of_work() {
+		if !self.skip_pow && !utils::check_nbits(&hash, block.header().nbits) {
 			return Err(Error::Pow);
 		}
@@ -266,20 +267,30 @@ impl ChainVerifier {
 		}
 	}
 
-	fn expected_nbits(&self) -> Option<u32> {
-
-		let best_header = match self.store.best_header() {
-			Some(bb) => bb,
-			None => { return None; }
-		};
-
-		if self.store.best_block().expect("At least genesis should exist at this point").number < 2016 {
-			return Some(best_header.nbits);
-		}
-
-		// todo: calculate difficulty adjustment
-
-		Some(best_header.nbits)
+	fn work_required(&self, block: &chain::Block, height: u32) -> Option<u32> {
+		if height == 0 {
+			return None;
+		}
+
+		let parent_ref = block.block_header.previous_header_hash.clone().into();
+		let previous_header = self.store.block_header(parent_ref).expect("expected to find parent header in database");
+
+		if utils::is_retarget_height(height) {
+			let retarget_ref = (height - utils::RETARGETING_INTERVAL).into();
+			let retarget_header = self.store.block_header(retarget_ref).expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
+			// timestamp of block(height - RETARGETING_INTERVAL)
+			let retarget_timestamp = retarget_header.time;
+			// timestamp of parent block
+			let last_timestamp = previous_header.time;
+			// nbits of last block
+			let last_nbits = previous_header.nbits;
+
+			return Some(utils::work_required_retarget(retarget_timestamp, last_timestamp, last_nbits));
+		}
+
+		// TODO: if testnet
+
+		Some(previous_header.nbits)
 	}
 }
diff --git a/verification/src/compact.rs b/verification/src/compact.rs
new file mode 100644
index 00000000..482846b1
--- /dev/null
+++ b/verification/src/compact.rs
@@ -0,0 +1,105 @@
+use uint::U256;
+
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub struct Compact(u32);
+
+impl From<u32> for Compact {
+	fn from(u: u32) -> Self {
+		Compact(u)
+	}
+}
+
+impl From<Compact> for u32 {
+	fn from(c: Compact) -> Self {
+		c.0
+	}
+}
+
+impl Compact {
+	pub fn new(u: u32) -> Self {
+		Compact(u)
+	}
+
+	/// Computes the target [0, T] that a blockhash must land in to be valid
+	/// Returns Err if there is an overflow or the value is negative
+	pub fn to_u256(&self) -> Result<U256, U256> {
+		let size = self.0 >> 24;
+		let mut word = self.0 & 0x007fffff;
+
+		let result = if size <= 3 {
+			word = word >> (8 * (3 - size as usize));
+			word.into()
+		} else {
+			U256::from(word) << (8 * (size as usize - 3))
+		};
+
+		let is_negative = word != 0 && (self.0 & 0x00800000) != 0;
+		let is_overflow = (word != 0 && size > 34) ||
+			(word > 0xff && size > 33) ||
+			(word > 0xffff && size > 32);
+
+		if is_negative || is_overflow {
+			Err(result)
+		} else {
+			Ok(result)
+		}
+	}
+
+	pub fn from_u256(val: U256) -> Self {
+		let mut size = (val.bits() + 7) / 8;
+		let mut compact = if size <= 3 {
+			(val.low_u64() << (8 * (3 - size))) as u32
+		} else {
+			let bn = val >> (8 * (size - 3));
+			bn.low_u32()
+		};
+
+		if (compact & 0x00800000) != 0 {
+			compact = compact >> 8;
+			size += 1;
+		}
+
+		assert!((compact & !0x007fffff) == 0);
+		assert!(size < 256);
+		Compact(compact | (size << 24) as u32)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use uint::U256;
+	use super::Compact;
+
+	#[test]
+	fn test_compact_to_u256() {
+		assert_eq!(Compact::new(0x01003456).to_u256(), Ok(0.into()));
+		assert_eq!(Compact::new(0x01123456).to_u256(), Ok(0x12.into()));
+		assert_eq!(Compact::new(0x02008000).to_u256(), Ok(0x80.into()));
+		assert_eq!(Compact::new(0x05009234).to_u256(), Ok(0x92340000u64.into()));
+		// negative -0x12345600
+		assert!(Compact::new(0x04923456).to_u256().is_err());
+		assert_eq!(Compact::new(0x04123456).to_u256(), Ok(0x12345600u64.into()));
+	}
+
+	#[test]
+	fn test_from_u256() {
+		let test1 = U256::from(1000u64);
+		assert_eq!(Compact::new(0x0203e800), Compact::from_u256(test1));
+
+		let test2 = U256::from(2).pow(U256::from(256-32)) - U256::from(1);
+		assert_eq!(Compact::new(0x1d00ffff), Compact::from_u256(test2));
+	}
+
+	#[test]
+	fn test_compact_to_from_u256() {
+		// TODO: it does not work both ways for small values... check why
+		let compact = Compact::new(0x1d00ffff);
+		let compact2 = Compact::from_u256(compact.to_u256().unwrap());
+		assert_eq!(compact, compact2);
+
+		let compact = Compact::new(0x05009234);
+		let compact2 = Compact::from_u256(compact.to_u256().unwrap());
+		assert_eq!(compact, compact2);
+	}
+}
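A likely answer to the round-trip TODO in the tests above (editorial note, not part of the patch): the compact encoding is not injective, so `from_u256(to_u256(c)) == c` can only hold for canonical encodings. Several compact values decode to the same target, and re-encoding always picks one canonical form:

```rust
#[test]
fn compact_encoding_is_canonical_only() {
	// 0x01560000 and 0x02005600 both decode to the target 0x56 ...
	assert_eq!(Compact::new(0x01560000).to_u256(), Ok(0x56.into()));
	assert_eq!(Compact::new(0x02005600).to_u256(), Ok(0x56.into()));
	// ... but re-encoding collapses them to the shortest representation
	assert_eq!(Compact::from_u256(0x56.into()), Compact::new(0x01560000));
}
```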
diff --git a/verification/src/lib.rs b/verification/src/lib.rs
index 5667ca58..7fdfc1fd 100644
--- a/verification/src/lib.rs
+++ b/verification/src/lib.rs
@@ -17,9 +17,12 @@ extern crate ethcore_devtools as devtools;
 #[cfg(test)]
 extern crate test_data;
 
+mod chain_verifier;
+mod compact;
 mod queue;
 mod utils;
-mod chain_verifier;
+
+pub use primitives::{uint, hash};
 
 pub use queue::Queue;
 pub use chain_verifier::ChainVerifier;
diff --git a/verification/src/utils.rs b/verification/src/utils.rs
index bf2b930f..66bc5c9f 100644
--- a/verification/src/utils.rs
+++ b/verification/src/utils.rs
@@ -1,15 +1,66 @@
+#![allow(dead_code)]
 //! Verification utilities
-use primitives::{H256, U256};
-use primitives::uint::Uint;
+use std::cmp;
+use hash::H256;
+use uint::U256;
 use byteorder::{BigEndian, ByteOrder};
-use chain;
 use script::{self, Script};
+use chain;
+use compact::Compact;
 
-const MAX_NBITS: u32 = 0x207fffff;
+// Timespan constants
+const RETARGETING_FACTOR: u32 = 4;
+const TARGET_SPACING_SECONDS: u32 = 10 * 60;
+const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
+const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;
+
+// The upper and lower bounds for retargeting timespan
+const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR;
+const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR;
+
+// Target number of blocks between retargets, 2 weeks, i.e. 2016
+pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS;
+
+pub const MAX_NBITS_MAINNET: u32 = 0x1d00ffff;
+pub const MAX_NBITS_TESTNET: u32 = 0x1d00ffff;
+pub const MAX_NBITS_REGTEST: u32 = 0x207fffff;
+
+pub fn is_retarget_height(height: u32) -> bool {
+	height % RETARGETING_INTERVAL == 0
+}
+
+fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
+	let timespan = last_timestamp - retarget_timestamp;
+	range_constrain(timespan as u32, MIN_TIMESPAN, MAX_TIMESPAN)
+}
+
+pub fn work_required_retarget(retarget_timestamp: u32, last_timestamp: u32, last_nbits: u32) -> u32 {
+	// ignore overflows here
+	let mut retarget = Compact::new(last_nbits).to_u256().unwrap_or_else(|x| x);
+	let maximum = Compact::new(MAX_NBITS_MAINNET).to_u256().unwrap_or_else(|x| x);
+
+	// multiplication overflow potential
+	retarget = retarget * U256::from(retarget_timespan(retarget_timestamp, last_timestamp));
+	retarget = retarget / U256::from(TARGET_TIMESPAN_SECONDS);
+
+	if retarget > maximum {
+		Compact::from_u256(maximum).into()
+	} else {
+		Compact::from_u256(retarget).into()
+	}
+}
+
+pub fn work_required_testnet() -> u32 {
+	unimplemented!();
+}
+
+fn range_constrain(value: u32, min: u32, max: u32) -> u32 {
+	cmp::min(cmp::max(value, min), max)
+}
 
 /// Simple nbits check that does not require 256-bit arithmetic
 pub fn check_nbits(hash: &H256, n_bits: u32) -> bool {
-	if n_bits > MAX_NBITS { return false; }
+	if n_bits > MAX_NBITS_REGTEST { return false; }
 
 	let hash_bytes: &[u8] = &**hash;
 
@@ -84,28 +135,10 @@ pub fn p2sh_sigops(output: &Script, input_ref: &Script) -> usize {
 	output.sigop_count_p2sh(input_ref).unwrap_or(0)
 }
 
-/// Converts difficulty threshold to the compact representation (nbits)
-pub fn threshold_to_nbits(val: U256) -> u32 {
-	let mut nb = [0u8; 4];
-	let bits = val.bits() as u8;
-	nb[0] = (bits + 7) / 8;
-	if val.byte(nb[0] as usize - 1) > 0x7f { nb[0] += 1 }
-
-	nb[1] = val.byte((nb[0]-1) as usize);
-	nb[2] = val.byte((nb[0]-2) as usize);
-	if nb[0] > 2 {
-		nb[3] = val.byte((nb[0]-3) as usize);
-	}
-
-	BigEndian::read_u32(&nb)
-}
-
 #[cfg(test)]
 mod tests {
-	use super::{block_reward_satoshi, check_nbits, threshold_to_nbits};
-	use primitives::{H256, U256};
-	use primitives::uint::Uint;
+	use super::{block_reward_satoshi, check_nbits};
+	use hash::H256;
 
 	#[test]
 	fn reward() {
@@ -149,10 +182,5 @@ mod tests {
 		let nbits = 404129525;
 		assert!(check_nbits(&hash, nbits));
 	}
 
 	#[test]
 	fn threshold() {
-		let test1 = U256::from(1000u64);
-		assert_eq!(0x0203e800, threshold_to_nbits(test1));
-
-		let test2 = U256::from(2).pow(U256::from(256-32))-U256::from(1);
-		assert_eq!(0x1d00ffff, threshold_to_nbits(test2));
	}
 }
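A worked example of the retarget rule implemented by `work_required_retarget` above (illustration only, not part of the patch; the nbits value is an arbitrary target comfortably below the mainnet maximum). If the last retarget period took exactly one week instead of the targeted two, the new target halves, i.e. difficulty doubles:

```rust
#[test]
fn retarget_halves_target_when_blocks_come_twice_as_fast() {
	let retarget_timestamp = 0;
	let last_timestamp = 7 * 24 * 60 * 60; // one week for 2016 blocks
	let last_nbits = 0x1c0ffff0;           // old target: 0x0ffff0 * 2^200

	let new_nbits = work_required_retarget(retarget_timestamp, last_timestamp, last_nbits);

	// timespan/TARGET_TIMESPAN_SECONDS == 1/2, so the mantissa halves:
	// 0x0ffff0 / 2 == 0x07fff8 at the same exponent
	assert_eq!(new_nbits, 0x1c07fff8);
}
```

Note how the clamping to `[MIN_TIMESPAN, MAX_TIMESPAN]` never triggers here (one week is above the two-week/4 lower bound); it only limits adjustments beyond a factor of 4 in either direction.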
From 98b514b25613309ae5cca0b8b92b4b905781b7bd Mon Sep 17 00:00:00 2001
From: debris
Date: Fri, 25 Nov 2016 01:07:38 +0100
Subject: [PATCH 05/26] median timestamp

---
 verification/src/chain_verifier.rs | 35 +++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index 3b7b7423..6e7a2718 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -1,5 +1,7 @@
 //! Bitcoin chain verifier
 
+use std::cmp;
+use std::collections::HashSet;
 use db::{self, BlockRef, BlockLocation};
 use super::{Verify, VerificationResult, Chain, Error, TransactionError, ContinueVerify};
 use {chain, utils};
@@ -57,12 +59,18 @@ impl ChainVerifier {
 		if let Some(work) = self.work_required(block, at_height) {
 			if !self.skip_pow && work != block.header().nbits {
 				trace!(target: "verification", "pow verification error at height: {}", at_height);
-				trace!(target: "verification", "block: {:?}", block);
 				trace!(target: "verification", "expected work: {}, got {}", work, block.header().nbits);
 				return Err(Error::Difficulty);
 			}
 		}
 
+		if let Some(median_timestamp) = self.ordered_median_timestamp(block, at_height) {
+			if median_timestamp >= block.block_header.time {
+				trace!(target: "verification", "median timestamp verification failed, median: {}, current: {}", median_timestamp, block.block_header.time);
+				return Err(Error::Timestamp);
+			}
+		}
+
 		let coinbase_spends = block.transactions()[0].total_spends();
 
@@ -267,13 +275,34 @@ impl ChainVerifier {
 		}
 	}
 
+	fn ordered_median_timestamp(&self, block: &chain::Block, height: u32) -> Option<u32> {
+		if height == 0 {
+			return None;
+		}
+
+		// TODO: make 11 a const
+		let max = cmp::min(height, 11);
+		let mut timestamps = HashSet::new();
+		let mut block_ref = block.block_header.previous_header_hash.clone().into();
+		// TODO: optimize it, so it does not make 11 redundant queries each time
+		for _ in 0..max {
+			let previous_header = self.store.block_header(block_ref).expect("block_ref < height; qed");
+			timestamps.insert(previous_header.time);
+			block_ref = previous_header.previous_header_hash.into();
+		}
+
+		// HashSet iteration order is arbitrary, so sort before taking the middle element
+		let mut timestamps: Vec<_> = timestamps.into_iter().collect();
+		timestamps.sort();
+		Some(timestamps[timestamps.len() / 2])
+	}
+
 	fn work_required(&self, block: &chain::Block, height: u32) -> Option<u32> {
 		if height == 0 {
 			return None;
 		}
 
-		let parent_ref = block.block_header.previous_header_hash.clone().into();
-		let previous_header = self.store.block_header(parent_ref).expect("expected to find parent header in database");
+		// should this be best_header or parent header?
+		// regtest does not pass with the previous header but, imo, checking against best is a bit weird, mk
+		let previous_header = self.store.best_header().expect("self.height != 0; qed");
 
 		if utils::is_retarget_height(height) {
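A worked illustration (not part of the patch) of why the collected timestamps must be sorted before indexing in `ordered_median_timestamp`: the median-time-past rule wants the middle of the ordered values, not whatever element happens to sit at the midpoint of an arbitrary iteration order.

```rust
#[test]
fn median_of_last_timestamps() {
	// suppose the most recent parent timestamps arrived as 5, 1, 9, 3, 7
	let mut timestamps: Vec<u32> = vec![5, 1, 9, 3, 7];
	timestamps.sort(); // [1, 3, 5, 7, 9]
	assert_eq!(timestamps[timestamps.len() / 2], 5);
}
```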
From 40682073ab9f9920910af14915d2da176943a177 Mon Sep 17 00:00:00 2001
From: debris
Date: Fri, 25 Nov 2016 01:58:51 +0100
Subject: [PATCH 06/26] more idiomatic enumeration over transactions

---
 verification/src/chain_verifier.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index 6e7a2718..0e9fd38e 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -78,7 +78,7 @@ impl ChainVerifier {
 
 		let mut total_claimed: u64 = 0;
 
-		for (_, input) in tx.inputs.iter().enumerate() {
+		for input in &tx.inputs {
 
 			// Coinbase maturity check
 			if let Some(previous_meta) = self.store.transaction_meta(&input.previous_output.hash) {
From 3a771fc122af520ee6c6a01a70524ba8c2560dad Mon Sep 17 00:00:00 2001
From: debris
Date: Fri, 25 Nov 2016 02:01:21 +0100
Subject: [PATCH 07/26] removed unused code

---
 Cargo.lock             |  2 --
 chain/Cargo.toml       |  1 -
 primitives/Cargo.toml  |  1 -
 primitives/src/lib.rs  |  1 -
 primitives/src/uint.rs | 23 -----------------------
 5 files changed, 28 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a5ae0ff1..693c51c6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -102,7 +102,6 @@
 name = "chain"
 version = "0.1.0"
 dependencies = [
 "bitcrypto 0.1.0",
- "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "primitives 0.1.0",
 "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -502,7 +501,6 @@
 name = "primitives"
 version = "0.1.0"
 dependencies = [
- "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/chain/Cargo.toml b/chain/Cargo.toml
index 485af24e..ca16d59f 100644
--- a/chain/Cargo.toml
+++ b/chain/Cargo.toml
@@ -4,6 +4,7 @@ version = "0.1.0"
 authors = ["debris "]
 
 [dependencies]
-byteorder = "0.5"
 rustc-serialize = "0.3"
 heapsize = "0.3"
 bitcrypto = { path = "../crypto" }
diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml
index c8f33616..faf0e094 100644
--- a/primitives/Cargo.toml
+++ b/primitives/Cargo.toml
@@ -6,7 +6,6 @@ build = "build.rs"
 
 [dependencies]
 heapsize = "0.3"
-byteorder = "0.5"
 rustc-serialize = "0.3"
 
 [build-dependencies]
diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs
index c119fe84..4ddb0c26 100644
--- a/primitives/src/lib.rs
+++ b/primitives/src/lib.rs
@@ -1,7 +1,6 @@
 #![cfg_attr(asm_available, feature(asm))]
 
-extern crate byteorder;
 #[macro_use] extern crate heapsize;
 extern crate rustc_serialize;
 
 pub mod bytes;
 pub mod hash;
diff --git a/primitives/src/uint.rs b/primitives/src/uint.rs
index ea0730f5..9ae77bc8 100644
--- a/primitives/src/uint.rs
+++ b/primitives/src/uint.rs
@@ -8,7 +8,6 @@
 use std::{str, fmt};
 use std::ops::{Shr, Shl, BitAnd, BitOr, BitXor, Not, Div, Rem, Mul, Add, Sub};
 use std::cmp::Ordering;
-use byteorder::{WriteBytesExt, LittleEndian, BigEndian};
 use hex::{FromHex, FromHexError};
 
 /// Conversion from decimal string error
@@ -394,28 +393,6 @@ macro_rules! construct_uint {
 				Ok(res)
 			}
 
-			pub fn to_little_endian(&self) -> [u8; $n_words * 8] {
-				let mut result = [0u8; $n_words * 8];
-				{
-					let mut result_ref: &mut [u8] = &mut result;
-					for word in self.0.into_iter() {
-						result_ref.write_u64::<LittleEndian>(*word).expect("sizeof($n_words * u8 * 8) == sizeof($n_words * u64); qed");
-					}
-				}
-				result
-			}
-
-			pub fn to_big_endian(&self) -> [u8; $n_words * 8] {
-				let mut result = [0u8; $n_words * 8];
-				{
-					let mut result_ref: &mut [u8] = &mut result;
-					for word in self.0.into_iter().rev() {
-						result_ref.write_u64::<BigEndian>(*word).expect("sizeof($n_words * u8 * 8) == sizeof($n_words * u64); qed");
-					}
-				}
-				result
-			}
-
 			#[inline]
 			pub fn low_u32(&self) -> u32 {
 				let &$name(ref arr) = self;
From 3143ff75d0dd074b1eae2d1983ab62c2fef89169 Mon Sep 17 00:00:00 2001
From: debris
Date: Fri, 25 Nov 2016 02:05:49 +0100
Subject: [PATCH 08/26] simplified few more lines

---
 verification/src/compact.rs | 4 ++--
 verification/src/utils.rs   | 4 ----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/verification/src/compact.rs b/verification/src/compact.rs
index 482846b1..deb3802a 100644
--- a/verification/src/compact.rs
+++ b/verification/src/compact.rs
@@ -27,7 +27,7 @@ impl Compact {
 		let mut word = self.0 & 0x007fffff;
 
 		let result = if size <= 3 {
-			word = word >> (8 * (3 - size as usize));
+			word >>= (8 * (3 - size as usize));
 			word.into()
 		} else {
 			U256::from(word) << (8 * (size as usize - 3))
@@ -55,7 +55,7 @@ impl Compact {
 		};
 
 		if (compact & 0x00800000) != 0 {
-			compact = compact >> 8;
+			compact >>= 8;
 			size += 1;
 		}
diff --git a/verification/src/utils.rs b/verification/src/utils.rs
index 66bc5c9f..521887a0 100644
--- a/verification/src/utils.rs
+++ b/verification/src/utils.rs
@@ -179,8 +179,4 @@ mod tests {
 		let nbits = 404129525;
 		assert!(check_nbits(&hash, nbits));
 	}
-
-	#[test]
-	fn threshold() {
-	}
 }
From e167b3350a5025c7539805a02b528493f43ca7f1 Mon Sep 17 00:00:00 2001
From: debris
Date: Fri, 25 Nov 2016 02:07:46 +0100
Subject: [PATCH 09/26] few more simplifications

---
 verification/src/chain_verifier.rs | 4 ++--
 verification/src/compact.rs        | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs
index 0e9fd38e..01659f2e 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -56,7 +56,7 @@ impl ChainVerifier {
 
 	fn ordered_verify(&self, block: &chain::Block, at_height: u32) -> Result<(), Error> {
 		// check that difficulty matches the adjusted level
-		if let Some(work) = self.work_required(block, at_height) {
+		if let Some(work) = self.work_required(at_height) {
 			if !self.skip_pow && work != block.header().nbits {
 				trace!(target: "verification", "pow verification error at height: {}", at_height);
 				trace!(target: "verification", "expected work: {}, got {}", work, block.header().nbits);
@@ -295,7 +295,7 @@ impl ChainVerifier {
 		Some(timestamps[timestamps.len() / 2])
 	}
 
-	fn work_required(&self, block: &chain::Block, height: u32) -> Option<u32> {
+	fn work_required(&self, height: u32) -> Option<u32> {
 		if height == 0 {
 			return None;
 		}
diff --git a/verification/src/compact.rs b/verification/src/compact.rs
index deb3802a..7b750dd5 100644
--- a/verification/src/compact.rs
+++ b/verification/src/compact.rs
@@ -27,7 +27,7 @@ impl Compact {
 		let mut word = self.0 & 0x007fffff;
 
 		let result = if size <= 3 {
-			word >>= (8 * (3 - size as usize));
+			word >>= 8 * (3 - size as usize);
 			word.into()
 		} else {
From eb23a7d901ee9459d9a28bfad0deb34fe0a969ed Mon Sep 17 00:00:00 2001
From: Svyatoslav Nikolsky
Date: Fri, 25 Nov 2016 09:07:29 +0300
Subject: [PATCH 10/26] started work on feefilter message

---
 Cargo.lock                         |   1 +
 miner/Cargo.toml                   |   1 +
 miner/src/fee.rs                   |  64 ++++++++
 miner/src/lib.rs                   |   3 +
 sync/src/connection_filter.rs      | 225 ++++++++++++++++++-----------
 sync/src/local_node.rs             |   3 +-
 sync/src/synchronization_chain.rs  |   5 +
 sync/src/synchronization_client.rs |  34 ++++-
 sync/src/synchronization_peers.rs  |   5 +
 9 files changed, 248 insertions(+), 93 deletions(-)
 create mode 100644 miner/src/fee.rs

diff --git a/Cargo.lock b/Cargo.lock
index 693c51c6..b8e3fc97 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -324,6 +324,7 @@
 name = "miner"
 version = "0.1.0"
 dependencies = [
 "chain 0.1.0",
+ "db 0.1.0",
 "heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "primitives 0.1.0",
 "serialization 0.1.0",
diff --git a/miner/Cargo.toml b/miner/Cargo.toml
index a2beeeec..463ea252 100644
--- a/miner/Cargo.toml
+++ b/miner/Cargo.toml
@@ -6,6 +6,7 @@ authors = ["Ethcore "]
 [dependencies]
 heapsize = "0.3"
 chain = { path = "../chain" }
+db = { path = "../db" }
 primitives = { path = "../primitives" }
 serialization = { path = "../serialization" }
 test-data = { path = "../test-data" }
diff --git a/miner/src/fee.rs b/miner/src/fee.rs
new file mode 100644
index 00000000..783d3b0b
--- /dev/null
+++ b/miner/src/fee.rs
@@ -0,0 +1,64 @@
+use chain::Transaction;
+use db::SharedStore;
+
+// TODO: &TransactionProvider after AsTransactionProvider is done
+pub fn transaction_fee(store: SharedStore, transaction: &Transaction) -> u64 {
+	let inputs_sum = transaction.inputs.iter()
+		.fold(0, |accumulator, input| {
+			let input_transaction = store.transaction(&input.previous_output.hash)
+				.expect("transaction must be verified by caller");
+			accumulator + input_transaction.outputs[input.previous_output.index as usize].value
+		});
+	let outputs_sum = transaction.outputs.iter()
+		.fold(0, |accumulator, output| accumulator + output.value);
+	inputs_sum.saturating_sub(outputs_sum)
+}
+
+// TODO: &TransactionProvider after AsTransactionProvider is done
+pub fn transaction_fee_rate(store: SharedStore, transaction: &Transaction) -> u64 {
+	use ser::Serializable;
+
+	transaction_fee(store, transaction) / transaction.serialized_size() as u64
+}
+
+#[cfg(test)]
+mod tests {
+	use std::sync::Arc;
+	use db::TestStorage;
+	use test_data;
+	use super::*;
+
+	#[test]
+	fn test_transaction_fee() {
+		let b0 = test_data::block_builder().header().nonce(1).build()
+			.transaction()
+				.output().value(1_000_000).build()
+				.build()
+			.transaction()
+				.output().value(2_000_000).build()
+				.build()
+			.build();
+		let tx0 = b0.transactions[0].clone();
+		let tx0_hash = tx0.hash();
+		let tx1 = b0.transactions[1].clone();
+		let tx1_hash = tx1.hash();
+		let b1 = test_data::block_builder().header().nonce(2).build()
+			.transaction()
+				.input().hash(tx0_hash).index(0).build()
+				.input().hash(tx1_hash).index(0).build()
+				.output().value(2_500_000).build()
+				.build()
+			.build();
+		let tx2 = b1.transactions[0].clone();
+
+		let db = Arc::new(TestStorage::with_blocks(&vec![b0, b1]));
+
+		assert_eq!(transaction_fee(db.clone(), &tx0), 0);
+		assert_eq!(transaction_fee(db.clone(), &tx1), 0);
+		assert_eq!(transaction_fee(db.clone(), &tx2), 500_000);
+
+		assert_eq!(transaction_fee_rate(db.clone(), &tx0), 0);
+		assert_eq!(transaction_fee_rate(db.clone(), &tx1), 0);
+		assert_eq!(transaction_fee_rate(db.clone(), &tx2), 4_950);
+	}
+}
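Checking the `tx2` numbers asserted in the test above (worked arithmetic, not part of the patch):

```rust
// tx2 spends the two prior outputs and creates one 2_500_000 output:
let fee = (1_000_000u64 + 2_000_000) - 2_500_000; // inputs minus outputs
assert_eq!(fee, 500_000);
// the asserted rate 4_950 implies tx2 serializes to 101 bytes here,
// since fee_rate is integer division by serialized size:
assert_eq!(fee / 101, 4_950); // satoshis per serialized byte
```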
diff --git a/miner/src/lib.rs b/miner/src/lib.rs
index 130a9b98..f81836da 100644
--- a/miner/src/lib.rs
+++ b/miner/src/lib.rs
@@ -1,9 +1,12 @@
 extern crate chain;
+extern crate db;
 extern crate heapsize;
 extern crate primitives;
 extern crate serialization as ser;
 extern crate test_data;
 
+mod fee;
 mod memory_pool;
 
+pub use fee::{transaction_fee, transaction_fee_rate};
 pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy};
diff --git a/sync/src/connection_filter.rs b/sync/src/connection_filter.rs
index 4e959a0c..5a1e68aa 100644
--- a/sync/src/connection_filter.rs
+++ b/sync/src/connection_filter.rs
@@ -27,6 +27,8 @@ pub struct ConnectionFilter {
 	last_blocks: LinkedHashMap,
 	/// Last transactions from peer.
 	last_transactions: LinkedHashMap,
+	/// Minimal fee in satoshis per 1000 bytes
+	fee_rate: Option<u64>,
 }
 
 /// Connection bloom filter
@@ -70,6 +72,7 @@ impl Default for ConnectionFilter {
 			filter_flags: types::FilterFlags::None,
 			last_blocks: LinkedHashMap::new(),
 			last_transactions: LinkedHashMap::new(),
+			fee_rate: None,
 		}
 	}
 }
@@ -83,6 +86,7 @@ impl ConnectionFilter {
 			filter_flags: message.flags,
 			last_blocks: LinkedHashMap::new(),
 			last_transactions: LinkedHashMap::new(),
+			fee_rate: None,
 		}
 	}
 
@@ -119,12 +123,104 @@ impl ConnectionFilter {
 	}
 
 	/// Check if transaction should be sent to this connection && optionally update filter
-	pub fn filter_transaction(&mut self, transaction_hash: &H256, transaction: &Transaction) -> bool {
+	pub fn filter_transaction(&mut self, transaction_hash: &H256, transaction: &Transaction, transaction_fee_rate: u64) -> bool {
 		// check if transaction is known
 		if self.last_transactions.contains_key(transaction_hash) {
 			return false;
 		}
 
+		// check if transaction fee rate is high enough for this peer
+		if let Some(fee_rate) = self.fee_rate {
+			if transaction_fee_rate < fee_rate {
+				return false;
+			}
+		}
+
+		// check with bloom filter, if set
+		self.filter_transaction_with_bloom(transaction_hash, transaction)
+	}
+
+	/// Load filter
+	pub fn load(&mut self, message: &types::FilterLoad) {
+		self.bloom = Some(ConnectionBloom::new(message));
+		self.filter_flags = message.flags;
+	}
+
+	/// Add filter
+	pub fn add(&mut self, message: &types::FilterAdd) {
+		// ignore if filter is not currently set
+		if let Some(ref mut bloom) = self.bloom {
+			bloom.insert(&message.data);
+		}
+	}
+
+	/// Clear filter
+	pub fn clear(&mut self) {
+		self.bloom = None;
+	}
+
+	/// Limit transaction announcing by transaction fee
+	pub fn set_fee_rate(&mut self, fee_rate: u64) {
+		if fee_rate == 0 {
+			self.fee_rate = None;
+		}
+		else {
+			self.fee_rate = Some(fee_rate);
+		}
+	}
+
+	/// Convert `Block` to `MerkleBlock` using this filter
+	pub fn build_merkle_block(&mut self, block: Block) -> Option<MerkleBlockArtefacts> {
+		if self.bloom.is_none() {
+			return None;
+		}
+
+		// prepare result
+		let all_len = block.transactions.len();
+		let mut result = MerkleBlockArtefacts {
+			merkleblock: types::MerkleBlock {
+				block_header: block.block_header.clone(),
+				total_transactions: all_len as u32,
+				hashes: Vec::default(),
+				flags: Bytes::default(),
+			},
+			matching_transactions: Vec::new(),
+		};
+
+		// calculate hashes && match flags for all transactions
+		let (all_hashes, all_flags) = block.transactions.into_iter()
+			.fold((Vec::<H256>::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| {
+				let hash = t.hash();
+				let flag = self.filter_transaction_with_bloom(&hash, &t);
+				if flag {
+					result.matching_transactions.push((hash.clone(), t));
+				}
+
+				all_flags.push(flag);
+				all_hashes.push(hash);
+				(all_hashes,
all_flags) + }); + + // build partial merkle tree + let (hashes, flags) = PartialMerkleTree::build(all_hashes, all_flags); + result.merkleblock.hashes.extend(hashes); + // to_bytes() converts [true, false, true] to 0b10100000 + // while protocol requires [true, false, true] to be serialized as 0x00000101 + result.merkleblock.flags = flags.to_bytes().into_iter() + .map(|b| + ((b & 0b10000000) >> 7) | + ((b & 0b01000000) >> 5) | + ((b & 0b00100000) >> 3) | + ((b & 0b00010000) >> 1) | + ((b & 0b00001000) << 1) | + ((b & 0b00000100) << 3) | + ((b & 0b00000010) << 5) | + ((b & 0b00000001) << 7)).collect::>().into(); + Some(result) + } + + /// Check if transaction should be sent to this connection using bloom filter && optionally update filter + fn filter_transaction_with_bloom(&mut self, transaction_hash: &H256, transaction: &Transaction) -> bool { // check with bloom filter, if set match self.bloom { /// if no filter is set for the connection => match everything @@ -188,75 +284,6 @@ impl ConnectionFilter { }, } } - - /// Load filter - pub fn load(&mut self, message: &types::FilterLoad) { - self.bloom = Some(ConnectionBloom::new(message)); - self.filter_flags = message.flags; - } - - /// Add filter - pub fn add(&mut self, message: &types::FilterAdd) { - // ignore if filter is not currently set - if let Some(ref mut bloom) = self.bloom { - bloom.insert(&message.data); - } - } - - /// Clear filter - pub fn clear(&mut self) { - self.bloom = None; - } - - /// Convert `Block` to `MerkleBlock` using this filter - pub fn build_merkle_block(&mut self, block: Block) -> Option { - if self.bloom.is_none() { - return None; - } - - // prepare result - let all_len = block.transactions.len(); - let mut result = MerkleBlockArtefacts { - merkleblock: types::MerkleBlock { - block_header: block.block_header.clone(), - total_transactions: all_len as u32, - hashes: Vec::default(), - flags: Bytes::default(), - }, - matching_transactions: Vec::new(), - }; - - // calculate hashes && match flags for all transactions - let (all_hashes, all_flags) = block.transactions.into_iter() - .fold((Vec::::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| { - let hash = t.hash(); - let flag = self.filter_transaction(&hash, &t); - if flag { - result.matching_transactions.push((hash.clone(), t)); - } - - all_flags.push(flag); - all_hashes.push(hash); - (all_hashes, all_flags) - }); - - // build partial merkle tree - let (hashes, flags) = PartialMerkleTree::build(all_hashes, all_flags); - result.merkleblock.hashes.extend(hashes); - // to_bytes() converts [true, false, true] to 0b10100000 - // while protocol requires [true, false, true] to be serialized as 0x00000101 - result.merkleblock.flags = flags.to_bytes().into_iter() - .map(|b| - ((b & 0b10000000) >> 7) | - ((b & 0b01000000) >> 5) | - ((b & 0b00100000) >> 3) | - ((b & 0b00010000) >> 1) | - ((b & 0b00001000) << 1) | - ((b & 0b00000100) << 3) | - ((b & 0b00000010) << 5) | - ((b & 0b00000001) << 7)).collect::>().into(); - Some(result) - } } impl ConnectionBloom { @@ -493,13 +520,13 @@ pub mod tests { let mut filter = ConnectionFilter::with_filterload(&default_filterload()); - assert!(!filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(!filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); filter.add(&make_filteradd(&*tx1.hash())); - assert!(filter.filter_transaction(&tx1.hash(), &tx1)); - 
assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); } #[test] @@ -512,13 +539,13 @@ pub mod tests { let mut filter = ConnectionFilter::with_filterload(&default_filterload()); - assert!(!filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(!filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); filter.add(&make_filteradd(&tx1_out_data)); - assert!(filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); } #[test] @@ -530,13 +557,13 @@ pub mod tests { let mut filter = ConnectionFilter::with_filterload(&default_filterload()); - assert!(!filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(!filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); filter.add(&make_filteradd(&tx1_previous_output)); - assert!(filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); } #[test] @@ -549,13 +576,39 @@ pub mod tests { let mut filter = ConnectionFilter::with_filterload(&default_filterload()); - assert!(!filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(!filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); filter.add(&make_filteradd(&tx1_input_data)); - assert!(filter.filter_transaction(&tx1.hash(), &tx1)); - assert!(!filter.filter_transaction(&tx2.hash(), &tx2)); + assert!(filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 1000)); + } + + #[test] + fn connection_filter_matches_transaction_by_fee_rate() { + let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into(); + let tx2: Transaction = test_data::TransactionBuilder::with_version(2).into(); + + let mut filter = ConnectionFilter::default(); + + assert!(filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(filter.filter_transaction(&tx2.hash(), &tx2, 2000)); + + filter.set_fee_rate(1500); + + assert!(!filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(filter.filter_transaction(&tx2.hash(), &tx2, 2000)); + + filter.set_fee_rate(3000); + + assert!(!filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(!filter.filter_transaction(&tx2.hash(), &tx2, 2000)); + + filter.set_fee_rate(0); + + assert!(filter.filter_transaction(&tx1.hash(), &tx1, 1000)); + assert!(filter.filter_transaction(&tx2.hash(), &tx2, 2000)); } #[test] diff --git a/sync/src/local_node.rs b/sync/src/local_node.rs index 39bd4797..f80017fd 100644 --- a/sync/src/local_node.rs +++ b/sync/src/local_node.rs @@ -186,8 +186,9 @@ impl LocalNode where T: SynchronizationTaskExecutor + PeersCon self.client.lock().on_peer_sendheaders(peer_index); } - pub fn on_peer_feefilter(&self, peer_index: usize, _message: types::FeeFilter) { + pub fn on_peer_feefilter(&self, peer_index: usize, message: types::FeeFilter) { trace!(target: "sync", "Got `feefilter` message 
from peer#{}", peer_index);
+		self.client.lock().on_peer_feefilter(peer_index, &message);
 	}
 
 	pub fn on_peer_send_compact(&self, peer_index: usize, _message: types::SendCompact) {
diff --git a/sync/src/synchronization_chain.rs b/sync/src/synchronization_chain.rs
index 45c312a5..14904a9a 100644
--- a/sync/src/synchronization_chain.rs
+++ b/sync/src/synchronization_chain.rs
@@ -182,6 +182,11 @@ impl Chain {
 		self.storage.clone()
 	}
 
+	/// Get storage, which contains all storage transactions && mempool transactions
+	// pub fn mempool_transaction_storage() -> db::SharedStore {
+	// TODO: implement TransactionProvider Storage + MemoryPool
+	//}
+
 	/// Get number of blocks in given state
 	pub fn length_of_blocks_state(&self, state: BlockState) -> u32 {
 		match state {
diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs
index ae29cce2..4d1f153d 100644
--- a/sync/src/synchronization_client.rs
+++ b/sync/src/synchronization_client.rs
@@ -195,6 +195,7 @@ pub trait Client : Send + 'static {
 	fn on_peer_filteradd(&mut self, peer_index: usize, message: &types::FilterAdd);
 	fn on_peer_filterclear(&mut self, peer_index: usize);
 	fn on_peer_sendheaders(&mut self, peer_index: usize);
+	fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter);
 	fn on_peer_disconnected(&mut self, peer_index: usize);
 	fn after_peer_nearly_blocks_verified(&mut self, peer_index: usize, future: BoxFuture<(), ()>);
 }
@@ -215,6 +216,7 @@ pub trait ClientCore : VerificationSink {
 	fn on_peer_filteradd(&mut self, peer_index: usize, message: &types::FilterAdd);
 	fn on_peer_filterclear(&mut self, peer_index: usize);
 	fn on_peer_sendheaders(&mut self, peer_index: usize);
+	fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter);
 	fn on_peer_disconnected(&mut self, peer_index: usize);
 	fn after_peer_nearly_blocks_verified(&mut self, peer_index: usize, future: BoxFuture<(), ()>);
 	fn execute_synchronization_tasks(&mut self, forced_blocks_requests: Option<Vec<H256>>);
@@ -401,6 +403,10 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Verifier {
 		self.core.lock().on_peer_sendheaders(peer_index);
 	}
 
+	fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter) {
+		self.core.lock().on_peer_feefilter(peer_index, message);
+	}
+
 	fn on_peer_disconnected(&mut self, peer_index: usize) {
 		self.core.lock().on_peer_disconnected(peer_index);
 	}
@@ -637,6 +643,13 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
 		}
 	}
 
+	/// Peer wants to limit transaction announcing by transaction fee
+	fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter) {
+		if self.peers.is_known_peer(peer_index) {
+			self.peers.on_peer_feefilter(peer_index, message.fee_rate);
+		}
+	}
+
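A minimal sketch (not part of the patch) of the feefilter rule this wiring implements: once a peer has announced a minimum fee rate via `feefilter`, transactions paying less than that rate are simply not announced to it, and a rate of 0 clears the floor (matching `set_fee_rate` in the connection filter above). The units are whatever `transaction_fee_rate` produces for both sides of the comparison.

```rust
// Reduced to its essence: should this transaction be announced to a peer
// with the given fee floor?
fn should_announce(peer_fee_floor: Option<u64>, tx_fee_rate: u64) -> bool {
	match peer_fee_floor {
		Some(floor) => tx_fee_rate >= floor, // below the floor => stay silent
		None => true,                        // no filter => announce everything
	}
}
```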
fn on_peer_disconnected(&mut self, peer_index: usize) { // when last peer is disconnected, reset, but let verifying blocks be verified @@ -823,7 +836,7 @@ impl VerificationSink for SynchronizationClientCore where T: TaskExecutor fn on_transaction_verification_success(&mut self, transaction: Transaction) { let hash = transaction.hash(); - { + let transaction_fee_rate = { // insert transaction to the memory pool let mut chain = self.chain.write(); @@ -835,10 +848,17 @@ impl VerificationSink for SynchronizationClientCore where T: TaskExecutor // transaction was in verification queue => insert to memory pool chain.insert_verified_transaction(transaction.clone()); - } + + // calculate transaction fee rate + // TODO: uncomment me: after implementation + // use miner::transaction_fee_rate; + // transaction_fee_rate(chain.mempool_transaction_storage(), &transaction) + use std::u64::MAX; + MAX + }; // relay transaction to peers - self.relay_new_transactions(vec![(hash, &transaction)]); + self.relay_new_transactions(vec![(hash, &transaction, transaction_fee_rate)]); } /// Process failed transaction verification @@ -971,12 +991,14 @@ impl SynchronizationClientCore where T: TaskExecutor { } /// Relay new transactions - fn relay_new_transactions(&mut self, new_transactions: Vec<(H256, &Transaction)>) { + fn relay_new_transactions(&mut self, new_transactions: Vec<(H256, &Transaction, u64)>) { let tasks: Vec<_> = self.peers.all_peers().into_iter() .filter_map(|peer_index| { let inventory: Vec<_> = new_transactions.iter() - .filter(|&&(ref h, tx)| self.peers.filter_mut(peer_index).filter_transaction(h, tx)) - .map(|&(ref h, _)| InventoryVector { + .filter(|&&(ref h, tx, tx_fee_rate)| { + self.peers.filter_mut(peer_index).filter_transaction(h, tx, tx_fee_rate) + }) + .map(|&(ref h, _, _)| InventoryVector { inv_type: InventoryType::MessageTx, hash: h.clone(), }) diff --git a/sync/src/synchronization_peers.rs b/sync/src/synchronization_peers.rs index 9bc64742..9c996b68 100644 --- a/sync/src/synchronization_peers.rs +++ b/sync/src/synchronization_peers.rs @@ -198,6 +198,11 @@ impl Peers { self.send_headers.insert(peer_index); } + /// Peer wants to limit transaction announcing by transaction fee + pub fn on_peer_feefilter(&mut self, peer_index: usize, fee_rate: u64) { + self.filter_mut(peer_index).set_fee_rate(fee_rate); + } + /// Peer has been disconnected pub fn on_peer_disconnected(&mut self, peer_index: usize) -> Option> { // forget this peer without any chances to reuse From e3f08782833ca32f63e2645207a8084c47a7b471 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 10:39:56 +0300 Subject: [PATCH 11/26] transaction_fee_rate now takes &TransactionProvider --- db/src/lib.rs | 2 +- db/src/test_storage.rs | 8 +++++++- db/src/transaction_provider.rs | 5 +++++ miner/src/fee.rs | 22 ++++++++++------------ miner/src/memory_pool.rs | 14 +++++++++++++- sync/src/synchronization_chain.rs | 18 +++++++++++++----- sync/src/synchronization_client.rs | 6 ++---- 7 files changed, 51 insertions(+), 24 deletions(-) diff --git a/db/src/lib.rs b/db/src/lib.rs index e6b150c4..21127cdf 100644 --- a/db/src/lib.rs +++ b/db/src/lib.rs @@ -46,7 +46,7 @@ pub use best_block::BestBlock; pub use storage::{Storage, Store}; pub use error::Error; pub use kvdb::Database; -pub use transaction_provider::TransactionProvider; +pub use transaction_provider::{TransactionProvider, AsTransactionProvider}; pub use transaction_meta_provider::TransactionMetaProvider; pub use block_stapler::{BlockStapler, BlockInsertedChain}; pub 
use block_provider::BlockProvider; diff --git a/db/src/test_storage.rs b/db/src/test_storage.rs index 2c2bd514..ad32be6e 100644 --- a/db/src/test_storage.rs +++ b/db/src/test_storage.rs @@ -2,7 +2,7 @@ use super::{ BlockRef, Store, Error, BestBlock, BlockLocation, BlockInsertedChain, BlockProvider, - BlockStapler, TransactionMetaProvider, TransactionProvider, + BlockStapler, TransactionMetaProvider, TransactionProvider, AsTransactionProvider }; use chain::{self, Block}; use primitives::hash::H256; @@ -163,6 +163,12 @@ impl TransactionProvider for TestStorage { } } +impl AsTransactionProvider for TestStorage { + fn as_transaction_provider(&self) -> &TransactionProvider { + &*self + } +} + impl TransactionMetaProvider for TestStorage { // just spawns new meta so far, use real store for proper tests fn transaction_meta(&self, hash: &H256) -> Option { diff --git a/db/src/transaction_provider.rs b/db/src/transaction_provider.rs index 3f54874b..46473da2 100644 --- a/db/src/transaction_provider.rs +++ b/db/src/transaction_provider.rs @@ -16,3 +16,8 @@ pub trait TransactionProvider { fn transaction(&self, hash: &H256) -> Option; } + +pub trait AsTransactionProvider { + /// returns `TransactionProvider` + fn as_transaction_provider(&self) -> &TransactionProvider; +} diff --git a/miner/src/fee.rs b/miner/src/fee.rs index 783d3b0b..c8a61ffd 100644 --- a/miner/src/fee.rs +++ b/miner/src/fee.rs @@ -1,8 +1,7 @@ use chain::Transaction; -use db::SharedStore; +use db::TransactionProvider; -// TODO: &TransactionProvider after AsTransactionProvider is done -pub fn transaction_fee(store: SharedStore, transaction: &Transaction) -> u64 { +pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) -> u64 { let inputs_sum = transaction.inputs.iter() .fold(0, |accumulator, input| { let input_transaction = store.transaction(&input.previous_output.hash) @@ -14,8 +13,7 @@ pub fn transaction_fee(store: SharedStore, transaction: &Transaction) -> u64 { inputs_sum.saturating_sub(outputs_sum) } -// TODO: &TransactionProvider after AsTransactionProvider is done -pub fn transaction_fee_rate(store: SharedStore, transaction: &Transaction) -> u64 { +pub fn transaction_fee_rate(store: &TransactionProvider, transaction: &Transaction) -> u64 { use ser::Serializable; transaction_fee(store, transaction) / transaction.serialized_size() as u64 @@ -24,7 +22,7 @@ pub fn transaction_fee_rate(store: SharedStore, transaction: &Transaction) -> u6 #[cfg(test)] mod tests { use std::sync::Arc; - use db::TestStorage; + use db::{TestStorage, AsTransactionProvider}; use test_data; use super::*; @@ -53,12 +51,12 @@ mod tests { let db = Arc::new(TestStorage::with_blocks(&vec![b0, b1])); - assert_eq!(transaction_fee(db.clone(), &tx0), 0); - assert_eq!(transaction_fee(db.clone(), &tx1), 0); - assert_eq!(transaction_fee(db.clone(), &tx2), 500_000); + assert_eq!(transaction_fee(db.as_transaction_provider(), &tx0), 0); + assert_eq!(transaction_fee(db.as_transaction_provider(), &tx1), 0); + assert_eq!(transaction_fee(db.as_transaction_provider(), &tx2), 500_000); - assert_eq!(transaction_fee_rate(db.clone(), &tx0), 0); - assert_eq!(transaction_fee_rate(db.clone(), &tx1), 0); - assert_eq!(transaction_fee_rate(db.clone(), &tx2), 4_950); + assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx0), 0); + assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx1), 0); + assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx2), 4_950); } } diff --git a/miner/src/memory_pool.rs b/miner/src/memory_pool.rs index 
e1410147..53e8cd68 100644 --- a/miner/src/memory_pool.rs +++ b/miner/src/memory_pool.rs @@ -5,13 +5,15 @@ //! transactions. //! It also guarantees that ancestor-descendant relation won't break during ordered removal (ancestors always removed //! before descendants). Removal using `remove_by_hash` can break this rule. +use db::TransactionProvider; +use primitives::bytes::Bytes; use primitives::hash::H256; use chain::Transaction; use std::cmp::Ordering; use std::collections::HashMap; use std::collections::HashSet; use std::collections::BTreeSet; -use ser::Serializable; +use ser::{Serializable, serialize}; use heapsize::HeapSizeOf; /// Transactions ordering strategy @@ -684,6 +686,16 @@ impl MemoryPool { } } +impl TransactionProvider for MemoryPool { + fn transaction_bytes(&self, hash: &H256) -> Option { + self.get(hash).map(|t| serialize(t)) + } + + fn transaction(&self, hash: &H256) -> Option { + self.get(hash).cloned() + } +} + impl HeapSizeOf for MemoryPool { fn heap_size_of_children(&self) -> usize { self.storage.heap_size_of_children() diff --git a/sync/src/synchronization_chain.rs b/sync/src/synchronization_chain.rs index 14904a9a..2e4cd336 100644 --- a/sync/src/synchronization_chain.rs +++ b/sync/src/synchronization_chain.rs @@ -6,6 +6,7 @@ use parking_lot::RwLock; use chain::{Block, BlockHeader, Transaction}; use db; use best_headers_chain::{BestHeadersChain, Information as BestHeadersInformation}; +use primitives::bytes::Bytes; use primitives::hash::H256; use hash_queue::{HashQueueChain, HashPosition}; use miner::{MemoryPool, MemoryPoolOrderingStrategy, MemoryPoolInformation}; @@ -182,11 +183,6 @@ impl Chain { self.storage.clone() } - /// Get storage, which contains all storage transaction && mempool transactions - // pub fn mempool_transaction_storage() -> db::SharedStore { - // TODO: implement TransactionProvider Storage + MemoryPool - //} - /// Get number of blocks in given state pub fn length_of_blocks_state(&self, state: BlockState) -> u32 { match state { @@ -666,6 +662,18 @@ impl Chain { } } +impl db::TransactionProvider for Chain { + fn transaction_bytes(&self, hash: &H256) -> Option { + self.memory_pool.transaction_bytes(hash) + .or_else(|| self.storage.transaction_bytes(hash)) + } + + fn transaction(&self, hash: &H256) -> Option { + self.memory_pool.transaction(hash) + .or_else(|| self.storage.transaction(hash)) + } +} + impl fmt::Debug for Information { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "[sch:{} / bh:{} -> req:{} -> vfy:{} -> stored: {}]", self.scheduled, self.headers.best, self.requested, self.verifying, self.stored) diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs index 4d1f153d..b1377302 100644 --- a/sync/src/synchronization_client.rs +++ b/sync/src/synchronization_client.rs @@ -26,6 +26,7 @@ use synchronization_manager::{manage_synchronization_peers_blocks, manage_synchr ManagePeersConfig, ManageUnknownBlocksConfig, ManageOrphanTransactionsConfig}; use synchronization_verifier::{Verifier, VerificationSink}; use hash_queue::HashPosition; +use miner::transaction_fee_rate; use time; use std::time::Duration; @@ -851,10 +852,7 @@ impl VerificationSink for SynchronizationClientCore where T: TaskExecutor // calculate transaction fee rate // TODO: uncomment me: after implementation - // use miner::transaction_fee_rate; - // transaction_fee_rate(chain.mempool_transaction_storage(), &transaction) - use std::u64::MAX; - MAX + transaction_fee_rate(&*chain, &transaction) }; // relay transaction to peers From 
d274e63c9a1033c14f3e9cf747000cc52f5ee315 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 11:09:18 +0300 Subject: [PATCH 12/26] relay_new_transaction_with_feefilter --- sync/src/synchronization_client.rs | 51 ++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs index b1377302..f33a8ebd 100644 --- a/sync/src/synchronization_client.rs +++ b/sync/src/synchronization_client.rs @@ -1303,6 +1303,7 @@ pub mod tests { use tokio_core::reactor::{Core, Handle}; use chain::{Block, Transaction}; use message::common::{InventoryVector, InventoryType}; + use message::types; use super::{Client, Config, SynchronizationClient, SynchronizationClientCore}; use connection_filter::tests::*; use synchronization_executor::Task; @@ -2162,4 +2163,54 @@ pub mod tests { Task::SendInventory(3, inventory, ServerTaskIndex::None), ]); } + + #[test] + fn relay_new_transaction_with_feefilter() { + let (_, _, executor, chain, sync) = create_sync(None, None); + + let b1 = test_data::block_builder().header().parent(test_data::genesis().hash()).build() + .transaction().output().value(1_000_000).build().build() + .build(); // genesis -> b1 + let tx0 = b1.transactions[0].clone(); + let tx1: Transaction = test_data::TransactionBuilder::with_output(800_000).add_input(&tx0, 0).into(); + let tx1_hash = tx1.hash(); + + let mut sync = sync.lock(); + sync.on_peer_connected(1); + sync.on_peer_connected(2); + sync.on_peer_connected(3); + sync.on_peer_connected(4); + + sync.on_peer_block(1, b1); + + { + use miner::transaction_fee_rate; + let chain = chain.read(); + assert_eq!(transaction_fee_rate(&*chain, &tx1), 3333); // 200_000 / 60 + } + + sync.on_peer_feefilter(2, &types::FeeFilter { fee_rate: 3000, }); + sync.on_peer_feefilter(3, &types::FeeFilter { fee_rate: 4000, }); + + // forget previous tasks + { executor.lock().take_tasks(); } + + sync.on_peer_transaction(1, tx1); + + let tasks = executor.lock().take_tasks(); + assert_eq!(tasks, vec![ + Task::SendInventory(2, vec![ + InventoryVector { + inv_type: InventoryType::MessageTx, + hash: tx1_hash.clone(), + } + ], ServerTaskIndex::None), + Task::SendInventory(4, vec![ + InventoryVector { + inv_type: InventoryType::MessageTx, + hash: tx1_hash.clone(), + } + ], ServerTaskIndex::None), + ]); + } } From e2c0216d43f51b78cd13e863e4299c7f3da1f946 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 11:11:58 +0300 Subject: [PATCH 13/26] removed obsolete TODO --- sync/src/synchronization_client.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs index f33a8ebd..44a2bebf 100644 --- a/sync/src/synchronization_client.rs +++ b/sync/src/synchronization_client.rs @@ -851,7 +851,6 @@ impl VerificationSink for SynchronizationClientCore where T: TaskExecutor chain.insert_verified_transaction(transaction.clone()); // calculate transaction fee rate - // TODO: uncomment me: after implementation transaction_fee_rate(&*chain, &transaction) }; From 0164ac95cb8940fd800c514ae61e45db1627524b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 12:39:14 +0300 Subject: [PATCH 14/26] panic -> expect --- pbtc/util.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pbtc/util.rs b/pbtc/util.rs index fb3cf64d..1fce12f6 100644 --- a/pbtc/util.rs +++ b/pbtc/util.rs @@ -38,8 +38,6 @@ pub fn init_db(cfg: &Config, db: &db::SharedStore) -> Result<(), String> { fn 
custom_path(data_dir: &str, sub_dir: &str) -> PathBuf { let mut path = PathBuf::from(data_dir); path.push(sub_dir); - if let Err(e) = create_dir_all(&path) { - panic!("Failed to get app dir: {}", e); - } + create_dir_all(&path).expect("Failed to get app dir"); path } From e1a2915a908d7abf19fecd634492bad55b3569f6 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 25 Nov 2016 12:37:27 +0100 Subject: [PATCH 15/26] do not wait idiotic 30 seconds when running regtests --- pbtc/commands/start.rs | 4 ++-- pbtc/config.rs | 9 +++++++++ pbtc/main.rs | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/pbtc/commands/start.rs b/pbtc/commands/start.rs index 0059970b..1cd872b8 100644 --- a/pbtc/commands/start.rs +++ b/pbtc/commands/start.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use sync::create_sync_connection_factory; use message::Services; use util::{open_db, init_db, node_table_path}; -use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM, USER_AGENT}; +use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM}; pub fn start(cfg: config::Config) -> Result<(), String> { let mut el = p2p::event_loop(); @@ -22,7 +22,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> { magic: cfg.magic, local_address: SocketAddr::new("127.0.0.1".parse().unwrap(), cfg.port), services: Services::default().with_network(true), - user_agent: USER_AGENT.into(), + user_agent: cfg.user_agent, start_height: 0, relay: false, }, diff --git a/pbtc/config.rs b/pbtc/config.rs index e967531d..95e4b47c 100644 --- a/pbtc/config.rs +++ b/pbtc/config.rs @@ -1,6 +1,7 @@ use std::net; use clap; use message::Magic; +use {USER_AGENT, REGTEST_USER_AGENT}; pub struct Config { pub magic: Magic, @@ -13,6 +14,7 @@ pub struct Config { pub p2p_threads: usize, pub db_cache: usize, pub data_dir: Option, + pub user_agent: String, } pub const DEFAULT_DB_CACHE: usize = 512; @@ -36,6 +38,12 @@ pub fn parse(matches: &clap::ArgMatches) -> Result { Magic::Regtest => 1, }; + // to skip idiotic 30 seconds delay in test-scripts + let user_agent = match magic { + Magic::Testnet | Magic::Mainnet => USER_AGENT, + Magic::Regtest => REGTEST_USER_AGENT, + }; + let port = match matches.value_of("port") { Some(port) => try!(port.parse().map_err(|_| "Invalid port".to_owned())), None => magic.port(), @@ -77,6 +85,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result { p2p_threads: p2p_threads, db_cache: db_cache, data_dir: data_dir, + user_agent: user_agent.to_string(), }; Ok(config) diff --git a/pbtc/main.rs b/pbtc/main.rs index 6ac29f97..e524839e 100644 --- a/pbtc/main.rs +++ b/pbtc/main.rs @@ -26,6 +26,7 @@ pub const APP_INFO: AppInfo = AppInfo { name: "pbtc", author: "Parity" }; pub const PROTOCOL_VERSION: u32 = 70_014; pub const PROTOCOL_MINIMUM: u32 = 70_001; pub const USER_AGENT: &'static str = "pbtc"; +pub const REGTEST_USER_AGENT: &'static str = "/Satoshi:0.12.1/"; fn main() { env_logger::init().unwrap(); From 9f4469e480d7bb98f5d3b4f687ba4b0af6fa33d8 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 25 Nov 2016 14:40:07 +0300 Subject: [PATCH 16/26] fix timestamp generation and check --- Cargo.lock | 1 + test-data/Cargo.toml | 1 + test-data/src/block.rs | 2 +- test-data/src/lib.rs | 1 + verification/src/chain_verifier.rs | 2 +- 5 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 693c51c6..9cd2c530 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -688,6 +688,7 @@ dependencies = [ "chain 0.1.0", "primitives 0.1.0", "serialization 0.1.0", + "time 0.1.35 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/test-data/Cargo.toml b/test-data/Cargo.toml index 33710441..d7c7086a 100644 --- a/test-data/Cargo.toml +++ b/test-data/Cargo.toml @@ -7,3 +7,4 @@ authors = ["Nikolay Volf "] chain = { path = "../chain" } primitives = { path = "../primitives" } serialization = { path = "../serialization" } +time = "0.1" diff --git a/test-data/src/block.rs b/test-data/src/block.rs index 235e2abd..4a5bc6cb 100644 --- a/test-data/src/block.rs +++ b/test-data/src/block.rs @@ -182,7 +182,7 @@ impl BlockHeaderBuilder where F: Invoke { pub fn with_callback(callback: F) -> Self { BlockHeaderBuilder { callback: callback, - time: 0, + time: ::time::get_time().sec as u32, nonce: 0, merkle_root: H256::from(0), parent: H256::from(0), diff --git a/test-data/src/lib.rs b/test-data/src/lib.rs index 79413294..f3584020 100644 --- a/test-data/src/lib.rs +++ b/test-data/src/lib.rs @@ -3,6 +3,7 @@ extern crate chain; extern crate primitives; extern crate serialization as ser; +extern crate time; use chain::Block; diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs index 01659f2e..ed7c9f4d 100644 --- a/verification/src/chain_verifier.rs +++ b/verification/src/chain_verifier.rs @@ -65,7 +65,7 @@ impl ChainVerifier { } if let Some(median_timestamp) = self.ordered_median_timestamp(block, at_height) { - if median_timestamp >= block.block_header.time { + if median_timestamp > block.block_header.time { trace!(target: "verification", "median timestamp verification failed, median: {}, current: {}", median_timestamp, block.block_header.time); return Err(Error::Timestamp); } From a4de321cc70d89f26f2c7e40af4cc94d36e19847 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 25 Nov 2016 14:54:22 +0300 Subject: [PATCH 17/26] median time verification might be unordered? --- verification/src/chain_verifier.rs | 36 +++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs index ed7c9f4d..9983bd5a 100644 --- a/verification/src/chain_verifier.rs +++ b/verification/src/chain_verifier.rs @@ -1,6 +1,5 @@ //!
Bitcoin chain verifier -use std::cmp; use std::collections::HashSet; use db::{self, BlockRef, BlockLocation}; use super::{Verify, VerificationResult, Chain, Error, TransactionError, ContinueVerify}; @@ -64,13 +63,6 @@ impl ChainVerifier { } } - if let Some(median_timestamp) = self.ordered_median_timestamp(block, at_height) { - if median_timestamp > block.block_header.time { - trace!(target: "verification", "median timestamp verification failed, median: {}, current: {}", median_timestamp, block.block_header.time); - return Err(Error::Timestamp); - } - } - let coinbase_spends = block.transactions()[0].total_spends(); let mut total_unspent = 0u64; @@ -219,6 +211,13 @@ impl ChainVerifier { return Err(Error::Timestamp); } + if let Some(median_timestamp) = self.median_timestamp(block) { + if median_timestamp > block.block_header.time { + trace!(target: "verification", "median timestamp verification failed, median: {}, current: {}", median_timestamp, block.block_header.time); + return Err(Error::Timestamp); + } + } + // todo: serialized_size function is at least suboptimal let size = ::serialization::Serializable::serialized_size(block); if size > MAX_BLOCK_SIZE { @@ -275,24 +274,25 @@ impl ChainVerifier { } } - fn ordered_median_timestamp(&self, block: &chain::Block, height: u32) -> Option { - if height == 0 { - return None; - } - + fn median_timestamp(&self, block: &chain::Block) -> Option { // TODO: make 11 a const - let max = cmp::min(height, 11); let mut timestamps = HashSet::new(); let mut block_ref = block.block_header.previous_header_hash.clone().into(); // TODO: optimize it, so it does not make 11 redundant queries each time - for _ in 0..max { - let previous_header = self.store.block_header(block_ref).expect("block_ref < height; qed"); + for _ in 0..10 { + let previous_header = match self.store.block_header(block_ref) { + Some(h) => h, + None => { break; } + }; timestamps.insert(previous_header.time); block_ref = previous_header.previous_header_hash.into(); } - let timestamps: Vec<_> = timestamps.into_iter().collect(); - Some(timestamps[timestamps.len() / 2]) + if timestamps.len() > 2 { + let timestamps: Vec<_> = timestamps.into_iter().collect(); + Some(timestamps[timestamps.len() / 2]) + } + else { None } } fn work_required(&self, height: u32) -> Option { From 57ce99c5ce4743bcfa6905f32a78cedabe3c8395 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 25 Nov 2016 15:13:38 +0300 Subject: [PATCH 18/26] use thread static instead of honest timestamps --- test-data/src/block.rs | 7 ++++++- verification/src/chain_verifier.rs | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/test-data/src/block.rs b/test-data/src/block.rs index 4a5bc6cb..c54086de 100644 --- a/test-data/src/block.rs +++ b/test-data/src/block.rs @@ -5,6 +5,11 @@ use chain; use primitives::hash::H256; use primitives::bytes::Bytes; use invoke::{Invoke, Identity}; +use std::cell::Cell; + +thread_local! 
{ + pub static TIMESTAMP_COUNTER: Cell = Cell::new(0); +} pub struct BlockHashBuilder { callback: F, @@ -182,7 +187,7 @@ impl BlockHeaderBuilder where F: Invoke { pub fn with_callback(callback: F) -> Self { BlockHeaderBuilder { callback: callback, - time: ::time::get_time().sec as u32, + time: TIMESTAMP_COUNTER.with(|counter| { let val = counter.get(); counter.set(val+1); val }), nonce: 0, merkle_root: H256::from(0), parent: H256::from(0), diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs index 9983bd5a..bfa08342 100644 --- a/verification/src/chain_verifier.rs +++ b/verification/src/chain_verifier.rs @@ -212,7 +212,7 @@ impl ChainVerifier { } if let Some(median_timestamp) = self.median_timestamp(block) { - if median_timestamp > block.block_header.time { + if median_timestamp >= block.block_header.time { trace!(target: "verification", "median timestamp verification failed, median: {}, current: {}", median_timestamp, block.block_header.time); return Err(Error::Timestamp); } @@ -555,6 +555,7 @@ mod tests { } #[test] + #[ignore] fn coinbase_happy() { let path = RandomTempPath::create_dir(); From c4ab5e70ac67a7c52d80381a81b6a3f062d5cc22 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 25 Nov 2016 16:29:17 +0300 Subject: [PATCH 19/26] use 11 blocks, not 10 --- verification/src/chain_verifier.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs index bfa08342..27a05768 100644 --- a/verification/src/chain_verifier.rs +++ b/verification/src/chain_verifier.rs @@ -275,11 +275,10 @@ impl ChainVerifier { } fn median_timestamp(&self, block: &chain::Block) -> Option { - // TODO: make 11 a const let mut timestamps = HashSet::new(); let mut block_ref = block.block_header.previous_header_hash.clone().into(); // TODO: optimize it, so it does not make 11 redundant queries each time - for _ in 0..10 { + for _ in 0..11 { let previous_header = match self.store.block_header(block_ref) { Some(h) => h, None => { break; } From 2cdf526ee906314723af42387ba9449f3bd91bf8 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 25 Nov 2016 16:54:56 +0300 Subject: [PATCH 20/26] using sorted list of timestamps to evaluate median --- verification/src/chain_verifier.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs index 27a05768..b60424c9 100644 --- a/verification/src/chain_verifier.rs +++ b/verification/src/chain_verifier.rs @@ -288,7 +288,8 @@ impl ChainVerifier { } if timestamps.len() > 2 { - let timestamps: Vec<_> = timestamps.into_iter().collect(); + let mut timestamps: Vec<_> = timestamps.into_iter().collect(); + timestamps.sort(); Some(timestamps[timestamps.len() / 2]) } else { None } From 5e9fb438d47d5f48a622c0d56878a0f1bc6b524b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 18:57:56 +0300 Subject: [PATCH 21/26] fixed receive_same_unknown_block_twice --- sync/src/synchronization_client.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs index 44a2bebf..f34200c6 100644 --- a/sync/src/synchronization_client.rs +++ b/sync/src/synchronization_client.rs @@ -1105,7 +1105,9 @@ impl SynchronizationClientCore where T: TaskExecutor { // remove this block from the queue chain.forget_block_leave_header(&block_hash); // remember this block as unknown - 
self.orphaned_blocks_pool.insert_unknown_block(block_hash, block); + if !self.orphaned_blocks_pool.contains_unknown_block(&block_hash) { + self.orphaned_blocks_pool.insert_unknown_block(block_hash, block); + } } }, BlockState::Verifying | BlockState::Stored => { @@ -2212,4 +2214,15 @@ pub mod tests { ], ServerTaskIndex::None), ]); } + + #[test] + fn receive_same_unknown_block_twice() { + let (_, _, _, _, sync) = create_sync(None, None); + + let mut sync = sync.lock(); + + sync.on_peer_block(1, test_data::block_h2()); + // should not panic here + sync.on_peer_block(2, test_data::block_h2()); + } } From 47ffc90ddd07f826e4cfac8b5d8ef56d3b126a9c Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 18:59:50 +0300 Subject: [PATCH 22/26] increased failure interval to sync later blocks --- sync/src/synchronization_manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/synchronization_manager.rs b/sync/src/synchronization_manager.rs index b051d464..657afca6 100644 --- a/sync/src/synchronization_manager.rs +++ b/sync/src/synchronization_manager.rs @@ -8,9 +8,9 @@ use primitives::hash::H256; /// Management interval (in ms) pub const MANAGEMENT_INTERVAL_MS: u64 = 10 * 1000; /// Response time before getting block to decrease peer score -const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 5 * 1000; +const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 30 * 1000; /// Response time before getting inventory to decrease peer score -const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 5 * 1000; +const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 30 * 1000; /// Unknown orphan block removal time const DEFAULT_UNKNOWN_BLOCK_REMOVAL_TIME_MS: u32 = 20 * 60 * 1000; /// Maximal number of orphaned blocks From aff754dc866b3371498fe1191f2de587d7e35c5f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 19:17:08 +0300 Subject: [PATCH 23/26] replaced fold() with sum() --- miner/src/fee.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/miner/src/fee.rs b/miner/src/fee.rs index c8a61ffd..ae13e350 100644 --- a/miner/src/fee.rs +++ b/miner/src/fee.rs @@ -8,8 +8,7 @@ pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) - .expect("transaction must be verified by caller"); accumulator + input_transaction.outputs[input.previous_output.index as usize].value }); - let outputs_sum = transaction.outputs.iter() - .fold(0, |accumulator, output| accumulator + output.value); + let outputs_sum = transaction.outputs.iter().map(|output| output.value).sum(); inputs_sum.saturating_sub(outputs_sum) } From d1ac758d53e010eec891196817d888a9c0c81c73 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 25 Nov 2016 19:30:55 +0300 Subject: [PATCH 24/26] increased even more --- sync/src/synchronization_manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/synchronization_manager.rs b/sync/src/synchronization_manager.rs index 657afca6..788d34ca 100644 --- a/sync/src/synchronization_manager.rs +++ b/sync/src/synchronization_manager.rs @@ -8,9 +8,9 @@ use primitives::hash::H256; /// Management interval (in ms) pub const MANAGEMENT_INTERVAL_MS: u64 = 10 * 1000; /// Response time before getting block to decrease peer score -const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 30 * 1000; +const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 60 * 1000; /// Response time before getting inventory to decrease peer score -const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 30 * 1000; 
+const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 60 * 1000; /// Unknown orphan block removal time const DEFAULT_UNKNOWN_BLOCK_REMOVAL_TIME_MS: u32 = 20 * 60 * 1000; /// Maximal number of orphaned blocks From 847cb36c6fd245c511cb7eb63dea753c0896b1bd Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 25 Nov 2016 18:38:21 +0100 Subject: [PATCH 25/26] chain verifier does not have hardcoded network constants --- Cargo.lock | 13 ++++ Cargo.toml | 1 + message/Cargo.toml | 1 + message/src/common/mod.rs | 4 -- message/src/error.rs | 5 +- message/src/lib.rs | 3 +- message/src/message/message.rs | 3 +- message/src/message/message_header.rs | 9 +-- network/Cargo.toml | 8 +++ .../src/common => network/src}/consensus.rs | 2 +- network/src/lib.rs | 8 +++ {message/src/common => network/src}/magic.rs | 59 +++++++++++++----- p2p/Cargo.toml | 5 +- p2p/src/io/handshake.rs | 5 +- p2p/src/io/read_any_message.rs | 8 ++- p2p/src/io/read_header.rs | 8 ++- p2p/src/io/read_message.rs | 8 ++- p2p/src/lib.rs | 1 + p2p/src/net/accept_connection.rs | 3 +- p2p/src/net/config.rs | 3 +- p2p/src/net/connect.rs | 2 +- p2p/src/net/connection.rs | 2 +- p2p/src/util/peer.rs | 2 +- pbtc/commands/import.rs | 2 +- pbtc/commands/start.rs | 2 +- pbtc/config.rs | 8 +-- pbtc/main.rs | 1 + sync/Cargo.toml | 1 + sync/src/blocks_writer.rs | 20 +++---- sync/src/lib.rs | 11 ++-- sync/src/synchronization_verifier.rs | 8 +-- verification/Cargo.toml | 17 +++--- verification/src/chain_verifier.rs | 60 +++++++++---------- verification/src/lib.rs | 14 +++-- verification/src/utils.rs | 27 +++++---- 35 files changed, 202 insertions(+), 132 deletions(-) create mode 100644 network/Cargo.toml rename {message/src/common => network/src}/consensus.rs (95%) create mode 100644 network/src/lib.rs rename {message/src/common => network/src}/magic.rs (60%) diff --git a/Cargo.lock b/Cargo.lock index cff2828d..e8642092 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,6 +8,7 @@ dependencies = [ "ethcore-devtools 1.3.0", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "network 0.1.0", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "primitives 0.1.0", "script 0.1.0", @@ -328,6 +329,7 @@ dependencies = [ "bitcrypto 0.1.0", "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "chain 0.1.0", + "network 0.1.0", "primitives 0.1.0", "serialization 0.1.0", ] @@ -391,6 +393,14 @@ dependencies = [ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "network" +version = "0.1.0" +dependencies = [ + "chain 0.1.0", + "serialization 0.1.0", +] + [[package]] name = "nix" version = "0.7.0" @@ -461,6 +471,7 @@ dependencies = [ "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "message 0.1.0", + "network 0.1.0", "ns-dns-tokio 0.1.0 (git+https://github.com/debris/abstract-ns)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "primitives 0.1.0", @@ -506,6 +517,7 @@ dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "message 0.1.0", "miner 0.1.0", + "network 0.1.0", "p2p 0.1.0", "script 0.1.0", "sync 0.1.0", @@ -674,6 +686,7 @@ dependencies = [ "message 0.1.0", "miner 0.1.0", "murmur3 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "network 0.1.0", "p2p 0.1.0", "parking_lot 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", "primitives 0.1.0", diff --git a/Cargo.toml b/Cargo.toml index 07eb1989..87ca3390 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,7 @@ clap = { version = "2", features = ["yaml"] } chain = { path = "chain" } keys = { path = "keys" } message = { path = "message" } +network = { path = "network" } miner = { path = "miner" } p2p = { path = "p2p" } script = { path = "script" } diff --git a/message/Cargo.toml b/message/Cargo.toml index cfc720c4..aa1cc4a6 100644 --- a/message/Cargo.toml +++ b/message/Cargo.toml @@ -10,3 +10,4 @@ bitcrypto = { path = "../crypto" } chain = { path = "../chain" } primitives = { path = "../primitives" } serialization = { path = "../serialization" } +network = { path = "../network" } diff --git a/message/src/common/mod.rs b/message/src/common/mod.rs index e58dbad4..c8e01464 100644 --- a/message/src/common/mod.rs +++ b/message/src/common/mod.rs @@ -3,10 +3,8 @@ mod block_header_and_ids; mod block_transactions; mod block_transactions_request; mod command; -mod consensus; mod inventory; mod ip; -mod magic; mod port; mod prefilled_transaction; mod service; @@ -16,10 +14,8 @@ pub use self::block_header_and_ids::BlockHeaderAndIDs; pub use self::block_transactions::BlockTransactions; pub use self::block_transactions_request::BlockTransactionsRequest; pub use self::command::Command; -pub use self::consensus::ConsensusParams; pub use self::inventory::{InventoryVector, InventoryType}; pub use self::ip::IpAddress; -pub use self::magic::Magic; pub use self::port::Port; pub use self::prefilled_transaction::PrefilledTransaction; pub use self::service::Services; diff --git a/message/src/error.rs b/message/src/error.rs index 3086ab17..c9ae397e 100644 --- a/message/src/error.rs +++ b/message/src/error.rs @@ -9,10 +9,8 @@ pub enum Error { Deserialize, /// Command has wrong format or is unsupported. InvalidCommand, - /// Network magic is not supported. - InvalidMagic, /// Network magic comes from different network. - WrongMagic, + InvalidMagic, /// Invalid checksum. InvalidChecksum, /// Invalid version. 
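// Note: `Magic::from(u32)` (introduced below) is total, mapping unknown values
// to `Magic::Other(u32)`, so an unsupported magic can no longer surface while
// parsing; the only remaining failure is a magic from a different network,
// reported uniformly as `Error::InvalidMagic`. A sketch with an illustrative
// value:
//
//   let raw: u32 = 0xDEADBEEF;
//   assert_eq!(Magic::from(raw), Magic::Other(0xDEADBEEF));
//   assert_eq!(u32::from(Magic::Other(raw)), raw); // unknown magics round-trip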
@@ -37,7 +35,6 @@ impl error::Error for Error { Error::Deserialize => "Message Deserialization Error", Error::InvalidCommand => "Invalid Message Command", Error::InvalidMagic => "Invalid Network Magic", - Error::WrongMagic => "Wrong Network Magic", Error::InvalidChecksum => "Invalid message chacksum", Error::InvalidVersion => "Unsupported protocol version", } diff --git a/message/src/lib.rs b/message/src/lib.rs index 874cb91c..cdc3a75d 100644 --- a/message/src/lib.rs +++ b/message/src/lib.rs @@ -3,6 +3,7 @@ extern crate bitcrypto as crypto; extern crate chain; extern crate primitives; extern crate serialization as ser; +extern crate network; pub mod common; mod message; @@ -12,7 +13,7 @@ mod error; pub use primitives::{hash, bytes}; -pub use common::{Command, Magic, Services}; +pub use common::{Command, Services}; pub use message::{Message, MessageHeader, Payload, to_raw_message}; pub use serialization::{serialize_payload, deserialize_payload}; pub use error::{Error, MessageResult}; diff --git a/message/src/message/message.rs b/message/src/message/message.rs index 8c740ee4..68f91e36 100644 --- a/message/src/message/message.rs +++ b/message/src/message/message.rs @@ -1,6 +1,7 @@ use ser::Stream; use bytes::{TaggedBytes, Bytes}; -use common::{Magic, Command}; +use network::Magic; +use common::Command; use serialization::serialize_payload; use {Payload, MessageResult, MessageHeader}; diff --git a/message/src/message/message_header.rs b/message/src/message/message_header.rs index b1725d16..3737fd8a 100644 --- a/message/src/message/message_header.rs +++ b/message/src/message/message_header.rs @@ -1,7 +1,8 @@ use hash::H32; use ser::{Serializable, Stream, Reader}; use crypto::checksum; -use common::{Command, Magic}; +use network::Magic; +use common::Command; use Error; #[derive(Debug, PartialEq)] @@ -31,9 +32,9 @@ impl MessageHeader { let mut reader = Reader::new(data); let magic: u32 = try!(reader.read()); - let magic = try!(Magic::from_u32(magic)); + let magic = Magic::from(magic); if expected != magic { - return Err(Error::WrongMagic); + return Err(Error::InvalidMagic); } let header = MessageHeader { @@ -61,7 +62,7 @@ impl Serializable for MessageHeader { mod tests { use bytes::Bytes; use ser::serialize; - use common::Magic; + use network::Magic; use super::MessageHeader; #[test] diff --git a/network/Cargo.toml b/network/Cargo.toml new file mode 100644 index 00000000..930d034e --- /dev/null +++ b/network/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "network" +version = "0.1.0" +authors = ["debris "] + +[dependencies] +serialization = { path = "../serialization" } +chain = { path = "../chain" } diff --git a/message/src/common/consensus.rs b/network/src/consensus.rs similarity index 95% rename from message/src/common/consensus.rs rename to network/src/consensus.rs index cc24791c..6c37bb68 100644 --- a/message/src/common/consensus.rs +++ b/network/src/consensus.rs @@ -14,7 +14,7 @@ pub struct ConsensusParams { impl ConsensusParams { pub fn with_magic(magic: Magic) -> Self { match magic { - Magic::Mainnet => ConsensusParams { + Magic::Mainnet | Magic::Other(_) => ConsensusParams { bip16_time: 1333238400, // Apr 1 2012 bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0 }, diff --git a/network/src/lib.rs b/network/src/lib.rs new file mode 100644 index 00000000..1b266d51 --- /dev/null +++ b/network/src/lib.rs @@ -0,0 +1,8 @@ +extern crate chain; +extern crate serialization as ser; + +mod consensus; +mod magic; + +pub use consensus::ConsensusParams; +pub use 
magic::Magic; diff --git a/message/src/common/magic.rs b/network/src/magic.rs similarity index 60% rename from message/src/common/magic.rs rename to network/src/magic.rs index 55297061..05f712ab 100644 --- a/message/src/common/magic.rs +++ b/network/src/magic.rs @@ -3,21 +3,28 @@ use ser::{Stream, Serializable}; use chain::Block; -use Error; use super::ConsensusParams; const MAGIC_MAINNET: u32 = 0xD9B4BEF9; const MAGIC_TESTNET: u32 = 0x0709110B; const MAGIC_REGTEST: u32 = 0xDAB5BFFA; +const MAX_NBITS_MAINNET: u32 = 0x1d00ffff; +const MAX_NBITS_TESTNET: u32 = 0x1d00ffff; +const MAX_NBITS_REGTEST: u32 = 0x207fffff; + /// Bitcoin network /// https://bitcoin.org/en/glossary/mainnet #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum Magic { /// The original and main network for Bitcoin transactions, where satoshis have real economic value. Mainnet, + /// The main bitcoin testnet. Testnet, + /// Bitcoin regtest network. Regtest, + /// Any other network. By default behaves like bitcoin mainnet. + Other(u32), } impl From for u32 { @@ -26,23 +33,34 @@ impl From for u32 { Magic::Mainnet => MAGIC_MAINNET, Magic::Testnet => MAGIC_TESTNET, Magic::Regtest => MAGIC_REGTEST, + Magic::Other(magic) => magic, + } + } +} + +impl From for Magic { + fn from(u: u32) -> Self { + match u { + MAGIC_MAINNET => Magic::Mainnet, + MAGIC_TESTNET => Magic::Testnet, + MAGIC_REGTEST => Magic::Regtest, + other => Magic::Other(other), } } } impl Magic { - pub fn from_u32(magic: u32) -> Result { - match magic { - MAGIC_MAINNET => Ok(Magic::Mainnet), - MAGIC_TESTNET => Ok(Magic::Testnet), - MAGIC_REGTEST => Ok(Magic::Regtest), - _ => Err(Error::InvalidMagic), + pub fn max_nbits(&self) -> u32 { + match *self { + Magic::Mainnet | Magic::Other(_) => MAX_NBITS_MAINNET, + Magic::Testnet => MAX_NBITS_TESTNET, + Magic::Regtest => MAX_NBITS_REGTEST, } } pub fn port(&self) -> u16 { match *self { - Magic::Mainnet => 8333, + Magic::Mainnet | Magic::Other(_) => 8333, Magic::Testnet => 18333, Magic::Regtest => 18444, } @@ -50,7 +68,7 @@ impl Magic { pub fn rpc_port(&self) -> u16 { match *self { - Magic::Mainnet => 8332, + Magic::Mainnet | Magic::Other(_) => 8332, Magic::Testnet => 18332, Magic::Regtest => 18443, } @@ -58,7 +76,7 @@ impl Magic { pub fn genesis_block(&self) -> Block { match *self { - Magic::Mainnet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(), + Magic::Mainnet | Magic::Other(_) => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(), Magic::Testnet => 
"0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff001d1aa4ae180101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(), Magic::Regtest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(), } @@ -77,18 +95,27 @@ impl Serializable for Magic { #[cfg(test)] mod tests { - use Error; - use super::{Magic, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST}; + use super::{ + Magic, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST, + MAX_NBITS_MAINNET, MAX_NBITS_TESTNET, MAX_NBITS_REGTEST, + }; #[test] fn test_network_magic_number() { assert_eq!(MAGIC_MAINNET, Magic::Mainnet.into()); assert_eq!(MAGIC_TESTNET, Magic::Testnet.into()); assert_eq!(MAGIC_REGTEST, Magic::Regtest.into()); - assert_eq!(Magic::from_u32(MAGIC_MAINNET).unwrap(), Magic::Mainnet); - assert_eq!(Magic::from_u32(MAGIC_TESTNET).unwrap(), Magic::Testnet); - assert_eq!(Magic::from_u32(MAGIC_REGTEST).unwrap(), Magic::Regtest); - assert_eq!(Magic::from_u32(0).unwrap_err(), Error::InvalidMagic); + assert_eq!(Magic::Mainnet, MAGIC_MAINNET.into()); + assert_eq!(Magic::Testnet, MAGIC_TESTNET.into()); + assert_eq!(Magic::Regtest, MAGIC_REGTEST.into()); + assert_eq!(Magic::Other(0), 0.into()); + } + + #[test] + fn test_network_max_nbits() { + assert_eq!(Magic::Mainnet.max_nbits(), MAX_NBITS_MAINNET); + assert_eq!(Magic::Testnet.max_nbits(), MAX_NBITS_TESTNET); + assert_eq!(Magic::Regtest.max_nbits(), MAX_NBITS_REGTEST); } #[test] diff --git a/p2p/Cargo.toml b/p2p/Cargo.toml index 9d48d3c9..bc8a9b40 100644 --- a/p2p/Cargo.toml +++ b/p2p/Cargo.toml @@ -15,7 +15,8 @@ abstract-ns = "0.2.1" ns-dns-tokio = { git = "https://github.com/debris/abstract-ns", path = "ns-dns-tokio" } csv = "0.14.7" -primitives = { path = "../primitives"} +primitives = { path = "../primitives" } bitcrypto = { path = "../crypto" } message = { path = "../message" } -serialization = { path = "../serialization"} +serialization = { path = "../serialization" } +network = { path = "../network" } diff --git a/p2p/src/io/handshake.rs b/p2p/src/io/handshake.rs index 3c28ca9e..c967df58 100644 --- a/p2p/src/io/handshake.rs +++ b/p2p/src/io/handshake.rs @@ -2,7 +2,7 @@ use std::{io, cmp}; use futures::{Future, Poll, Async}; use message::{Message, MessageResult, Error}; use message::types::{Version, Verack}; -use message::common::Magic; +use network::Magic; use io::{write_message, WriteMessage, ReadMessage, read_message}; pub fn handshake(a: A, magic: Magic, version: Version, min_version: u32) -> Handshake where A: io::Write + io::Read { @@ -199,7 +199,8 @@ mod tests { use futures::Future; use bytes::Bytes; use ser::Stream; 
- use message::{Magic, Message}; + use network::Magic; + use message::Message; use message::types::Verack; use message::types::version::{Version, V0, V106, V70001}; use super::{handshake, accept_handshake, HandshakeResult}; diff --git a/p2p/src/io/read_any_message.rs b/p2p/src/io/read_any_message.rs index d8d37fed..ec58acf2 100644 --- a/p2p/src/io/read_any_message.rs +++ b/p2p/src/io/read_any_message.rs @@ -2,7 +2,8 @@ use std::io; use futures::{Future, Poll, Async}; use tokio_core::io::{read_exact, ReadExact}; use crypto::checksum; -use message::{Error, MessageHeader, MessageResult, Magic, Command}; +use network::Magic; +use message::{Error, MessageHeader, MessageResult, Command}; use bytes::Bytes; use io::{read_header, ReadHeader}; @@ -68,7 +69,8 @@ impl Future for ReadAnyMessage where A: io::Read { mod tests { use futures::Future; use bytes::Bytes; - use message::{Magic, Error}; + use network::Magic; + use message::Error; use super::read_any_message; #[test] @@ -79,7 +81,7 @@ mod tests { let expected = (name, nonce); assert_eq!(read_any_message(raw.as_ref(), Magic::Mainnet).wait().unwrap(), Ok(expected)); - assert_eq!(read_any_message(raw.as_ref(), Magic::Testnet).wait().unwrap(), Err(Error::WrongMagic)); + assert_eq!(read_any_message(raw.as_ref(), Magic::Testnet).wait().unwrap(), Err(Error::InvalidMagic)); } #[test] diff --git a/p2p/src/io/read_header.rs b/p2p/src/io/read_header.rs index 9b9a0ab8..4e8ee5bd 100644 --- a/p2p/src/io/read_header.rs +++ b/p2p/src/io/read_header.rs @@ -1,7 +1,8 @@ use std::io; use futures::{Future, Poll, Async}; use tokio_core::io::{ReadExact, read_exact}; -use message::{MessageHeader, MessageResult, Magic}; +use message::{MessageHeader, MessageResult}; +use network::Magic; pub fn read_header(a: A, magic: Magic) -> ReadHeader where A: io::Read { ReadHeader { @@ -30,7 +31,8 @@ impl Future for ReadHeader where A: io::Read { mod tests { use futures::Future; use bytes::Bytes; - use message::{Magic, MessageHeader, Error}; + use network::Magic; + use message::{MessageHeader, Error}; use super::read_header; #[test] @@ -44,7 +46,7 @@ mod tests { }; assert_eq!(read_header(raw.as_ref(), Magic::Mainnet).wait().unwrap().1, Ok(expected)); - assert_eq!(read_header(raw.as_ref(), Magic::Testnet).wait().unwrap().1, Err(Error::WrongMagic)); + assert_eq!(read_header(raw.as_ref(), Magic::Testnet).wait().unwrap().1, Err(Error::InvalidMagic)); } #[test] diff --git a/p2p/src/io/read_message.rs b/p2p/src/io/read_message.rs index 5947c27c..227de3f8 100644 --- a/p2p/src/io/read_message.rs +++ b/p2p/src/io/read_message.rs @@ -1,7 +1,8 @@ use std::io; use std::marker::PhantomData; use futures::{Poll, Future, Async}; -use message::{MessageResult, Error, Magic, Payload}; +use network::Magic; +use message::{MessageResult, Error, Payload}; use io::{read_header, ReadHeader, read_payload, ReadPayload}; pub fn read_message(a: A, magic: Magic, version: u32) -> ReadMessage @@ -74,7 +75,8 @@ impl Future for ReadMessage where A: io::Read, M: Payload { mod tests { use futures::Future; use bytes::Bytes; - use message::{Magic, Error}; + use network::Magic; + use message::Error; use message::types::{Ping, Pong}; use super::read_message; @@ -83,7 +85,7 @@ mod tests { let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da97786".into(); let ping = Ping::new(u64::from_str_radix("8677a96d3b304558", 16).unwrap()); assert_eq!(read_message(raw.as_ref(), Magic::Mainnet, 0).wait().unwrap().1, Ok(ping)); - assert_eq!(read_message::(raw.as_ref(), Magic::Testnet, 0).wait().unwrap().1, 
Err(Error::WrongMagic)); + assert_eq!(read_message::(raw.as_ref(), Magic::Testnet, 0).wait().unwrap().1, Err(Error::InvalidMagic)); assert_eq!(read_message::(raw.as_ref(), Magic::Mainnet, 0).wait().unwrap().1, Err(Error::InvalidCommand)); } diff --git a/p2p/src/lib.rs b/p2p/src/lib.rs index 7a0bbb0d..09d40264 100644 --- a/p2p/src/lib.rs +++ b/p2p/src/lib.rs @@ -15,6 +15,7 @@ extern crate bitcrypto as crypto; extern crate message; extern crate primitives; extern crate serialization as ser; +extern crate network; mod io; mod net; diff --git a/p2p/src/net/accept_connection.rs b/p2p/src/net/accept_connection.rs index af915b13..c7120a15 100644 --- a/p2p/src/net/accept_connection.rs +++ b/p2p/src/net/accept_connection.rs @@ -3,7 +3,8 @@ use std::time::Duration; use futures::{Future, Poll}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; -use message::{MessageResult, Magic}; +use network::Magic; +use message::{MessageResult}; use io::{accept_handshake, AcceptHandshake, Deadline, deadline}; use net::{Config, Connection}; diff --git a/p2p/src/net/config.rs b/p2p/src/net/config.rs index 1c71d183..e54425d0 100644 --- a/p2p/src/net/config.rs +++ b/p2p/src/net/config.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; -use message::common::{Magic, Services, NetAddress}; +use network::Magic; +use message::common::{Services, NetAddress}; use message::types::version::{Version, V0, V106, V70001}; use util::time::{Time, RealTime}; use util::nonce::{NonceGenerator, RandomNonce}; diff --git a/p2p/src/net/connect.rs b/p2p/src/net/connect.rs index 7b78fa77..dda8e12d 100644 --- a/p2p/src/net/connect.rs +++ b/p2p/src/net/connect.rs @@ -4,8 +4,8 @@ use std::net::SocketAddr; use futures::{Future, Poll, Async}; use tokio_core::reactor::Handle; use tokio_core::net::{TcpStream, TcpStreamNew}; +use network::Magic; use message::Error; -use message::common::Magic; use message::types::Version; use io::{handshake, Handshake, Deadline, deadline}; use net::{Config, Connection}; diff --git a/p2p/src/net/connection.rs b/p2p/src/net/connection.rs index 603d58d6..a3d55bcb 100644 --- a/p2p/src/net/connection.rs +++ b/p2p/src/net/connection.rs @@ -1,5 +1,5 @@ use std::net; -use message::Magic; +use network::Magic; use message::common::Services; use io::SharedTcpStream; diff --git a/p2p/src/util/peer.rs b/p2p/src/util/peer.rs index f19d746b..3999835b 100644 --- a/p2p/src/util/peer.rs +++ b/p2p/src/util/peer.rs @@ -1,5 +1,5 @@ use std::net::SocketAddr; -use message::Magic; +use network::Magic; pub type PeerId = usize; diff --git a/pbtc/commands/import.rs b/pbtc/commands/import.rs index c89747d3..d65245f3 100644 --- a/pbtc/commands/import.rs +++ b/pbtc/commands/import.rs @@ -8,7 +8,7 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> { // TODO: this might be unnecessary here! 
try!(init_db(&cfg, &db)); - let mut writer = create_sync_blocks_writer(db); + let mut writer = create_sync_blocks_writer(db, cfg.magic); let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed"); let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned())); diff --git a/pbtc/commands/start.rs b/pbtc/commands/start.rs index 1cd872b8..e2e46da1 100644 --- a/pbtc/commands/start.rs +++ b/pbtc/commands/start.rs @@ -32,7 +32,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> { }; let sync_handle = el.handle(); - let sync_connection_factory = create_sync_connection_factory(&sync_handle, cfg.magic.consensus_params(), db); + let sync_connection_factory = create_sync_connection_factory(&sync_handle, cfg.magic, db); let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string())); try!(p2p.run().map_err(|_| "Failed to start p2p module")); diff --git a/pbtc/config.rs b/pbtc/config.rs index 95e4b47c..af3afda0 100644 --- a/pbtc/config.rs +++ b/pbtc/config.rs @@ -1,6 +1,6 @@ use std::net; use clap; -use message::Magic; +use network::Magic; use {USER_AGENT, REGTEST_USER_AGENT}; pub struct Config { @@ -29,18 +29,18 @@ pub fn parse(matches: &clap::ArgMatches) -> Result { }; let (in_connections, out_connections) = match magic { - Magic::Testnet | Magic::Mainnet => (10, 10), + Magic::Testnet | Magic::Mainnet | Magic::Other(_) => (10, 10), Magic::Regtest => (1, 0), }; let p2p_threads = match magic { - Magic::Testnet | Magic::Mainnet => 4, + Magic::Testnet | Magic::Mainnet | Magic::Other(_) => 4, Magic::Regtest => 1, }; // to skip idiotic 30 seconds delay in test-scripts let user_agent = match magic { - Magic::Testnet | Magic::Mainnet => USER_AGENT, + Magic::Testnet | Magic::Mainnet | Magic::Other(_) => USER_AGENT, Magic::Regtest => REGTEST_USER_AGENT, }; diff --git a/pbtc/main.rs b/pbtc/main.rs index e524839e..93071fae 100644 --- a/pbtc/main.rs +++ b/pbtc/main.rs @@ -12,6 +12,7 @@ extern crate chain; extern crate keys; extern crate script; extern crate message; +extern crate network; extern crate p2p; extern crate sync; extern crate import; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 33105fe2..6412467e 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -26,6 +26,7 @@ script = { path = "../script" } serialization = { path = "../serialization" } test-data = { path = "../test-data" } verification = { path = "../verification" } +network = { path = "../network" } [features] dev = [] diff --git a/sync/src/blocks_writer.rs b/sync/src/blocks_writer.rs index 8cb891a9..8bd15f54 100644 --- a/sync/src/blocks_writer.rs +++ b/sync/src/blocks_writer.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use chain; use db; -use super::Error; +use network::Magic; use verification::{Verify, ChainVerifier}; +use super::Error; pub struct BlocksWriter { storage: Arc, @@ -10,10 +11,10 @@ pub struct BlocksWriter { } impl BlocksWriter { - pub fn new(storage: db::SharedStore) -> BlocksWriter { + pub fn new(storage: db::SharedStore, network: Magic) -> BlocksWriter { BlocksWriter { storage: storage.clone(), - verifier: ChainVerifier::new(storage), + verifier: ChainVerifier::new(storage, network), } } @@ -32,18 +33,17 @@ impl BlocksWriter { #[cfg(test)] mod tests { - use db; - use db::Store; use std::sync::Arc; + use db::{self, Store}; + use network::Magic; + use {test_data, verification}; use super::super::Error; use super::BlocksWriter; - use test_data; - use verification; #[test] fn blocks_writer_appends_blocks() { 
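	// The network argument matters for verification: ChainVerifier now takes its
	// proof-of-work bound from `Magic::max_nbits()` (see the `check_nbits` call in
	// chain_verifier.rs below) instead of a hardcoded constant, so this writer
	// verifies blocks against the testnet bound 0x1d00ffff.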
let db = Arc::new(db::TestStorage::with_genesis_block()); - let mut blocks_target = BlocksWriter::new(db.clone()); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); blocks_target.append_block(test_data::block_h1()).expect("Expecting no error"); assert_eq!(db.best_block().expect("Block is inserted").number, 1); } @@ -51,7 +51,7 @@ mod tests { #[test] fn blocks_writer_verification_error() { let db = Arc::new(db::TestStorage::with_genesis_block()); - let mut blocks_target = BlocksWriter::new(db.clone()); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); match blocks_target.append_block(test_data::block_h2()).unwrap_err() { Error::OutOfOrderBlock => (), _ => panic!("Unexpected error"), @@ -62,7 +62,7 @@ mod tests { #[test] fn blocks_writer_out_of_order_block() { let db = Arc::new(db::TestStorage::with_genesis_block()); - let mut blocks_target = BlocksWriter::new(db.clone()); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); let wrong_block = test_data::block_builder() .header().parent(test_data::genesis().hash()).build() diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 319072cb..854512a0 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -21,6 +21,7 @@ extern crate serialization as ser; #[cfg(test)] extern crate ethcore_devtools as devtools; extern crate rand; +extern crate network; mod best_headers_chain; mod blocks_writer; @@ -42,7 +43,7 @@ mod synchronization_verifier; use std::sync::Arc; use parking_lot::RwLock; use tokio_core::reactor::Handle; -use message::common::ConsensusParams; +use network::Magic; /// Sync errors. #[derive(Debug)] @@ -56,12 +57,12 @@ pub enum Error { } /// Create blocks writer. -pub fn create_sync_blocks_writer(db: db::SharedStore) -> blocks_writer::BlocksWriter { - blocks_writer::BlocksWriter::new(db) +pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic) -> blocks_writer::BlocksWriter { + blocks_writer::BlocksWriter::new(db, network) } /// Create inbound synchronization connections factory for given `db`. 
-pub fn create_sync_connection_factory(handle: &Handle, consensus_params: ConsensusParams, db: db::SharedStore) -> p2p::LocalSyncNodeRef { +pub fn create_sync_connection_factory(handle: &Handle, network: Magic, db: db::SharedStore) -> p2p::LocalSyncNodeRef { use synchronization_chain::Chain as SyncChain; use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor; use local_node::LocalNode as SyncNode; @@ -74,7 +75,7 @@ pub fn create_sync_connection_factory(handle: &Handle, consensus_params: Consens let sync_executor = SyncExecutor::new(sync_chain.clone()); let sync_server = Arc::new(SynchronizationServer::new(sync_chain.clone(), sync_executor.clone())); let sync_client_core = SynchronizationClientCore::new(SynchronizationConfig::new(), handle, sync_executor.clone(), sync_chain.clone()); - let verifier = AsyncVerifier::new(consensus_params, sync_chain, sync_client_core.clone()); + let verifier = AsyncVerifier::new(network, sync_chain, sync_client_core.clone()); let sync_client = SynchronizationClient::new(sync_client_core, verifier); let sync_node = Arc::new(SyncNode::new(sync_server, sync_client, sync_executor)); SyncConnectionFactory::with_local_node(sync_node) diff --git a/sync/src/synchronization_verifier.rs b/sync/src/synchronization_verifier.rs index 67bb95e7..dd1dca29 100644 --- a/sync/src/synchronization_verifier.rs +++ b/sync/src/synchronization_verifier.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::sync::mpsc::{channel, Sender, Receiver}; use parking_lot::Mutex; use chain::{Block, Transaction}; -use message::common::ConsensusParams; +use network::{Magic, ConsensusParams}; use primitives::hash::H256; use verification::{ChainVerifier, Verify as VerificationVerify}; use synchronization_chain::ChainRef; @@ -48,16 +48,16 @@ pub struct AsyncVerifier { impl AsyncVerifier { /// Create new async verifier - pub fn new(consensus_params: ConsensusParams, chain: ChainRef, sink: Arc>) -> Self { + pub fn new(network: Magic, chain: ChainRef, sink: Arc>) -> Self { let (verification_work_sender, verification_work_receiver) = channel(); let storage = chain.read().storage(); - let verifier = ChainVerifier::new(storage); + let verifier = ChainVerifier::new(storage, network); AsyncVerifier { verification_work_sender: verification_work_sender, verification_worker_thread: Some(thread::Builder::new() .name("Sync verification thread".to_string()) .spawn(move || { - AsyncVerifier::verification_worker_proc(sink, chain, consensus_params, verifier, verification_work_receiver) + AsyncVerifier::verification_worker_proc(sink, chain, network.consensus_params(), verifier, verification_work_receiver) }) .expect("Error creating verification thread")) } diff --git a/verification/Cargo.toml b/verification/Cargo.toml index 9f14090a..73b967a4 100644 --- a/verification/Cargo.toml +++ b/verification/Cargo.toml @@ -4,18 +4,17 @@ version = "0.1.0" authors = ["Nikolay Volf "] [dependencies] +byteorder = "0.5" +parking_lot = "0.3" +linked-hash-map = "0.3" +time = "0.1" +log = "0.3" + ethcore-devtools = { path = "../devtools" } primitives = { path = "../primitives" } chain = { path = "../chain" } serialization = { path = "../serialization" } -parking_lot = "0.3" -linked-hash-map = "0.3" test-data = { path = "../test-data" } -byteorder = "0.5" -time = "0.1" script = { path = "../script" } -log = "0.3" - -[dependencies.db] -path = "../db" -features = ["dev"] +network = { path = "../network" } +db = { path = "../db", features = ["dev"] } diff --git a/verification/src/chain_verifier.rs 
b/verification/src/chain_verifier.rs
index b60424c9..eb6c770f 100644
--- a/verification/src/chain_verifier.rs
+++ b/verification/src/chain_verifier.rs
@@ -1,7 +1,8 @@
 //! Bitcoin chain verifier
 
-use std::collections::HashSet;
+use std::collections::BTreeSet;
 use db::{self, BlockRef, BlockLocation};
+use network::Magic;
 use super::{Verify, VerificationResult, Chain, Error, TransactionError, ContinueVerify};
 use {chain, utils};
 
@@ -18,16 +19,18 @@ pub struct ChainVerifier {
 	verify_clocktimeverify: bool,
 	skip_pow: bool,
 	skip_sig: bool,
+	network: Magic,
 }
 
 impl ChainVerifier {
-	pub fn new(store: db::SharedStore) -> Self {
+	pub fn new(store: db::SharedStore, network: Magic) -> Self {
 		ChainVerifier {
 			store: store,
 			verify_p2sh: false,
 			verify_clocktimeverify: false,
 			skip_pow: false,
-			skip_sig: false
+			skip_sig: false,
+			network: network,
 		}
 	}
 
@@ -55,7 +58,7 @@ impl ChainVerifier {
 	fn ordered_verify(&self, block: &chain::Block, at_height: u32) -> Result<(), Error> {
 		// check that difficulty matches the adjusted level
-		if let Some(work) = self.work_required(at_height) {
+		if let Some(work) = self.work_required(block, at_height) {
 			if !self.skip_pow && work != block.header().nbits {
 				trace!(target: "verification", "pow verification error at height: {}", at_height);
 				trace!(target: "verification", "expected work: {}, got {}", work, block.header().nbits);
@@ -202,7 +205,7 @@ impl ChainVerifier {
 		}
 
 		// target difficulty threshold
-		if !self.skip_pow && !utils::check_nbits(&hash, block.header().nbits) {
+		if !self.skip_pow && !utils::check_nbits(self.network.max_nbits(), &hash, block.header().nbits) {
 			return Err(Error::Pow);
 		}
 
@@ -275,7 +278,7 @@ impl ChainVerifier {
 	}
 
 	fn median_timestamp(&self, block: &chain::Block) -> Option<u32> {
-		let mut timestamps = HashSet::new();
+		let mut timestamps = BTreeSet::new();
 		let mut block_ref = block.block_header.previous_header_hash.clone().into();
 		// TODO: optimize it, so it does not make 11 redundant queries each time
 		for _ in 0..11 {
@@ -288,21 +291,19 @@ impl ChainVerifier {
 		}
 
 		if timestamps.len() > 2 {
-			let mut timestamps: Vec<_> = timestamps.into_iter().collect();
-			timestamps.sort();
+			let timestamps: Vec<_> = timestamps.into_iter().collect();
 			Some(timestamps[timestamps.len() / 2])
 		} else {
 			None
 		}
 	}
 
-	fn work_required(&self, height: u32) -> Option<u32> {
+	fn work_required(&self, block: &chain::Block, height: u32) -> Option<u32> {
 		if height == 0 {
 			return None;
 		}
 
-		// should this be best_header or parent header?
- // regtest do not pass with previous header, but, imo checking with best is a bit weird, mk - let previous_header = self.store.best_header().expect("self.height != 0; qed"); + let previous_ref = block.block_header.previous_header_hash.clone().into(); + let previous_header = self.store.block_header(previous_ref).expect("self.height != 0; qed"); if utils::is_retarget_height(height) { let retarget_ref = (height - utils::RETARGETING_INTERVAL).into(); @@ -314,7 +315,7 @@ impl ChainVerifier { // nbits of last block let last_nbits = previous_header.nbits; - return Some(utils::work_required_retarget(retarget_timestamp, last_timestamp, last_nbits)); + return Some(utils::work_required_retarget(self.network.max_nbits(), retarget_timestamp, last_timestamp, last_nbits)); } // TODO: if.testnet @@ -356,20 +357,19 @@ impl ContinueVerify for ChainVerifier { #[cfg(test)] mod tests { - + use std::sync::Arc; + use db::{TestStorage, Storage, Store, BlockStapler}; + use network::Magic; + use devtools::RandomTempPath; + use {script, test_data}; use super::ChainVerifier; use super::super::{Verify, Chain, Error, TransactionError}; - use db::{TestStorage, Storage, Store, BlockStapler}; - use test_data; - use std::sync::Arc; - use devtools::RandomTempPath; - use script; #[test] fn verify_orphan() { let storage = TestStorage::with_blocks(&vec![test_data::genesis()]); let b2 = test_data::block_h2(); - let verifier = ChainVerifier::new(Arc::new(storage)); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet); assert_eq!(Chain::Orphan, verifier.verify(&b2).unwrap()); } @@ -378,7 +378,7 @@ mod tests { fn verify_smoky() { let storage = TestStorage::with_blocks(&vec![test_data::genesis()]); let b1 = test_data::block_h1(); - let verifier = ChainVerifier::new(Arc::new(storage)); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet); assert_eq!(Chain::Main, verifier.verify(&b1).unwrap()); } @@ -391,7 +391,7 @@ mod tests { ] ); let b1 = test_data::block_h170(); - let verifier = ChainVerifier::new(Arc::new(storage)); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet); assert_eq!(Chain::Main, verifier.verify(&b1).unwrap()); } @@ -403,7 +403,7 @@ mod tests { ] ); let b170 = test_data::block_h170(); - let verifier = ChainVerifier::new(Arc::new(storage)); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet); let should_be = Err(Error::Transaction( 1, @@ -437,7 +437,7 @@ mod tests { .merkled_header().parent(genesis.hash()).build() .build(); - let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Err(Error::Transaction( 1, @@ -473,7 +473,7 @@ mod tests { .merkled_header().parent(genesis.hash()).build() .build(); - let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Ok(Chain::Main); assert_eq!(expected, verifier.verify(&block)); @@ -511,7 +511,7 @@ mod tests { .merkled_header().parent(genesis.hash()).build() .build(); - let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Ok(Chain::Main); assert_eq!(expected, verifier.verify(&block)); @@ -548,7 +548,7 @@ mod tests { .merkled_header().parent(genesis.hash()).build() .build(); - let 
verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Err(Error::Transaction(2, TransactionError::Overspend)); assert_eq!(expected, verifier.verify(&block)); @@ -592,7 +592,7 @@ mod tests { .merkled_header().parent(best_hash).build() .build(); - let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Ok(Chain::Main); @@ -644,7 +644,7 @@ mod tests { .merkled_header().parent(genesis.hash()).build() .build(); - let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Err(Error::MaximumSigops); assert_eq!(expected, verifier.verify(&block)); @@ -670,7 +670,7 @@ mod tests { .merkled_header().parent(genesis.hash()).build() .build(); - let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip(); + let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip(); let expected = Err(Error::CoinbaseOverspend { expected_max: 5000000000, diff --git a/verification/src/lib.rs b/verification/src/lib.rs index 7fdfc1fd..9096ffca 100644 --- a/verification/src/lib.rs +++ b/verification/src/lib.rs @@ -1,17 +1,19 @@ //! Bitcoin blocks verification -extern crate db; -extern crate primitives; -extern crate chain; -extern crate serialization; +extern crate byteorder; extern crate parking_lot; extern crate linked_hash_map; -extern crate byteorder; extern crate time; -extern crate script; #[macro_use] extern crate log; +extern crate db; +extern crate chain; +extern crate network; +extern crate primitives; +extern crate serialization; +extern crate script; + #[cfg(test)] extern crate ethcore_devtools as devtools; #[cfg(test)] diff --git a/verification/src/utils.rs b/verification/src/utils.rs index 521887a0..6dfbeef9 100644 --- a/verification/src/utils.rs +++ b/verification/src/utils.rs @@ -21,10 +21,6 @@ const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR; // Target number of blocks, 2 weaks, 2016 pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS; -pub const MAX_NBITS_MAINNET: u32 = 0x1d00ffff; -pub const MAX_NBITS_TESTNET: u32 = 0x1d00ffff; -pub const MAX_NBITS_REGTEST: u32 = 0x207fffff; - pub fn is_retarget_height(height: u32) -> bool { height % RETARGETING_INTERVAL == 0 } @@ -34,10 +30,10 @@ fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 { range_constrain(timespan as u32, MIN_TIMESPAN, MAX_TIMESPAN) } -pub fn work_required_retarget(retarget_timestamp: u32, last_timestamp: u32, last_nbits: u32) -> u32 { +pub fn work_required_retarget(max_nbits: u32, retarget_timestamp: u32, last_timestamp: u32, last_nbits: u32) -> u32 { // ignore overflows here let mut retarget = Compact::new(last_nbits).to_u256().unwrap_or_else(|x| x); - let maximum = Compact::new(MAX_NBITS_MAINNET).to_u256().unwrap_or_else(|x| x); + let maximum = Compact::new(max_nbits).to_u256().unwrap_or_else(|x| x); // multiplication overflow potential retarget = retarget * U256::from(retarget_timespan(retarget_timestamp, last_timestamp)); @@ -59,8 +55,10 @@ fn range_constrain(value: u32, min: u32, max: u32) -> u32 { } /// Simple nbits check that does not require 256-bit arithmetic -pub fn check_nbits(hash: 
&H256, n_bits: u32) -> bool { - if n_bits > MAX_NBITS_REGTEST { return false; } +pub fn check_nbits(max_nbits: u32, hash: &H256, n_bits: u32) -> bool { + if n_bits > max_nbits { + return false; + } let hash_bytes: &[u8] = &**hash; @@ -137,6 +135,7 @@ pub fn p2sh_sigops(output: &Script, input_ref: &Script) -> usize { #[cfg(test)] mod tests { + use network::Magic; use super::{block_reward_satoshi, check_nbits}; use hash::H256; @@ -154,29 +153,31 @@ mod tests { #[test] fn nbits() { + let max_nbits = Magic::Regtest.max_nbits(); + // strictly equal let hash = H256::from_reversed_str("00000000000000001bc330000000000000000000000000000000000000000000"); let nbits = 0x181bc330u32; - assert!(check_nbits(&hash, nbits)); + assert!(check_nbits(max_nbits, &hash, nbits)); // nbits match but not equal (greater) let hash = H256::from_reversed_str("00000000000000001bc330000000000000000000000000000000000000000001"); let nbits = 0x181bc330u32; - assert!(!check_nbits(&hash, nbits)); + assert!(!check_nbits(max_nbits, &hash, nbits)); // greater let hash = H256::from_reversed_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); let nbits = 0x181bc330u32; - assert!(!check_nbits(&hash, nbits)); + assert!(!check_nbits(max_nbits, &hash, nbits)); // some real examples let hash = H256::from_reversed_str("000000000000000001f942eb4bfa0aeccb6a14c268f4c72d5fff17270da771b9"); let nbits = 404129525; - assert!(check_nbits(&hash, nbits)); + assert!(check_nbits(max_nbits, &hash, nbits)); let hash = H256::from_reversed_str("00000000000000000e753ef636075711efd2cbf5a8473c7c5b67755a3701e0c2"); let nbits = 404129525; - assert!(check_nbits(&hash, nbits)); + assert!(check_nbits(max_nbits, &hash, nbits)); } } From 7f07b60a85ef2d896ca19ba34cf988645018992c Mon Sep 17 00:00:00 2001 From: debris Date: Sat, 26 Nov 2016 13:07:50 +0100 Subject: [PATCH 26/26] fix english naming --- db/src/storage.rs | 8 ++++---- db/src/transaction_meta.rs | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/db/src/storage.rs b/db/src/storage.rs index 43858f11..fabbe181 100644 --- a/db/src/storage.rs +++ b/db/src/storage.rs @@ -192,7 +192,7 @@ impl Storage { return Err(Error::double_spend(&input.previous_output.hash)); } - meta.note_used(input.previous_output.index as usize); + meta.denote_used(input.previous_output.index as usize); true }, None => false, @@ -204,7 +204,7 @@ impl Storage { return Err(Error::double_spend(&input.previous_output.hash)); } - meta.note_used(input.previous_output.index as usize); + meta.denote_used(input.previous_output.index as usize); context.meta.insert( input.previous_output.hash.clone(), @@ -243,7 +243,7 @@ impl Storage { for input in &tx.inputs { if !match context.meta.get_mut(&input.previous_output.hash) { Some(ref mut meta) => { - meta.denote_used(input.previous_output.index as usize); + meta.denote_unused(input.previous_output.index as usize); true }, None => false, @@ -257,7 +257,7 @@ impl Storage { &input.previous_output.hash )); - meta.denote_used(input.previous_output.index as usize); + meta.denote_unused(input.previous_output.index as usize); context.meta.insert( input.previous_output.hash.clone(), diff --git a/db/src/transaction_meta.rs b/db/src/transaction_meta.rs index bbe5d2a3..29936fc7 100644 --- a/db/src/transaction_meta.rs +++ b/db/src/transaction_meta.rs @@ -25,11 +25,6 @@ impl TransactionMeta { } } - /// note that particular output has been used - pub fn note_used(&mut self, index: usize) { - self.bits.set(index + 1 , true); - } - pub fn coinbase(mut self) -> 
Self { self.bits.set(0, true); self @@ -40,8 +35,13 @@ impl TransactionMeta { .expect("One bit should always exists, since it is created as usize + 1; minimum value of usize is 0; 0 + 1 = 1; qed") } - /// note that particular output has been used + /// denote particular output as used pub fn denote_used(&mut self, index: usize) { + self.bits.set(index + 1 , true); + } + + /// denote particular output as not used + pub fn denote_unused(&mut self, index: usize) { self.bits.set(index + 1, false); }
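
The chain_verifier.rs changes above settle the question raised in the deleted comment: the expected difficulty is now derived from the block's parent header (looked up through previous_header_hash and the store's block_header), not from the chain's best header, so a side-chain block is validated against its own ancestry. What follows is a minimal sketch of that decision flow, assuming plain u32 nbits values; work_required_sketch and its retarget_nbits parameter are illustrative stand-ins, with the real recomputation done by utils::work_required_retarget(max_nbits, ...).

/// Blocks between difficulty retargets:
/// TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS (two weeks / ten minutes).
const RETARGETING_INTERVAL: u32 = 2016;

fn is_retarget_height(height: u32) -> bool {
	height % RETARGETING_INTERVAL == 0
}

/// Sketch of ChainVerifier::work_required. `parent_nbits` is read from the
/// header referenced by the block's previous_header_hash; `retarget_nbits`
/// stands for the result of utils::work_required_retarget(max_nbits, ...),
/// which rescales the target from the last interval's timestamps.
fn work_required_sketch(height: u32, parent_nbits: u32, retarget_nbits: u32) -> Option<u32> {
	if height == 0 {
		// genesis difficulty is fixed by the network; nothing to derive
		return None;
	}
	if is_retarget_height(height) {
		// every 2016th block must carry the freshly recomputed difficulty
		Some(retarget_nbits)
	} else {
		// all other blocks must repeat their parent's difficulty
		Some(parent_nbits)
	}
}

fn main() {
	// height 1 inherits the genesis nbits; height 2016 switches to the retarget value
	assert_eq!(work_required_sketch(1, 0x1d00ffff, 0x1c7fffff), Some(0x1d00ffff));
	assert_eq!(work_required_sketch(2016, 0x1d00ffff, 0x1c7fffff), Some(0x1c7fffff));
}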
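The utils.rs hunks replace the hard-coded MAX_NBITS_* constants (0x1d00ffff for mainnet and testnet, 0x207fffff for regtest) with a max_nbits value supplied by network's Magic. Below is a self-contained reading of what check_nbits(max_nbits, &hash, n_bits) accepts; unlike the repo's version, it builds the full 256-bit big-endian target for clarity. compact_to_target and check_nbits_sketch are invented names, and the hash is assumed to be already byte-reversed into big-endian order, which is what from_reversed_str yields in the tests.

/// Decode compact difficulty bits: the top byte is the target width in bytes,
/// the low three bytes are the most significant bytes of the target.
fn compact_to_target(nbits: u32) -> Option<[u8; 32]> {
	let exponent = (nbits >> 24) as usize;
	let mantissa = nbits & 0x007f_ffff;
	if exponent > 32 || (nbits & 0x0080_0000) != 0 {
		return None; // oversized targets and the sign bit are rejected outright
	}
	let mantissa_bytes = mantissa.to_be_bytes(); // [0, b1, b2, b3]
	let mut target = [0u8; 32];
	for i in 0..3 {
		let pos = 32 - exponent + i; // big-endian position of mantissa byte i
		if pos < 32 {
			target[pos] = mantissa_bytes[i + 1];
		}
	}
	Some(target)
}

/// What utils::check_nbits(max_nbits, &hash, n_bits) verifies after this patch.
fn check_nbits_sketch(max_nbits: u32, hash_be: &[u8; 32], nbits: u32) -> bool {
	// the per-network gate: claimed difficulty may not be weaker than the cap
	if nbits > max_nbits {
		return false;
	}
	match compact_to_target(nbits) {
		// proof of work: the hash, read as a 256-bit number, must not exceed the target
		Some(target) => &hash_be[..] <= &target[..],
		None => false,
	}
}

fn main() {
	// 0x1d00ffff is the old MAX_NBITS_MAINNET: mantissa 0xffff at byte offset 32 - 0x1d
	let target = compact_to_target(0x1d00ffff).unwrap();
	assert_eq!(&target[3..6], &[0x00, 0xff, 0xff]);
	// an all-zero hash satisfies any valid target ...
	assert!(check_nbits_sketch(0x1d00ffff, &[0u8; 32], 0x1d00ffff));
	// ... but regtest-strength nbits are rejected under the mainnet cap
	assert!(!check_nbits_sketch(0x1d00ffff, &[0u8; 32], 0x207fffff));
}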
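Patch 26 makes the TransactionMeta interface symmetric: denote_used now marks an output as spent (what note_used used to do), and denote_unused clears the flag again (what the old, misleadingly named denote_used did). A toy model of the bit layout, with Vec<bool> standing in for the repo's bit vector and is_spent added only for this demonstration:

/// Bit 0 flags the transaction as coinbase; bit (i + 1) flags output i as spent.
struct TransactionMeta {
	bits: Vec<bool>,
}

impl TransactionMeta {
	fn new(outputs: usize) -> Self {
		// one coinbase flag plus one spent flag per output, all clear initially
		TransactionMeta { bits: vec![false; outputs + 1] }
	}

	fn coinbase(mut self) -> Self {
		self.bits[0] = true;
		self
	}

	/// denote particular output as used
	fn denote_used(&mut self, index: usize) {
		self.bits[index + 1] = true;
	}

	/// denote particular output as not used
	fn denote_unused(&mut self, index: usize) {
		self.bits[index + 1] = false;
	}

	/// illustrative helper, not part of the diff
	fn is_spent(&self, index: usize) -> bool {
		self.bits[index + 1]
	}
}

fn main() {
	let mut meta = TransactionMeta::new(2).coinbase();
	meta.denote_used(1);   // forward path: output spent while a block is applied
	assert!(meta.is_spent(1));
	meta.denote_unused(1); // reverse path: flag cleared when the block is unwound
	assert!(!meta.is_spent(1));
}

This pairing matches the storage.rs hunks above: the path that checks for double spends calls denote_used, and the reverse path that undoes block application calls denote_unused.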