From 1930d8348595caa782568c93577b9e65b24683cd Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 11:44:06 +0100 Subject: [PATCH 01/13] rpc method stub --- rpc/src/v1/traits/network.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rpc/src/v1/traits/network.rs b/rpc/src/v1/traits/network.rs index c09cf60b..7bb24364 100644 --- a/rpc/src/v1/traits/network.rs +++ b/rpc/src/v1/traits/network.rs @@ -10,5 +10,10 @@ build_rpc_trait! { /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "onetry"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/ #[rpc(name = "addnode")] fn add_node(&self, String, AddNodeOperation) -> Result<(), Error>; + /// Query node(s) info + /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ + /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ + #[rpc(name = "getaddednodeinfo")] + fn node_info(&self, bool, String) -> Result<(), Error>; } } From 1f75bbcb1f138c19262b26bec64ca238903da0f0 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 13:17:57 +0100 Subject: [PATCH 02/13] json-rpc layout --- rpc/src/v1/helpers/errors.rs | 9 +++++++++ rpc/src/v1/impls/network.rs | 32 ++++++++++++++++++++++++++++++-- rpc/src/v1/traits/network.rs | 7 ++++--- rpc/src/v1/types/mod.rs.in | 2 +- rpc/src/v1/types/nodes.rs | 29 ++++++++++++++++++++++++++++- 5 files changed, 72 insertions(+), 7 deletions(-) diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index b14f70a5..984b0a27 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -2,6 +2,7 @@ mod codes { // NOTE [ToDr] Codes from [-32099, -32000] + pub const UNKNOWN: i64 = -32000; pub const EXECUTION_ERROR: i64 = -32015; pub const TRANSACTION_NOT_FOUND: i64 = -32096; pub const TRANSACTION_OUTPUT_NOT_FOUND: i64 = -32097; @@ -97,3 +98,11 @@ pub fn node_not_added() -> Error { data: None, } } + +pub fn unknown() -> Error { + Error { + code: ErrorCode::ServerError(codes::UNKNOWN), + message: "Unknown error has occurred".into(), + data: None, + } +} diff --git a/rpc/src/v1/impls/network.rs b/rpc/src/v1/impls/network.rs index 842996c8..e58136a8 100644 --- a/rpc/src/v1/impls/network.rs +++ b/rpc/src/v1/impls/network.rs @@ -1,8 +1,9 @@ use std::sync::Arc; -use std::net::SocketAddr; +use std::net::{SocketAddr, IpAddr}; use v1::traits::Network as NetworkRpc; -use v1::types::AddNodeOperation; +use v1::types::{AddNodeOperation, NodeInfo}; use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; use v1::helpers::errors; use p2p; @@ -10,6 +11,8 @@ pub trait NetworkApi : Send + Sync + 'static { fn add_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError>; fn remove_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError>; fn connect(&self, socket_addr: SocketAddr); + fn node_info(&self, node_addr: IpAddr) -> Result; + fn nodes_info(&self) -> Vec; } impl NetworkRpc for NetworkClient where T: NetworkApi { @@ -29,6 +32,23 @@ impl NetworkRpc for NetworkClient where T: NetworkApi { } } } + + fn node_info(&self, _dns: bool, node_addr: Trailing) -> Result, Error> { + Ok( + if node_addr.0.is_empty() { + self.api.nodes_info() + } + else { + let addr = try!(node_addr.0.parse().map_err( + |_| 
errors::invalid_params("node", "Invalid ip address format, should be ip address (127.0.0.1)"))); + let node_info = try!( + self.api.node_info(addr) + .map_err(|_| errors::node_not_added()) + ); + vec![node_info] + } + ) + } } pub struct NetworkClient { @@ -65,4 +85,12 @@ impl NetworkApi for NetworkClientCore { fn connect(&self, socket_addr: SocketAddr) { p2p::Context::connect_normal(self.p2p.clone(), socket_addr); } + + fn node_info(&self, node_addr: IpAddr) -> Result { + Err(p2p::NodeTableError::NoAddressInTable) + } + + fn nodes_info(&self) -> Vec { + vec![] + } } diff --git a/rpc/src/v1/traits/network.rs b/rpc/src/v1/traits/network.rs index 7bb24364..e0d23e50 100644 --- a/rpc/src/v1/traits/network.rs +++ b/rpc/src/v1/traits/network.rs @@ -1,5 +1,6 @@ use jsonrpc_core::Error; -use v1::types::AddNodeOperation; +use jsonrpc_macros::Trailing; +use v1::types::{AddNodeOperation, NodeInfo}; build_rpc_trait! { /// Parity-bitcoin network interface @@ -11,9 +12,9 @@ build_rpc_trait! { #[rpc(name = "addnode")] fn add_node(&self, String, AddNodeOperation) -> Result<(), Error>; /// Query node(s) info - /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ + /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ #[rpc(name = "getaddednodeinfo")] - fn node_info(&self, bool, String) -> Result<(), Error>; + fn node_info(&self, bool, Trailing) -> Result, Error>; } } diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 93f55b42..5309df74 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -22,4 +22,4 @@ pub use self::raw_block::RawBlock; pub use self::raw_transaction::RawTransaction; pub use self::script::ScriptType; pub use self::uint::U256; -pub use self::nodes::AddNodeOperation; +pub use self::nodes::{AddNodeOperation, NodeInfo}; diff --git a/rpc/src/v1/types/nodes.rs b/rpc/src/v1/types/nodes.rs index 0ba9ff50..f1a157c6 100644 --- a/rpc/src/v1/types/nodes.rs +++ b/rpc/src/v1/types/nodes.rs @@ -1,4 +1,4 @@ -use serde::{Deserialize, Deserializer}; +use serde::{Serialize, Serializer, Deserialize, Deserializer}; #[derive(Debug, PartialEq)] pub enum AddNodeOperation { @@ -29,3 +29,30 @@ impl Deserialize for AddNodeOperation { deserializer.deserialize(DummyVisitor) } } + +#[derive(Serialize)] +pub struct NodeInfoAddress { + address: String, + connected: NodeInfoAddressConnectionType, +} + +#[derive(Serialize)] +pub struct NodeInfo { + addednode: String, + connected: bool, + addresses: Vec, +} + +pub enum NodeInfoAddressConnectionType { + Inbound, + Outbound, +} + +impl Serialize for NodeInfoAddressConnectionType { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer { + match *self { + NodeInfoAddressConnectionType::Inbound => "inbound".serialize(serializer), + NodeInfoAddressConnectionType::Outbound => "outbound".serialize(serializer), + } + } +} From 92b103deb8f7dd601bcb9d30c0fb306f0a16961e Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 14:23:16 +0100 Subject: [PATCH 03/13] p2p api changes --- p2p/src/net/connections.rs | 7 ++++++- 
p2p/src/p2p.rs | 8 ++++++++ p2p/src/util/node_table.rs | 5 +++++ rpc/src/v1/impls/network.rs | 1 - 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/p2p/src/net/connections.rs b/p2p/src/net/connections.rs index c3ed4c17..b9b388df 100644 --- a/p2p/src/net/connections.rs +++ b/p2p/src/net/connections.rs @@ -35,8 +35,13 @@ impl Connections { self.channels().values().map(|channel| channel.peer_info().address).collect() } + /// Returns info on every peer + pub fn peers(&self) -> Vec { + self.channels().values().map(|channel| channel.peer_info()).collect() + } + /// Returns number of connections. - pub fn _count(&self) -> usize { + pub fn count(&self) -> usize { self.channels.read().len() } diff --git a/p2p/src/p2p.rs b/p2p/src/p2p.rs index 6c658f2f..6db84249 100644 --- a/p2p/src/p2p.rs +++ b/p2p/src/p2p.rs @@ -386,6 +386,14 @@ impl Context { pub fn create_sync_session(&self, start_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef { self.local_sync_node.create_sync_session(start_height, outbound_connection) } + + pub fn connections(&self) -> &Connections { + &self.connections + } + + pub fn nodes(&self) -> Vec { + self.node_table.read().nodes() + } } pub struct P2P { diff --git a/p2p/src/util/node_table.rs b/p2p/src/util/node_table.rs index afc8bc64..f197c804 100644 --- a/p2p/src/util/node_table.rs +++ b/p2p/src/util/node_table.rs @@ -296,6 +296,11 @@ impl NodeTable where T: Time { .collect() } + /// Returnes all nodes + pub fn nodes(&self) -> Vec { + self.by_addr.iter().map(|(_, n)| n).cloned().collect() + } + /// Returns most recently active nodes. /// /// The documenation says: diff --git a/rpc/src/v1/impls/network.rs b/rpc/src/v1/impls/network.rs index e58136a8..b327794a 100644 --- a/rpc/src/v1/impls/network.rs +++ b/rpc/src/v1/impls/network.rs @@ -87,7 +87,6 @@ impl NetworkApi for NetworkClientCore { } fn node_info(&self, node_addr: IpAddr) -> Result { - Err(p2p::NodeTableError::NoAddressInTable) } fn nodes_info(&self) -> Vec { From f206634ce8be68db66f7cf532479f8b3bf25ebee Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 15:30:08 +0100 Subject: [PATCH 04/13] finalizing jsonrpc --- p2p/src/lib.rs | 2 +- p2p/src/net/connections.rs | 2 +- rpc/src/v1/impls/network.rs | 30 +++++++++++++++++++++++++++++- rpc/src/v1/types/nodes.rs | 19 ++++++++++++++++--- 4 files changed, 47 insertions(+), 6 deletions(-) diff --git a/p2p/src/lib.rs b/p2p/src/lib.rs index 381b64e3..6f183238 100644 --- a/p2p/src/lib.rs +++ b/p2p/src/lib.rs @@ -32,5 +32,5 @@ pub use config::Config; pub use net::Config as NetConfig; pub use p2p::{P2P, Context}; pub use event_loop::{event_loop, forever}; -pub use util::{NodeTableError, PeerId, PeerInfo, InternetProtocol}; +pub use util::{NodeTableError, PeerId, PeerInfo, InternetProtocol, Direction}; pub use protocol::{InboundSyncConnection, InboundSyncConnectionRef, OutboundSyncConnection, OutboundSyncConnectionRef, LocalSyncNode, LocalSyncNodeRef}; diff --git a/p2p/src/net/connections.rs b/p2p/src/net/connections.rs index b9b388df..ed56eb9b 100644 --- a/p2p/src/net/connections.rs +++ b/p2p/src/net/connections.rs @@ -36,7 +36,7 @@ impl Connections { } /// Returns info on every peer - pub fn peers(&self) -> Vec { + pub fn info(&self) -> Vec { self.channels().values().map(|channel| channel.peer_info()).collect() } diff --git a/rpc/src/v1/impls/network.rs b/rpc/src/v1/impls/network.rs index b327794a..a0a1834b 100644 --- a/rpc/src/v1/impls/network.rs +++ b/rpc/src/v1/impls/network.rs @@ -87,9 +87,37 @@ impl NetworkApi for 
NetworkClientCore { } fn node_info(&self, node_addr: IpAddr) -> Result { + let exact_node = try!( + self.p2p.nodes() + .iter() + .find(|n| n.address().ip() == node_addr) + .cloned() + .ok_or(p2p::NodeTableError::NoAddressInTable) + ); + + let peers: Vec = self.p2p.connections().info() + .into_iter() + .filter(|p| p.address == exact_node.address()).collect(); + + Ok( + NodeInfo { + addednode: format!("{}", exact_node.address()), + connected: !peers.is_empty(), + addresses: peers.into_iter().map(|p| p.into()).collect(), + } + ) } fn nodes_info(&self) -> Vec { - vec![] + let peers: Vec = self.p2p.connections().info(); + + self.p2p.nodes().iter().map(|n| { + let node_peers: Vec = peers.iter().filter(|p| p.address == n.address()).cloned().collect(); + NodeInfo { + addednode: format!("{}", n.address()), + connected: !node_peers.is_empty(), + addresses: node_peers.into_iter().map(|p| p.into()).collect(), + } + }).collect() } } diff --git a/rpc/src/v1/types/nodes.rs b/rpc/src/v1/types/nodes.rs index f1a157c6..cce26540 100644 --- a/rpc/src/v1/types/nodes.rs +++ b/rpc/src/v1/types/nodes.rs @@ -1,4 +1,5 @@ use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use p2p::{Direction, PeerInfo}; #[derive(Debug, PartialEq)] pub enum AddNodeOperation { @@ -36,11 +37,23 @@ pub struct NodeInfoAddress { connected: NodeInfoAddressConnectionType, } +impl From for NodeInfoAddress { + fn from(info: PeerInfo) -> Self { + NodeInfoAddress { + address: format!("{}", info.address), + connected: match info.direction { + Direction::Inbound => NodeInfoAddressConnectionType::Inbound, + Direction::Outbound => NodeInfoAddressConnectionType::Outbound, + }, + } + } +} + #[derive(Serialize)] pub struct NodeInfo { - addednode: String, - connected: bool, - addresses: Vec, + pub addednode: String, + pub connected: bool, + pub addresses: Vec, } pub enum NodeInfoAddressConnectionType { From 5f7ef32cce412e0c754910804d94b8b340e4dacc Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 16:20:11 +0100 Subject: [PATCH 05/13] fix example --- rpc/src/v1/traits/network.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rpc/src/v1/traits/network.rs b/rpc/src/v1/traits/network.rs index e0d23e50..f3dbe6be 100644 --- a/rpc/src/v1/traits/network.rs +++ b/rpc/src/v1/traits/network.rs @@ -5,15 +5,15 @@ use v1::types::{AddNodeOperation, NodeInfo}; build_rpc_trait! 
{ /// Parity-bitcoin network interface pub trait Network { - /// Add/remove/connecto to the node + /// Add/remove/connect to the node /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "add"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/ /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "remove"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/ /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "onetry"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/ #[rpc(name = "addnode")] fn add_node(&self, String, AddNodeOperation) -> Result<(), Error>; /// Query node(s) info - /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ - /// @curl-example: curl --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ + /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ + /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ #[rpc(name = "getaddednodeinfo")] fn node_info(&self, bool, Trailing) -> Result, Error>; } From c216d12d3480dd9b6bab47424e4d618d76b1e2b0 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 16:54:56 +0100 Subject: [PATCH 06/13] connection count method --- rpc/src/v1/impls/network.rs | 9 +++++++++ rpc/src/v1/traits/network.rs | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/rpc/src/v1/impls/network.rs b/rpc/src/v1/impls/network.rs index a0a1834b..01e6b2ae 100644 --- a/rpc/src/v1/impls/network.rs +++ b/rpc/src/v1/impls/network.rs @@ -13,6 +13,7 @@ pub trait NetworkApi : Send + Sync + 'static { fn connect(&self, socket_addr: SocketAddr); fn node_info(&self, node_addr: IpAddr) -> Result; fn nodes_info(&self) -> Vec; + fn connection_count(&self) -> usize; } impl NetworkRpc for NetworkClient where T: NetworkApi { @@ -49,6 +50,10 @@ impl NetworkRpc for NetworkClient where T: NetworkApi { } ) } + + fn connection_count(&self) -> Result { + Ok(self.api.connection_count()) + } } pub struct NetworkClient { @@ -120,4 +125,8 @@ impl NetworkApi for NetworkClientCore { } }).collect() } + + fn connection_count(&self) -> usize { + self.p2p.connections().count() + } } diff --git a/rpc/src/v1/traits/network.rs b/rpc/src/v1/traits/network.rs index f3dbe6be..5fcc0004 100644 --- a/rpc/src/v1/traits/network.rs +++ b/rpc/src/v1/traits/network.rs @@ -16,5 +16,9 @@ build_rpc_trait! 
{ /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ #[rpc(name = "getaddednodeinfo")] fn node_info(&self, bool, Trailing) -> Result, Error>; + /// Query node(s) info + /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getconnectioncount", "params": [] }' -H 'content-type: application/json;' http://127.0.0.1:8332/ + #[rpc(name = "getconnectioncount")] + fn connection_count(&self) -> Result; } } From ce7af59ad34b3ee7bee522864206f4a11b041188 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 13 Dec 2016 18:06:46 +0100 Subject: [PATCH 07/13] hash block and transactions while reading them --- Cargo.lock | 1 + crypto/src/lib.rs | 8 +++++++- db/Cargo.toml | 1 + db/src/indexed_block.rs | 17 ++++++++++++++++- db/src/indexed_header.rs | 16 ++++++++++++++++ db/src/indexed_transaction.rs | 17 ++++++++++++++++- db/src/lib.rs | 2 ++ db/src/read_and_hash.rs | 33 +++++++++++++++++++++++++++++++++ serialization/src/reader.rs | 28 ++++++++++++++++++++++++++++ 9 files changed, 120 insertions(+), 3 deletions(-) create mode 100644 db/src/read_and_hash.rs diff --git a/Cargo.lock b/Cargo.lock index fff7d286..e47785a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -173,6 +173,7 @@ name = "db" version = "0.1.0" dependencies = [ "bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bitcrypto 0.1.0", "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "chain 0.1.0", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index 5fed6d43..02a11c3f 100644 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -2,11 +2,11 @@ extern crate crypto as rcrypto; extern crate primitives; extern crate siphasher; +pub use rcrypto::digest::Digest; use std::hash::Hasher; use rcrypto::sha1::Sha1; use rcrypto::sha2::Sha256; use rcrypto::ripemd160::Ripemd160; -use rcrypto::digest::Digest; use siphasher::sip::SipHasher24; use primitives::hash::{H32, H160, H256}; @@ -72,6 +72,12 @@ impl DHash256 { pub fn new() -> Self { DHash256::default() } + + pub fn finish(mut self) -> H256 { + let mut result = H256::default(); + self.result(&mut *result); + result + } } impl Digest for DHash256 { diff --git a/db/Cargo.toml b/db/Cargo.toml index dd92e8d1..e27e490f 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -8,6 +8,7 @@ elastic-array = "0.5" rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" } ethcore-devtools = { path = "../devtools" } primitives = { path = "../primitives" } +bitcrypto = { path = "../crypto" } byteorder = "0.5" chain = { path = "../chain" } serialization = { path = "../serialization" } diff --git a/db/src/indexed_block.rs b/db/src/indexed_block.rs index dee6936b..9b10f08a 100644 --- a/db/src/indexed_block.rs +++ b/db/src/indexed_block.rs @@ -1,6 +1,10 @@ +use std::io; use primitives::hash::H256; use chain::{Block, OutPoint, TransactionOutput, merkle_root, Transaction}; -use serialization::{Serializable, serialized_list_size}; +use serialization::{ + Serializable, serialized_list_size, + Deserializable, Reader, Error as ReaderError +}; use indexed_header::IndexedBlockHeader; use indexed_transaction::IndexedTransaction; use {TransactionOutputObserver, PreviousTransactionOutputProvider}; @@ -87,3 +91,14 @@ impl IndexedBlock { self.transactions.iter().all(|tx| tx.raw.is_final(height, self.header.raw.time)) } } + +impl Deserializable for 
IndexedBlock { + fn deserialize(reader: &mut Reader) -> Result where T: io::Read { + let block = IndexedBlock { + header: try!(reader.read()), + transactions: try!(reader.read_list()), + }; + + Ok(block) + } +} diff --git a/db/src/indexed_header.rs b/db/src/indexed_header.rs index df117833..04dfc9d8 100644 --- a/db/src/indexed_header.rs +++ b/db/src/indexed_header.rs @@ -1,5 +1,8 @@ +use std::io; use primitives::hash::H256; use chain::BlockHeader; +use serialization::{Deserializable, Reader, Error as ReaderError}; +use read_and_hash::ReadAndHash; #[derive(Debug, Clone)] pub struct IndexedBlockHeader { @@ -24,3 +27,16 @@ impl IndexedBlockHeader { } } } + +impl Deserializable for IndexedBlockHeader { + fn deserialize(reader: &mut Reader) -> Result where T: io::Read { + let data = try!(reader.read_and_hash::()); + // TODO: use len + let header = IndexedBlockHeader { + raw: data.data, + hash: data.hash, + }; + + Ok(header) + } +} diff --git a/db/src/indexed_transaction.rs b/db/src/indexed_transaction.rs index bcfec139..899da6a7 100644 --- a/db/src/indexed_transaction.rs +++ b/db/src/indexed_transaction.rs @@ -1,6 +1,8 @@ -use std::cmp; +use std::{cmp, io}; use primitives::hash::H256; use chain::{Transaction, OutPoint, TransactionOutput}; +use serialization::{Deserializable, Reader, Error as ReaderError}; +use read_and_hash::ReadAndHash; use PreviousTransactionOutputProvider; #[derive(Debug, Clone)] @@ -33,6 +35,19 @@ impl cmp::PartialEq for IndexedTransaction { } } +impl Deserializable for IndexedTransaction { + fn deserialize(reader: &mut Reader) -> Result where T: io::Read { + let data = try!(reader.read_and_hash::()); + // TODO: use len + let tx = IndexedTransaction { + raw: data.data, + hash: data.hash, + }; + + Ok(tx) + } +} + impl<'a> PreviousTransactionOutputProvider for &'a [IndexedTransaction] { fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { self.iter() diff --git a/db/src/lib.rs b/db/src/lib.rs index 10a05309..cb2eba41 100644 --- a/db/src/lib.rs +++ b/db/src/lib.rs @@ -1,5 +1,6 @@ //! 
Bitcoin database +extern crate bitcrypto as crypto; extern crate elastic_array; extern crate rocksdb; extern crate parking_lot; @@ -31,6 +32,7 @@ mod update_context; mod indexed_block; mod indexed_header; mod indexed_transaction; +mod read_and_hash; #[derive(Debug, Clone)] pub enum BlockRef { diff --git a/db/src/read_and_hash.rs b/db/src/read_and_hash.rs new file mode 100644 index 00000000..8be41f10 --- /dev/null +++ b/db/src/read_and_hash.rs @@ -0,0 +1,33 @@ +use std::io; +use crypto::{DHash256, Digest}; +use primitives::hash::H256; +use serialization::{Reader, Error as ReaderError, Deserializable}; + +pub struct HashedData { + pub len: usize, + pub hash: H256, + pub data: T, +} + +pub trait ReadAndHash { + fn read_and_hash(&mut self) -> Result, ReaderError> where T: Deserializable; +} + +impl ReadAndHash for Reader where R: io::Read { + fn read_and_hash(&mut self) -> Result, ReaderError> where T: Deserializable { + let mut len = 0usize; + let mut hasher = DHash256::new(); + let data = self.read_with_proxy(|bytes| { + len += bytes.len(); + hasher.input(bytes); + })?; + + let result = HashedData { + hash: hasher.finish(), + data: data, + len: len, + }; + + Ok(result) + } +} diff --git a/serialization/src/reader.rs b/serialization/src/reader.rs index 51dacb61..653a99b5 100644 --- a/serialization/src/reader.rs +++ b/serialization/src/reader.rs @@ -84,6 +84,11 @@ impl Reader where R: io::Read { T::deserialize(self) } + pub fn read_with_proxy(&mut self, proxy: F) -> Result where T: Deserializable, F: FnMut(&[u8]) { + let mut reader = Reader::from_read(Proxy::new(self, proxy)); + T::deserialize(&mut reader) + } + pub fn read_slice(&mut self, bytes: &mut [u8]) -> Result<(), Error> { io::Read::read_exact(self, bytes).map_err(|_| Error::UnexpectedEnd) } @@ -148,3 +153,26 @@ impl Iterator for ReadIterator where R: io::Read, T: Deserializable } } } + +struct Proxy { + from: F, + to: T, +} + +impl Proxy { + fn new(from: F, to: T) -> Self { + Proxy { + from: from, + to: to, + } + } +} + +impl io::Read for Proxy where F: io::Read, T: FnMut(&[u8]) { + fn read(&mut self, buf: &mut [u8]) -> Result { + let len = try!(io::Read::read(&mut self.from, buf)); + let to = &mut self.to; + to(&buf[..len]); + Ok(len) + } +} From 7baaa47b8c5d35832a3a423ce8a6e741e7e3acbf Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 13 Dec 2016 20:49:41 +0100 Subject: [PATCH 08/13] import is hashing blocks when reading, added --skip-verification flag --- Cargo.lock | 1 - chain/src/constants.rs | 22 ++++++++++ {db => chain}/src/indexed_block.rs | 51 ++++++------------------ {db => chain}/src/indexed_header.rs | 14 +++++-- {db => chain}/src/indexed_transaction.rs | 24 ++++++----- chain/src/lib.rs | 29 +++++++++----- {db => chain}/src/read_and_hash.rs | 12 +++--- chain/src/transaction.rs | 23 +---------- db/Cargo.toml | 1 - db/src/block_stapler.rs | 4 +- db/src/impls.rs | 49 +++++++++++++++++++++++ db/src/lib.rs | 9 +---- db/src/storage.rs | 13 +++--- db/src/test_storage.rs | 4 +- import/src/block.rs | 4 +- miner/src/block_assembler.rs | 8 ++-- pbtc/cli.yml | 3 ++ pbtc/commands/import.rs | 8 ++-- rpc/src/v1/impls/blockchain.rs | 4 +- rpc/src/v1/types/block_template.rs | 6 +-- script/src/interpreter.rs | 2 +- script/src/verify.rs | 10 ++--- sync/src/blocks_writer.rs | 47 ++++++++++++---------- sync/src/compact_block_builder.rs | 3 +- sync/src/lib.rs | 4 +- sync/src/orphan_blocks_pool.rs | 2 +- sync/src/synchronization_chain.rs | 4 +- sync/src/synchronization_client.rs | 6 +-- sync/src/synchronization_verifier.rs | 8 ++-- 
verification/src/canon.rs | 2 +- verification/src/chain_verifier.rs | 20 +++++----- verification/src/lib.rs | 2 +- verification/src/verify_block.rs | 2 +- verification/src/verify_chain.rs | 2 +- verification/src/verify_header.rs | 2 +- verification/src/verify_transaction.rs | 2 +- 36 files changed, 222 insertions(+), 185 deletions(-) create mode 100644 chain/src/constants.rs rename {db => chain}/src/indexed_block.rs (58%) rename {db => chain}/src/indexed_header.rs (75%) rename {db => chain}/src/indexed_transaction.rs (63%) rename {db => chain}/src/read_and_hash.rs (77%) create mode 100644 db/src/impls.rs diff --git a/Cargo.lock b/Cargo.lock index e47785a4..fff7d286 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -173,7 +173,6 @@ name = "db" version = "0.1.0" dependencies = [ "bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "bitcrypto 0.1.0", "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "chain 0.1.0", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/chain/src/constants.rs b/chain/src/constants.rs new file mode 100644 index 00000000..72ecc5f5 --- /dev/null +++ b/chain/src/constants.rs @@ -0,0 +1,22 @@ + +// Below flags apply in the context of BIP 68 +// If this flag set, CTxIn::nSequence is NOT interpreted as a +// relative lock-time. +pub const SEQUENCE_LOCKTIME_DISABLE_FLAG: u32 = 1u32 << 31; + +// Setting nSequence to this value for every input in a transaction +// disables nLockTime. +pub const SEQUENCE_FINAL: u32 = 0xffffffff; + +// If CTxIn::nSequence encodes a relative lock-time and this flag +// is set, the relative lock-time has units of 512 seconds, +// otherwise it specifies blocks with a granularity of 1. +pub const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = (1 << 22); + +// If CTxIn::nSequence encodes a relative lock-time, this mask is +// applied to extract that lock-time from the sequence field. +pub const SEQUENCE_LOCKTIME_MASK: u32 = 0x0000ffff; + +/// Threshold for `nLockTime`: below this value it is interpreted as block number, +/// otherwise as UNIX timestamp. 
+pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC diff --git a/db/src/indexed_block.rs b/chain/src/indexed_block.rs similarity index 58% rename from db/src/indexed_block.rs rename to chain/src/indexed_block.rs index 9b10f08a..e3ccad7b 100644 --- a/db/src/indexed_block.rs +++ b/chain/src/indexed_block.rs @@ -1,13 +1,14 @@ -use std::io; -use primitives::hash::H256; -use chain::{Block, OutPoint, TransactionOutput, merkle_root, Transaction}; -use serialization::{ +use std::{io, cmp}; +use hash::H256; +use ser::{ Serializable, serialized_list_size, Deserializable, Reader, Error as ReaderError }; +use block::Block; +use transaction::Transaction; +use merkle_root::merkle_root; use indexed_header::IndexedBlockHeader; use indexed_transaction::IndexedTransaction; -use {TransactionOutputObserver, PreviousTransactionOutputProvider}; #[derive(Debug, Clone)] pub struct IndexedBlock { @@ -15,40 +16,6 @@ pub struct IndexedBlock { pub transactions: Vec, } -impl PreviousTransactionOutputProvider for IndexedBlock { - fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { - let txs: &[_] = &self.transactions; - txs.previous_transaction_output(prevout) - } -} - -impl TransactionOutputObserver for IndexedBlock { - fn is_spent(&self, _prevout: &OutPoint) -> Option { - // the code below is valid, but commented out due it's poor performance - // we could optimize it by indexing all outputs once - // let tx: IndexedTransaction = { .. } - // let indexed_outputs: IndexedOutputs = tx.indexed_outputs(); - // indexed_outputs.is_spent() - None - - // if previous transaction output appears more than once than we can safely - // tell that it's spent (double spent) - - //let spends = self.transactions.iter() - //.flat_map(|tx| &tx.raw.inputs) - //.filter(|input| &input.previous_output == prevout) - //.take(2) - //.count(); - - //match spends { - //0 => None, - //1 => Some(false), - //2 => Some(true), - //_ => unreachable!("spends <= 2; self.take(2); qed"), - //} - } -} - impl From for IndexedBlock { fn from(block: Block) -> Self { let Block { block_header, transactions } = block; @@ -60,6 +27,12 @@ impl From for IndexedBlock { } } +impl cmp::PartialEq for IndexedBlock { + fn eq(&self, other: &Self) -> bool { + self.header.hash == other.header.hash + } +} + impl IndexedBlock { pub fn new(header: IndexedBlockHeader, transactions: Vec) -> Self { IndexedBlock { diff --git a/db/src/indexed_header.rs b/chain/src/indexed_header.rs similarity index 75% rename from db/src/indexed_header.rs rename to chain/src/indexed_header.rs index 04dfc9d8..6e2142e9 100644 --- a/db/src/indexed_header.rs +++ b/chain/src/indexed_header.rs @@ -1,7 +1,7 @@ -use std::io; -use primitives::hash::H256; -use chain::BlockHeader; -use serialization::{Deserializable, Reader, Error as ReaderError}; +use std::{io, cmp}; +use hash::H256; +use ser::{Deserializable, Reader, Error as ReaderError}; +use block_header::BlockHeader; use read_and_hash::ReadAndHash; #[derive(Debug, Clone)] @@ -28,6 +28,12 @@ impl IndexedBlockHeader { } } +impl cmp::PartialEq for IndexedBlockHeader { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } +} + impl Deserializable for IndexedBlockHeader { fn deserialize(reader: &mut Reader) -> Result where T: io::Read { let data = try!(reader.read_and_hash::()); diff --git a/db/src/indexed_transaction.rs b/chain/src/indexed_transaction.rs similarity index 63% rename from db/src/indexed_transaction.rs rename to chain/src/indexed_transaction.rs index 899da6a7..d15fd813 100644 --- 
a/db/src/indexed_transaction.rs +++ b/chain/src/indexed_transaction.rs @@ -1,9 +1,8 @@ -use std::{cmp, io}; -use primitives::hash::H256; -use chain::{Transaction, OutPoint, TransactionOutput}; -use serialization::{Deserializable, Reader, Error as ReaderError}; +use std::{cmp, io, borrow}; +use hash::H256; +use ser::{Deserializable, Reader, Error as ReaderError}; +use transaction::Transaction; use read_and_hash::ReadAndHash; -use PreviousTransactionOutputProvider; #[derive(Debug, Clone)] pub struct IndexedTransaction { @@ -48,11 +47,14 @@ impl Deserializable for IndexedTransaction { } } -impl<'a> PreviousTransactionOutputProvider for &'a [IndexedTransaction] { - fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { - self.iter() - .find(|tx| tx.hash == prevout.hash) - .and_then(|tx| tx.raw.outputs.get(prevout.index as usize)) - .cloned() +pub struct IndexedTransactionsRef<'a, T> where T: 'a { + pub transactions: &'a [T], +} + +impl<'a, T> IndexedTransactionsRef<'a, T> where T: borrow::Borrow { + pub fn new(transactions: &'a [T]) -> Self { + IndexedTransactionsRef { + transactions: transactions, + } } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 08d8c4ee..7d0a59b7 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -4,27 +4,34 @@ extern crate primitives; extern crate bitcrypto as crypto; extern crate serialization as ser; +pub mod constants; + mod block; mod block_header; mod merkle_root; mod transaction; +/// `IndexedBlock` extension +mod read_and_hash; +mod indexed_block; +mod indexed_header; +mod indexed_transaction; + pub trait RepresentH256 { - fn h256(&self) -> primitives::hash::H256; + fn h256(&self) -> hash::H256; } pub use rustc_serialize::hex; pub use primitives::{hash, bytes, uint, compact}; -pub use self::block::Block; -pub use self::block_header::BlockHeader; -pub use self::merkle_root::merkle_root; -pub use self::merkle_root::merkle_node_hash; -pub use self::transaction::{ - Transaction, TransactionInput, TransactionOutput, OutPoint, - SEQUENCE_LOCKTIME_DISABLE_FLAG, SEQUENCE_FINAL, - SEQUENCE_LOCKTIME_TYPE_FLAG, SEQUENCE_LOCKTIME_MASK, - LOCKTIME_THRESHOLD -}; +pub use block::Block; +pub use block_header::BlockHeader; +pub use merkle_root::{merkle_root, merkle_node_hash}; +pub use transaction::{Transaction, TransactionInput, TransactionOutput, OutPoint}; + +pub use read_and_hash::{ReadAndHash, HashedData}; +pub use indexed_block::IndexedBlock; +pub use indexed_header::IndexedBlockHeader; +pub use indexed_transaction::{IndexedTransaction, IndexedTransactionsRef}; pub type ShortTransactionID = hash::H48; diff --git a/db/src/read_and_hash.rs b/chain/src/read_and_hash.rs similarity index 77% rename from db/src/read_and_hash.rs rename to chain/src/read_and_hash.rs index 8be41f10..5c294dcf 100644 --- a/db/src/read_and_hash.rs +++ b/chain/src/read_and_hash.rs @@ -1,10 +1,10 @@ use std::io; +use hash::H256; use crypto::{DHash256, Digest}; -use primitives::hash::H256; -use serialization::{Reader, Error as ReaderError, Deserializable}; +use ser::{Reader, Error as ReaderError, Deserializable}; pub struct HashedData { - pub len: usize, + pub size: usize, pub hash: H256, pub data: T, } @@ -15,17 +15,17 @@ pub trait ReadAndHash { impl ReadAndHash for Reader where R: io::Read { fn read_and_hash(&mut self) -> Result, ReaderError> where T: Deserializable { - let mut len = 0usize; + let mut size = 0usize; let mut hasher = DHash256::new(); let data = self.read_with_proxy(|bytes| { - len += bytes.len(); + size += bytes.len(); hasher.input(bytes); })?; let result = 
HashedData { hash: hasher.finish(), data: data, - len: len, + size: size, }; Ok(result) diff --git a/chain/src/transaction.rs b/chain/src/transaction.rs index c2dd5657..9244bec6 100644 --- a/chain/src/transaction.rs +++ b/chain/src/transaction.rs @@ -11,28 +11,7 @@ use ser::{ }; use crypto::dhash256; use hash::H256; - -// Below flags apply in the context of BIP 68 -// If this flag set, CTxIn::nSequence is NOT interpreted as a -// relative lock-time. -pub const SEQUENCE_LOCKTIME_DISABLE_FLAG: u32 = 1u32 << 31; - -// Setting nSequence to this value for every input in a transaction -// disables nLockTime. -pub const SEQUENCE_FINAL: u32 = 0xffffffff; - -// If CTxIn::nSequence encodes a relative lock-time and this flag -// is set, the relative lock-time has units of 512 seconds, -// otherwise it specifies blocks with a granularity of 1. -pub const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = (1 << 22); - -// If CTxIn::nSequence encodes a relative lock-time, this mask is -// applied to extract that lock-time from the sequence field. -pub const SEQUENCE_LOCKTIME_MASK: u32 = 0x0000ffff; - -/// Threshold for `nLockTime`: below this value it is interpreted as block number, -/// otherwise as UNIX timestamp. -pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC +use constants::{SEQUENCE_FINAL, LOCKTIME_THRESHOLD}; #[derive(Debug, PartialEq, Eq, Clone, Default)] pub struct OutPoint { diff --git a/db/Cargo.toml b/db/Cargo.toml index e27e490f..dd92e8d1 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -8,7 +8,6 @@ elastic-array = "0.5" rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" } ethcore-devtools = { path = "../devtools" } primitives = { path = "../primitives" } -bitcrypto = { path = "../crypto" } byteorder = "0.5" chain = { path = "../chain" } serialization = { path = "../serialization" } diff --git a/db/src/block_stapler.rs b/db/src/block_stapler.rs index 20d2cf45..af361db4 100644 --- a/db/src/block_stapler.rs +++ b/db/src/block_stapler.rs @@ -1,7 +1,7 @@ use primitives::hash::H256; -use super::{BlockLocation, IndexedBlock}; -use chain; +use chain::{self, IndexedBlock}; use error::Error; +use super::BlockLocation; #[derive(Debug, PartialEq)] pub struct Reorganization { diff --git a/db/src/impls.rs b/db/src/impls.rs new file mode 100644 index 00000000..ceed0c60 --- /dev/null +++ b/db/src/impls.rs @@ -0,0 +1,49 @@ +use std::borrow::Borrow; +use chain::{OutPoint, TransactionOutput, IndexedTransactionsRef, IndexedTransaction, IndexedBlock}; +use transaction_provider::PreviousTransactionOutputProvider; +use transaction_meta_provider::TransactionOutputObserver; + +impl<'a, T> PreviousTransactionOutputProvider for IndexedTransactionsRef<'a, T> + where T: Borrow + Send + Sync { + fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { + self.transactions.iter() + .map(Borrow::borrow) + .find(|tx| tx.hash == prevout.hash) + .and_then(|tx| tx.raw.outputs.get(prevout.index as usize)) + .cloned() + } +} + +impl PreviousTransactionOutputProvider for IndexedBlock { + fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { + let txs = IndexedTransactionsRef::new(&self.transactions); + txs.previous_transaction_output(prevout) + } +} + +impl TransactionOutputObserver for IndexedBlock { + fn is_spent(&self, _prevout: &OutPoint) -> Option { + // the code below is valid, but commented out due it's poor performance + // we could optimize it by indexing all outputs once + // let tx: IndexedTransaction = { .. 
} + // let indexed_outputs: IndexedOutputs = tx.indexed_outputs(); + // indexed_outputs.is_spent() + None + + // if previous transaction output appears more than once than we can safely + // tell that it's spent (double spent) + + //let spends = self.transactions.iter() + //.flat_map(|tx| &tx.raw.inputs) + //.filter(|input| &input.previous_output == prevout) + //.take(2) + //.count(); + + //match spends { + //0 => None, + //1 => Some(false), + //2 => Some(true), + //_ => unreachable!("spends <= 2; self.take(2); qed"), + //} + } +} diff --git a/db/src/lib.rs b/db/src/lib.rs index cb2eba41..1262e4f7 100644 --- a/db/src/lib.rs +++ b/db/src/lib.rs @@ -1,6 +1,5 @@ //! Bitcoin database -extern crate bitcrypto as crypto; extern crate elastic_array; extern crate rocksdb; extern crate parking_lot; @@ -29,10 +28,7 @@ mod transaction_provider; mod transaction_meta_provider; mod error; mod update_context; -mod indexed_block; -mod indexed_header; -mod indexed_transaction; -mod read_and_hash; +mod impls; #[derive(Debug, Clone)] pub enum BlockRef { @@ -77,9 +73,6 @@ pub use transaction_meta_provider::{TransactionMetaProvider, TransactionOutputOb pub use transaction_meta::TransactionMeta; pub use block_stapler::{BlockStapler, BlockInsertedChain}; pub use block_provider::{BlockProvider, BlockHeaderProvider}; -pub use indexed_block::IndexedBlock; -pub use indexed_header::IndexedBlockHeader; -pub use indexed_transaction::IndexedTransaction; #[cfg(feature="dev")] pub use test_storage::TestStorage; diff --git a/db/src/storage.rs b/db/src/storage.rs index a7c5d6d5..72eacab6 100644 --- a/db/src/storage.rs +++ b/db/src/storage.rs @@ -4,14 +4,13 @@ use std::fs; use std::path::Path; use kvdb::{Database, DatabaseConfig}; use byteorder::{LittleEndian, ByteOrder}; -use primitives::hash::H256; -use primitives::bytes::Bytes; -use super::{BlockRef, BestBlock, BlockLocation, IndexedBlock}; -use serialization::{serialize, deserialize}; -use chain; use parking_lot::RwLock; use lru_cache::LruCache; +use primitives::hash::H256; +use primitives::bytes::Bytes; +use chain::{self, IndexedBlock, IndexedBlockHeader, IndexedTransaction}; +use serialization::{serialize, deserialize}; use transaction_meta::TransactionMeta; use error::{Error, ConsistencyError, MetaError}; use update_context::UpdateContext; @@ -19,9 +18,7 @@ use block_provider::{BlockProvider, BlockHeaderProvider}; use transaction_provider::{TransactionProvider, PreviousTransactionOutputProvider}; use transaction_meta_provider::TransactionMetaProvider; use block_stapler::{BlockStapler, BlockInsertedChain, Reorganization}; - -use indexed_header::IndexedBlockHeader; -use indexed_transaction::IndexedTransaction; +use super::{BlockRef, BestBlock, BlockLocation}; pub const COL_COUNT: u32 = 10; pub const COL_META: u32 = 0; diff --git a/db/src/test_storage.rs b/db/src/test_storage.rs index c5b3928f..2ea1ce18 100644 --- a/db/src/test_storage.rs +++ b/db/src/test_storage.rs @@ -3,9 +3,9 @@ use super::{ BlockRef, Store, Error, BestBlock, BlockLocation, BlockInsertedChain, BlockProvider, BlockStapler, TransactionMetaProvider, TransactionProvider, PreviousTransactionOutputProvider, - IndexedBlock, BlockHeaderProvider, + BlockHeaderProvider, }; -use chain::{self, Block}; +use chain::{self, Block, IndexedBlock}; use primitives::hash::H256; use serialization; use chain::bytes::Bytes; diff --git a/import/src/block.rs b/import/src/block.rs index 0cadd73f..fb893881 100644 --- a/import/src/block.rs +++ b/import/src/block.rs @@ -1,13 +1,13 @@ use std::io; use hash::H32; use 
ser::{Deserializable, Reader, Error as ReaderError}; -use chain; +use chain::IndexedBlock; #[derive(Debug, PartialEq)] pub struct Block { pub magic: H32, pub block_size: u32, - pub block: chain::Block, + pub block: IndexedBlock, } impl Deserializable for Block { diff --git a/miner/src/block_assembler.rs b/miner/src/block_assembler.rs index abf4a268..48ee8692 100644 --- a/miner/src/block_assembler.rs +++ b/miner/src/block_assembler.rs @@ -1,6 +1,6 @@ use primitives::hash::H256; -use chain::{OutPoint, TransactionOutput}; -use db::{SharedStore, IndexedTransaction, PreviousTransactionOutputProvider}; +use chain::{OutPoint, TransactionOutput, IndexedTransaction}; +use db::{SharedStore, PreviousTransactionOutputProvider}; use network::Magic; use memory_pool::{MemoryPool, OrderingStrategy, Entry}; use verification::{work_required, block_reward_satoshi, transaction_sigops}; @@ -252,7 +252,7 @@ impl BlockAssembler { #[cfg(test)] mod tests { - use db::IndexedTransaction; + use chain::{IndexedTransaction, IndexedTransactionsRef}; use verification::constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS}; use memory_pool::Entry; use super::{SizePolicy, NextStep, FittingTransactionsIterator}; @@ -291,8 +291,8 @@ mod tests { #[test] fn test_fitting_transactions_iterator_no_transactions() { let store: Vec = Vec::new(); + let store_ref = IndexedTransactionsRef::new(&store); let entries: Vec = Vec::new(); - let store_ref: &[_] = &store; let iter = FittingTransactionsIterator::new(&store_ref, entries.iter(), MAX_BLOCK_SIZE as u32, MAX_BLOCK_SIGOPS as u32); assert!(iter.collect::>().is_empty()); diff --git a/pbtc/cli.yml b/pbtc/cli.yml index 2c24665f..86c8d1a3 100644 --- a/pbtc/cli.yml +++ b/pbtc/cli.yml @@ -78,3 +78,6 @@ subcommands: - PATH: required: true help: Path of the bitcoin core database + - skip-verification: + long: skip-verification + help: Skip blocks verification diff --git a/pbtc/commands/import.rs b/pbtc/commands/import.rs index ce5c10ef..ee283d7c 100644 --- a/pbtc/commands/import.rs +++ b/pbtc/commands/import.rs @@ -8,9 +8,11 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> { // TODO: this might be unnecessary here! 
try!(init_db(&cfg, &db)); - let mut writer = create_sync_blocks_writer(db, cfg.magic); - let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed"); + let skip_verification = matches.is_present("skip-verification"); + + let mut writer = create_sync_blocks_writer(db, cfg.magic, !skip_verification); + let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned())); let mut counter = 0; for blk in blk_dir { @@ -20,7 +22,7 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> { Ok(_) => { counter += 1; if counter % 1000 == 0 { - info!("Imported {} blocks", counter); + info!(target: "sync", "Imported {} blocks", counter); } } Err(Error::TooManyOrphanBlocks) => return Err("Too many orphan (unordered) blocks".into()), diff --git a/rpc/src/v1/impls/blockchain.rs b/rpc/src/v1/impls/blockchain.rs index 231c735a..6bddd5c4 100644 --- a/rpc/src/v1/impls/blockchain.rs +++ b/rpc/src/v1/impls/blockchain.rs @@ -9,7 +9,7 @@ use v1::helpers::errors::{block_not_found, block_at_height_not_found, transactio transaction_output_not_found, transaction_of_side_branch}; use jsonrpc_macros::Trailing; use jsonrpc_core::Error; -use db; +use {db, chain}; use global_script::Script; use chain::OutPoint; use verification; @@ -69,7 +69,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore { fn verbose_block(&self, hash: GlobalH256) -> Option { self.storage.block(hash.into()) .map(|block| { - let block: db::IndexedBlock = block.into(); + let block: chain::IndexedBlock = block.into(); let height = self.storage.block_number(block.hash()); let confirmations = match height { Some(block_number) => (self.storage.best_block().expect("genesis block is required").number - block_number + 1) as i64, diff --git a/rpc/src/v1/types/block_template.rs b/rpc/src/v1/types/block_template.rs index dac43b48..c06c105c 100644 --- a/rpc/src/v1/types/block_template.rs +++ b/rpc/src/v1/types/block_template.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use super::hash::H256; use super::raw_transaction::RawTransaction; -use db; +use chain; use miner; /// Block template as described in: @@ -96,8 +96,8 @@ impl From for BlockTemplate { } } -impl From for BlockTemplateTransaction { - fn from(transaction: db::IndexedTransaction) -> Self { +impl From for BlockTemplateTransaction { + fn from(transaction: chain::IndexedTransaction) -> Self { use ser::serialize; let serialize = serialize(&transaction.raw); BlockTemplateTransaction { diff --git a/script/src/interpreter.rs b/script/src/interpreter.rs index c73e58ec..a361bbca 100644 --- a/script/src/interpreter.rs +++ b/script/src/interpreter.rs @@ -1,7 +1,7 @@ use std::{cmp, mem}; use bytes::Bytes; use keys::{Signature, Public}; -use chain::SEQUENCE_LOCKTIME_DISABLE_FLAG; +use chain::constants::SEQUENCE_LOCKTIME_DISABLE_FLAG; use crypto::{sha1, sha256, dhash160, dhash256, ripemd160}; use { script, Script, Num, VerificationFlags, Opcode, Error, diff --git a/script/src/verify.rs b/script/src/verify.rs index 6ea02240..d3a5bffa 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -1,7 +1,7 @@ use keys::{Public, Signature}; -use chain::{ - self, SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG, - SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG +use chain::constants::{ + SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG, + SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG, LOCKTIME_THRESHOLD }; use {SignatureVersion, Script, TransactionInputSigner, Num}; @@ -64,8 +64,8 @@ impl SignatureChecker for 
TransactionSignatureChecker { // the nLockTime in the transaction. let lock_time_u32: u32 = lock_time.into(); if !( - (self.signer.lock_time < chain::LOCKTIME_THRESHOLD && lock_time_u32 < chain::LOCKTIME_THRESHOLD) || - (self.signer.lock_time >= chain::LOCKTIME_THRESHOLD && lock_time_u32 >= chain::LOCKTIME_THRESHOLD) + (self.signer.lock_time < LOCKTIME_THRESHOLD && lock_time_u32 < LOCKTIME_THRESHOLD) || + (self.signer.lock_time >= LOCKTIME_THRESHOLD && lock_time_u32 >= LOCKTIME_THRESHOLD) ) { return false; } diff --git a/sync/src/blocks_writer.rs b/sync/src/blocks_writer.rs index 489fcdba..21c35d99 100644 --- a/sync/src/blocks_writer.rs +++ b/sync/src/blocks_writer.rs @@ -17,6 +17,7 @@ pub struct BlocksWriter { orphaned_blocks_pool: OrphanBlocksPool, verifier: SyncVerifier, sink: Arc, + verification: bool, } struct BlocksWriterSink { @@ -29,7 +30,7 @@ struct BlocksWriterSinkData { } impl BlocksWriter { - pub fn new(storage: db::SharedStore, network: Magic) -> BlocksWriter { + pub fn new(storage: db::SharedStore, network: Magic, verification: bool) -> BlocksWriter { let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone())); let sink = Arc::new(BlocksWriterSink::new(sink_data.clone())); let verifier = SyncVerifier::new(network, storage.clone(), sink); @@ -38,18 +39,18 @@ impl BlocksWriter { orphaned_blocks_pool: OrphanBlocksPool::new(), verifier: verifier, sink: sink_data, + verification: verification, } } - pub fn append_block(&mut self, block: chain::Block) -> Result<(), Error> { - let indexed_block: db::IndexedBlock = block.into(); + pub fn append_block(&mut self, block: chain::IndexedBlock) -> Result<(), Error> { // do not append block if it is already there - if self.storage.contains_block(db::BlockRef::Hash(indexed_block.hash().clone())) { + if self.storage.contains_block(db::BlockRef::Hash(block.hash().clone())) { return Ok(()); } // verify && insert only if parent block is already in the storage - if !self.storage.contains_block(db::BlockRef::Hash(indexed_block.header.raw.previous_header_hash.clone())) { - self.orphaned_blocks_pool.insert_orphaned_block(indexed_block.hash().clone(), indexed_block); + if !self.storage.contains_block(db::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) { + self.orphaned_blocks_pool.insert_orphaned_block(block.hash().clone(), block); // we can't hold many orphaned blocks in memory during import if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS { return Err(Error::TooManyOrphanBlocks); @@ -58,13 +59,17 @@ impl BlocksWriter { } // verify && insert block && all its orphan children - let mut verification_queue: VecDeque = self.orphaned_blocks_pool.remove_blocks_for_parent(indexed_block.hash()).into_iter().map(|(_, b)| b).collect(); - verification_queue.push_front(indexed_block); + let mut verification_queue: VecDeque = self.orphaned_blocks_pool.remove_blocks_for_parent(block.hash()).into_iter().map(|(_, b)| b).collect(); + verification_queue.push_front(block); while let Some(block) = verification_queue.pop_front() { - self.verifier.verify_block(block); + if self.verification { + self.verifier.verify_block(block); - if let Some(err) = self.sink.error() { - return Err(err); + if let Some(err) = self.sink.error() { + return Err(err); + } + } else { + try!(self.storage.insert_indexed_block(&block).map_err(Error::Database)); } } @@ -97,7 +102,7 @@ impl VerificationSink for BlocksWriterSink { } impl BlockVerificationSink for BlocksWriterSink { - fn on_block_verification_success(&self, block: db::IndexedBlock) -> Option> { + fn 
on_block_verification_success(&self, block: chain::IndexedBlock) -> Option> { if let Err(err) = self.data.storage.insert_indexed_block(&block) { *self.data.err.lock() = Some(Error::Database(err)); } @@ -132,8 +137,8 @@ mod tests { #[test] fn blocks_writer_appends_blocks() { let db = Arc::new(db::TestStorage::with_genesis_block()); - let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); - blocks_target.append_block(test_data::block_h1()).expect("Expecting no error"); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); + blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error"); assert_eq!(db.best_block().expect("Block is inserted").number, 1); } @@ -141,9 +146,9 @@ mod tests { fn blocks_writer_verification_error() { let db = Arc::new(db::TestStorage::with_genesis_block()); let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1); - let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); for (index, block) in blocks.into_iter().skip(1).enumerate() { - match blocks_target.append_block(block) { + match blocks_target.append_block(block.into()) { Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (), Ok(_) if index != MAX_ORPHANED_BLOCKS => (), _ => panic!("unexpected"), @@ -155,12 +160,12 @@ mod tests { #[test] fn blocks_writer_out_of_order_block() { let db = Arc::new(db::TestStorage::with_genesis_block()); - let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); let wrong_block = test_data::block_builder() .header().parent(test_data::genesis().hash()).build() .build(); - match blocks_target.append_block(wrong_block).unwrap_err() { + match blocks_target.append_block(wrong_block.into()).unwrap_err() { Error::Verification(_) => (), _ => panic!("Unexpected error"), }; @@ -170,12 +175,12 @@ mod tests { #[test] fn blocks_writer_append_to_existing_db() { let db = Arc::new(db::TestStorage::with_genesis_block()); - let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet); + let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true); - assert!(blocks_target.append_block(test_data::genesis()).is_ok()); + assert!(blocks_target.append_block(test_data::genesis().into()).is_ok()); assert_eq!(db.best_block().expect("Block is inserted").number, 0); - assert!(blocks_target.append_block(test_data::block_h1()).is_ok()); + assert!(blocks_target.append_block(test_data::block_h1().into()).is_ok()); assert_eq!(db.best_block().expect("Block is inserted").number, 1); } } diff --git a/sync/src/compact_block_builder.rs b/sync/src/compact_block_builder.rs index 3084f337..217b4bc1 100644 --- a/sync/src/compact_block_builder.rs +++ b/sync/src/compact_block_builder.rs @@ -2,8 +2,7 @@ use std::collections::HashSet; use rand::{thread_rng, Rng}; use bitcrypto::{sha256, siphash24}; use byteorder::{LittleEndian, ByteOrder}; -use chain::{BlockHeader, ShortTransactionID}; -use db::IndexedBlock; +use chain::{BlockHeader, ShortTransactionID, IndexedBlock}; use message::common::{BlockHeaderAndIDs, PrefilledTransaction}; use primitives::hash::H256; use ser::{Stream, Serializable}; diff --git a/sync/src/lib.rs b/sync/src/lib.rs index aebaefbd..96b53144 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -63,8 +63,8 @@ pub enum Error { } /// Create blocks writer. 
-pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic) -> blocks_writer::BlocksWriter { - blocks_writer::BlocksWriter::new(db, network) +pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic, verification: bool) -> blocks_writer::BlocksWriter { + blocks_writer::BlocksWriter::new(db, network, verification) } /// Creates local sync node for given `db` diff --git a/sync/src/orphan_blocks_pool.rs b/sync/src/orphan_blocks_pool.rs index 4e2e3dab..37cfcfc1 100644 --- a/sync/src/orphan_blocks_pool.rs +++ b/sync/src/orphan_blocks_pool.rs @@ -3,7 +3,7 @@ use std::collections::hash_map::Entry; use linked_hash_map::LinkedHashMap; use time; use primitives::hash::H256; -use db::IndexedBlock; +use chain::IndexedBlock; #[derive(Debug)] /// Storage for blocks, for which we have no parent yet. diff --git a/sync/src/synchronization_chain.rs b/sync/src/synchronization_chain.rs index 1a1299f6..69b4a579 100644 --- a/sync/src/synchronization_chain.rs +++ b/sync/src/synchronization_chain.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use std::collections::{VecDeque, HashSet}; use linked_hash_map::LinkedHashMap; use parking_lot::RwLock; -use chain::{BlockHeader, Transaction}; -use db::{self, IndexedBlock}; +use chain::{BlockHeader, Transaction, IndexedBlock}; +use db; use best_headers_chain::{BestHeadersChain, Information as BestHeadersInformation}; use primitives::bytes::Bytes; use primitives::hash::H256; diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs index 9bf722cf..2dfd8db8 100644 --- a/sync/src/synchronization_client.rs +++ b/sync/src/synchronization_client.rs @@ -7,8 +7,8 @@ use futures::{BoxFuture, Future, finished}; use futures::stream::Stream; use tokio_core::reactor::{Handle, Interval}; use futures_cpupool::CpuPool; -use db::{self, IndexedBlock, BlockHeaderProvider, BlockRef}; -use chain::{BlockHeader, Transaction}; +use db::{self, BlockHeaderProvider, BlockRef}; +use chain::{BlockHeader, Transaction, IndexedBlock}; use message::types; use message::common::{InventoryVector, InventoryType}; use primitives::hash::H256; @@ -1312,7 +1312,7 @@ impl SynchronizationClientCore where T: TaskExecutor { } }, BlockAnnouncementType::SendCompactBlock => { - let indexed_blocks: Vec = { + let indexed_blocks: Vec = { let chain = self.chain.read(); new_blocks_hashes.iter() .filter_map(|h| chain.storage().block(db::BlockRef::Hash(h.clone()))) diff --git a/sync/src/synchronization_verifier.rs b/sync/src/synchronization_verifier.rs index b6c7204b..e9d1f99e 100644 --- a/sync/src/synchronization_verifier.rs +++ b/sync/src/synchronization_verifier.rs @@ -2,12 +2,12 @@ use std::thread; use std::collections::VecDeque; use std::sync::Arc; use std::sync::mpsc::{channel, Sender, Receiver}; -use chain::{Transaction, OutPoint, TransactionOutput}; +use chain::{Transaction, OutPoint, TransactionOutput, IndexedBlock}; use network::Magic; use primitives::hash::H256; use synchronization_chain::ChainRef; use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain}; -use db::{SharedStore, IndexedBlock, PreviousTransactionOutputProvider, TransactionOutputObserver}; +use db::{SharedStore, PreviousTransactionOutputProvider, TransactionOutputObserver}; use time::get_time; /// Block verification events sink @@ -231,13 +231,13 @@ pub mod tests { use std::sync::Arc; use std::collections::HashMap; use parking_lot::RwLock; - use chain::Transaction; + use chain::{Transaction, IndexedBlock}; use synchronization_chain::{Chain, ChainRef}; use 
synchronization_client::CoreVerificationSink; use synchronization_executor::tests::DummyTaskExecutor; use primitives::hash::H256; use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, ChainMemoryPoolTransactionOutputProvider}; - use db::{self, IndexedBlock}; + use db; use test_data; #[derive(Default)] diff --git a/verification/src/canon.rs b/verification/src/canon.rs index 9b3e379a..dd8f94ff 100644 --- a/verification/src/canon.rs +++ b/verification/src/canon.rs @@ -1,6 +1,6 @@ use std::ops; use primitives::hash::H256; -use db::{IndexedBlock, IndexedTransaction, IndexedBlockHeader}; +use chain::{IndexedBlock, IndexedTransaction, IndexedBlockHeader}; /// Blocks whose parents are known to be in the chain #[derive(Clone, Copy)] diff --git a/verification/src/chain_verifier.rs b/verification/src/chain_verifier.rs index 3fa700e3..8501a9dc 100644 --- a/verification/src/chain_verifier.rs +++ b/verification/src/chain_verifier.rs @@ -1,10 +1,10 @@ //! Bitcoin chain verifier use hash::H256; -use db::{self, IndexedBlockHeader, BlockLocation, PreviousTransactionOutputProvider, BlockHeaderProvider, TransactionOutputObserver}; +use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, Transaction}; +use db::{BlockLocation, SharedStore, PreviousTransactionOutputProvider, BlockHeaderProvider, TransactionOutputObserver}; use network::Magic; use error::{Error, TransactionError}; -use {Verify, chain}; use canon::{CanonBlock, CanonTransaction}; use duplex_store::{DuplexTransactionOutputProvider, NoopStore}; use verify_chain::ChainVerifier; @@ -12,6 +12,7 @@ use verify_header::HeaderVerifier; use verify_transaction::MemoryPoolTransactionVerifier; use accept_chain::ChainAcceptor; use accept_transaction::MemoryPoolTransactionAcceptor; +use Verify; #[derive(PartialEq, Debug)] /// Block verification chain @@ -28,13 +29,13 @@ pub enum Chain { pub type VerificationResult = Result<Chain, Error>; pub struct BackwardsCompatibleChainVerifier { - store: db::SharedStore, + store: SharedStore, skip_pow: bool, network: Magic, } impl BackwardsCompatibleChainVerifier { - pub fn new(store: db::SharedStore, network: Magic) -> Self { + pub fn new(store: SharedStore, network: Magic) -> Self { BackwardsCompatibleChainVerifier { store: store, skip_pow: false, @@ -48,7 +49,7 @@ impl BackwardsCompatibleChainVerifier { self } - fn verify_block(&self, block: &db::IndexedBlock) -> VerificationResult { + fn verify_block(&self, block: &IndexedBlock) -> VerificationResult { let current_time = ::time::get_time().sec as u32; // first run pre-verification let chain_verifier = ChainVerifier::new(block, self.network, current_time); @@ -77,7 +78,7 @@ impl BackwardsCompatibleChainVerifier { &self, _block_header_provider: &BlockHeaderProvider, hash: &H256, - header: &chain::BlockHeader + header: &BlockHeader ) -> Result<(), Error> { // let's do only preverification // TODO: full verification @@ -92,7 +93,7 @@ impl BackwardsCompatibleChainVerifier { prevout_provider: &T, height: u32, time: u32, - transaction: &chain::Transaction, + transaction: &Transaction, ) -> Result<(), TransactionError> where T: PreviousTransactionOutputProvider + TransactionOutputObserver { let indexed_tx = transaction.clone().into(); // let's do preverification first @@ -117,7 +118,7 @@ impl BackwardsCompatibleChainVerifier { } impl Verify for BackwardsCompatibleChainVerifier { - fn verify(&self, block: &db::IndexedBlock) -> VerificationResult { + fn verify(&self, block: &IndexedBlock) -> VerificationResult { let result = self.verify_block(block); trace!( target:
"verification", "Block {} (transactions: {}) verification finished. Result {:?}", @@ -132,7 +133,8 @@ impl Verify for BackwardsCompatibleChainVerifier { #[cfg(test)] mod tests { use std::sync::Arc; - use db::{TestStorage, Storage, Store, BlockStapler, IndexedBlock}; + use chain::IndexedBlock; + use db::{TestStorage, Storage, Store, BlockStapler}; use network::Magic; use devtools::RandomTempPath; use {script, test_data}; diff --git a/verification/src/lib.rs b/verification/src/lib.rs index a5db9899..91e40b72 100644 --- a/verification/src/lib.rs +++ b/verification/src/lib.rs @@ -112,5 +112,5 @@ pub use work::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_has /// Interface for block verification pub trait Verify : Send + Sync { - fn verify(&self, block: &db::IndexedBlock) -> VerificationResult; + fn verify(&self, block: &chain::IndexedBlock) -> VerificationResult; } diff --git a/verification/src/verify_block.rs b/verification/src/verify_block.rs index cb8937c0..9672a272 100644 --- a/verification/src/verify_block.rs +++ b/verification/src/verify_block.rs @@ -1,5 +1,5 @@ use std::collections::HashSet; -use db::IndexedBlock; +use chain::IndexedBlock; use sigops::transaction_sigops; use duplex_store::NoopStore; use error::{Error, TransactionError}; diff --git a/verification/src/verify_chain.rs b/verification/src/verify_chain.rs index 84c663cd..bce19e14 100644 --- a/verification/src/verify_chain.rs +++ b/verification/src/verify_chain.rs @@ -1,5 +1,5 @@ use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator}; -use db::IndexedBlock; +use chain::IndexedBlock; use network::Magic; use error::Error; use verify_block::BlockVerifier; diff --git a/verification/src/verify_header.rs b/verification/src/verify_header.rs index f71a0d6f..d8d05191 100644 --- a/verification/src/verify_header.rs +++ b/verification/src/verify_header.rs @@ -1,5 +1,5 @@ use primitives::compact::Compact; -use db::IndexedBlockHeader; +use chain::IndexedBlockHeader; use network::Magic; use work::is_valid_proof_of_work; use error::Error; diff --git a/verification/src/verify_transaction.rs b/verification/src/verify_transaction.rs index 49647e40..9e3f7a34 100644 --- a/verification/src/verify_transaction.rs +++ b/verification/src/verify_transaction.rs @@ -1,6 +1,6 @@ use std::ops; use serialization::Serializable; -use db::IndexedTransaction; +use chain::IndexedTransaction; use duplex_store::NoopStore; use sigops::transaction_sigops; use error::TransactionError; From 945c19a8eb89ec5e38a04533ead14ce76f2e5f64 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 13 Dec 2016 23:26:08 +0300 Subject: [PATCH 09/13] replace non-final transactions in mempool --- chain/src/block.rs | 2 +- chain/src/transaction.rs | 12 ++- db/src/indexed_block.rs | 2 +- miner/src/block_assembler.rs | 62 +++++++++++---- miner/src/lib.rs | 3 +- miner/src/memory_pool.rs | 92 +++++++++++++++++++++- sync/src/local_node.rs | 4 +- sync/src/synchronization_chain.rs | 23 ++++++ sync/src/synchronization_verifier.rs | 112 ++++++++++++++++----------- test-data/src/chain_builder.rs | 10 ++- 10 files changed, 253 insertions(+), 69 deletions(-) diff --git a/chain/src/block.rs b/chain/src/block.rs index ff4278d8..03cedd87 100644 --- a/chain/src/block.rs +++ b/chain/src/block.rs @@ -68,7 +68,7 @@ impl Block { } pub fn is_final(&self, height: u32) -> bool { - self.transactions.iter().all(|t| t.is_final(height, self.block_header.time)) + self.transactions.iter().all(|t| t.is_final_in_block(height, self.block_header.time)) } } diff 
--git a/chain/src/transaction.rs b/chain/src/transaction.rs index c2dd5657..8e6c984f 100644 --- a/chain/src/transaction.rs +++ b/chain/src/transaction.rs @@ -250,7 +250,17 @@ impl Transaction { self.inputs.len() == 1 && self.inputs[0].previous_output.is_null() } - pub fn is_final(&self, block_height: u32, block_time: u32) -> bool { + pub fn is_final(&self) -> bool { + // if lock_time is 0, transaction is final + if self.lock_time == 0 { + return true; + } + // setting all sequence numbers to 0xffffffff disables the time lock, so if you want to use locktime, + // at least one input must have a sequence number below the maximum. + self.inputs.iter().all(TransactionInput::is_final) + } + + pub fn is_final_in_block(&self, block_height: u32, block_time: u32) -> bool { if self.lock_time == 0 { return true; } diff --git a/db/src/indexed_block.rs b/db/src/indexed_block.rs index dee6936b..e788a580 100644 --- a/db/src/indexed_block.rs +++ b/db/src/indexed_block.rs @@ -84,6 +84,6 @@ impl IndexedBlock { } pub fn is_final(&self, height: u32) -> bool { - self.transactions.iter().all(|tx| tx.raw.is_final(height, self.header.raw.time)) + self.transactions.iter().all(|tx| tx.raw.is_final_in_block(height, self.header.raw.time)) } } diff --git a/miner/src/block_assembler.rs b/miner/src/block_assembler.rs index abf4a268..eb119947 100644 --- a/miner/src/block_assembler.rs +++ b/miner/src/block_assembler.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use primitives::hash::H256; use chain::{OutPoint, TransactionOutput}; use db::{SharedStore, IndexedTransaction, PreviousTransactionOutputProvider}; @@ -102,10 +103,6 @@ impl SizePolicy { self.finish_counter += 1; } - if fits { - self.current_size += size; - } - match (fits, finish) { (true, true) => NextStep::FinishAndAppend, (true, false) => NextStep::Append, @@ -113,6 +110,10 @@ impl SizePolicy { (false, false) => NextStep::Ignore, } } + + fn apply(&mut self, size: u32) { + self.current_size += size; + } } /// Block assembler @@ -136,25 +137,34 @@ struct FittingTransactionsIterator<'a, T> { store: &'a PreviousTransactionOutputProvider, /// Memory pool transactions iterator iter: T, + /// New block height + block_height: u32, + /// New block time + block_time: u32, /// Size policy decides if transactions size fits the block block_size: SizePolicy, /// Sigops policy decides if transactions sigops fits the block sigops: SizePolicy, /// Previous entries are needed to get previous transaction outputs previous_entries: Vec<&'a Entry>, + /// Hashes of ignored entries + ignored: HashSet<H256>, /// True if block is already full finished: bool, } impl<'a, T> FittingTransactionsIterator<'a, T> where T: Iterator<Item = &'a Entry> { fn new(store: &'a PreviousTransactionOutputProvider, iter: T, max_block_size: u32, max_block_sigops: u32, block_height: u32, block_time: u32) -> Self { FittingTransactionsIterator { store: store, iter: iter, + block_height: block_height, + block_time: block_time, // reserve some space for header and transactions len field block_size: SizePolicy::new(BLOCK_HEADER_SIZE + 4, max_block_size, 1_000, 50), sigops: SizePolicy::new(0, max_block_sigops, 8, 50), previous_entries: Vec::new(), + ignored: HashSet::new(), finished: false, } } @@ -192,18 +202,34 @@ impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<Item = &'a Entry> { + self.block_size.apply(transaction_size); + self.sigops.apply(transaction_size); self.previous_entries.push(entry); return
Some(entry); }, NextStep::FinishAndAppend => { self.finished = true; + self.block_size.apply(transaction_size); + self.sigops.apply(transaction_size); self.previous_entries.push(entry); return Some(entry); }, NextStep::Ignore => (), NextStep::FinishAndIgnore => { + self.ignored.insert(entry.hash.clone()); self.finished = true; }, } @@ -227,7 +253,7 @@ impl BlockAssembler { let mut transactions = Vec::new(); let mempool_iter = mempool.iter(OrderingStrategy::ByTransactionScore); - let tx_iter = FittingTransactionsIterator::new(store.as_previous_transaction_output_provider(), mempool_iter, self.max_block_size, self.max_block_sigops); + let tx_iter = FittingTransactionsIterator::new(store.as_previous_transaction_output_provider(), mempool_iter, self.max_block_size, self.max_block_sigops, height, time); for entry in tx_iter { // miner_fee is i64, but we can safely cast it to u64 // memory pool should restrict miner fee to be positive @@ -260,18 +286,18 @@ mod tests { #[test] fn test_size_policy() { let mut size_policy = SizePolicy::new(0, 1000, 200, 3); - assert_eq!(size_policy.decide(100), NextStep::Append); - assert_eq!(size_policy.decide(500), NextStep::Append); + assert_eq!(size_policy.decide(100), NextStep::Append); size_policy.apply(100); + assert_eq!(size_policy.decide(500), NextStep::Append); size_policy.apply(500); assert_eq!(size_policy.decide(600), NextStep::Ignore); - assert_eq!(size_policy.decide(200), NextStep::Append); + assert_eq!(size_policy.decide(200), NextStep::Append); size_policy.apply(200); assert_eq!(size_policy.decide(300), NextStep::Ignore); assert_eq!(size_policy.decide(300), NextStep::Ignore); // this transaction will make counter + buffer > max size - assert_eq!(size_policy.decide(1), NextStep::Append); + assert_eq!(size_policy.decide(1), NextStep::Append); size_policy.apply(1); // so now only 3 more transactions may accepted / ignored - assert_eq!(size_policy.decide(1), NextStep::Append); + assert_eq!(size_policy.decide(1), NextStep::Append); size_policy.apply(1); assert_eq!(size_policy.decide(1000), NextStep::Ignore); - assert_eq!(size_policy.decide(1), NextStep::FinishAndAppend); + assert_eq!(size_policy.decide(1), NextStep::FinishAndAppend); size_policy.apply(1); // we should not call decide again after it returned finish... 
// but we can, let's check if result is ok assert_eq!(size_policy.decide(1000), NextStep::FinishAndIgnore); @@ -294,11 +320,21 @@ mod tests { let entries: Vec<Entry> = Vec::new(); let store_ref: &[_] = &store; - let iter = FittingTransactionsIterator::new(&store_ref, entries.iter(), MAX_BLOCK_SIZE as u32, MAX_BLOCK_SIGOPS as u32); + let iter = FittingTransactionsIterator::new(&store_ref, entries.iter(), MAX_BLOCK_SIZE as u32, MAX_BLOCK_SIGOPS as u32, 0, 0); assert!(iter.collect::<Vec<_>>().is_empty()); } #[test] fn test_fitting_transactions_iterator_max_block_size_reached() { } + + #[test] + fn test_fitting_transactions_iterator_ignored_parent() { + // TODO + } + + #[test] + fn test_fitting_transactions_iterator_locked_transaction() { + // TODO + } } diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 27cd1bde..9226a690 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -17,5 +17,6 @@ mod memory_pool; pub use block_assembler::{BlockAssembler, BlockTemplate}; pub use cpu_miner::find_solution; -pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy}; +pub use memory_pool::{MemoryPool, HashedOutPoint, DoubleSpendCheckResult, Information as MemoryPoolInformation, + OrderingStrategy as MemoryPoolOrderingStrategy}; pub use fee::{transaction_fee, transaction_fee_rate}; diff --git a/miner/src/memory_pool.rs b/miner/src/memory_pool.rs index 72a20d0f..faccaa49 100644 --- a/miner/src/memory_pool.rs +++ b/miner/src/memory_pool.rs @@ -141,11 +141,21 @@ struct ByPackageScoreOrderedEntry { } #[derive(Debug, PartialEq, Eq, Clone)] -struct HashedOutPoint { - /// Transasction output point +pub struct HashedOutPoint { + /// Transaction output point out_point: OutPoint, } +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum DoubleSpendCheckResult { + /// No double spend + NoDoubleSpend, + /// Input {self.1, self.2} of new transaction is already spent in previous final memory-pool transaction {self.0} + DoubleSpend(H256, H256, u32), + /// Some inputs of new transaction are already spent by locked memory-pool transactions + LockedDoubleSpend(HashSet<HashedOutPoint>), +} + impl From<OutPoint> for HashedOutPoint { fn from(out_point: OutPoint) -> Self { HashedOutPoint { @@ -400,6 +410,41 @@ impl Storage { }) } + pub fn check_double_spend(&self, transaction: &Transaction) -> DoubleSpendCheckResult { + let mut locked_spends: HashSet<HashedOutPoint> = HashSet::new(); + for input in &transaction.inputs { + let mut queue: VecDeque<OutPoint> = VecDeque::new(); + queue.push_back(input.previous_output.clone()); + + while let Some(prevout) = queue.pop_front() { + // if the same output is already spent with another transaction + if let Some(entry_hash) = self.by_previous_output.get(&prevout.clone().into()).cloned() { + let entry = self.by_hash.get(&entry_hash).expect("checked that it exists line above; qed"); + // check if this is final transaction.
If so, that's a double-spend error + if entry.transaction.is_final() { + return DoubleSpendCheckResult::DoubleSpend(entry_hash, prevout.hash, prevout.index); + } + // this prevout is held by a locked transaction + locked_spends.insert(prevout.into()); + // transaction is not final => new transaction possibly replaces it in memory pool + // we should also 'virtually exclude' all descendant transactions from memory pool + let locked_outputs: Vec<_> = entry.transaction.outputs.iter().enumerate().map(|(idx, _)| OutPoint { + hash: entry_hash.clone(), + index: idx as u32, + }).collect(); + locked_spends.extend(locked_outputs.iter().cloned().map(Into::into)); + queue.extend(locked_outputs); + } + } + } + + if locked_spends.is_empty() { + DoubleSpendCheckResult::NoDoubleSpend + } else { + DoubleSpendCheckResult::LockedDoubleSpend(locked_spends) + } + } + pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<Transaction>> { let mut queue: VecDeque<OutPoint> = VecDeque::new(); let mut removed: Vec<Transaction> = Vec::new(); @@ -407,7 +452,7 @@ impl Storage { while let Some(prevout) = queue.pop_front() { if let Some(entry_hash) = self.by_previous_output.get(&prevout.clone().into()).cloned() { - let entry = self.remove_by_hash(&entry_hash).expect("checket that it exists line above; qed"); + let entry = self.remove_by_hash(&entry_hash).expect("checked that it exists line above; qed"); queue.extend(entry.transaction.outputs.iter().enumerate().map(|(idx, _)| OutPoint { hash: entry_hash.clone(), index: idx as u32, @@ -604,6 +649,11 @@ impl MemoryPool { self.storage.remove_by_hash(h).map(|entry| entry.transaction) } + /// Checks double spend result + pub fn check_double_spend(&self, transaction: &Transaction) -> DoubleSpendCheckResult { + self.storage.check_double_spend(transaction) + } + /// Removes transaction (and all its descendants) which has spent given output pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<Transaction>> { self.storage.remove_by_prevout(prevout) @@ -795,7 +845,7 @@ impl<'a> Iterator for MemoryPoolIterator<'a> { mod tests { use chain::{Transaction, OutPoint}; use heapsize::HeapSizeOf; - use super::{MemoryPool, OrderingStrategy}; + use super::{MemoryPool, OrderingStrategy, DoubleSpendCheckResult}; use test_data::{ChainBuilder, TransactionBuilder}; fn to_memory_pool(chain: &mut ChainBuilder) -> MemoryPool { @@ -1222,4 +1272,38 @@ mod tests { assert_eq!(pool.remove_by_prevout(&OutPoint { hash: chain.hash(0), index: 0 }), Some(vec![chain.at(1), chain.at(2)])); assert_eq!(pool.information().transactions_count, 2); } + + #[test] + fn test_memory_pool_check_double_spend() { + let chain = &mut ChainBuilder::new(); + + TransactionBuilder::with_output(10).add_output(10).add_output(10).store(chain) // transaction0 + .reset().set_input(&chain.at(0), 0).add_output(20).lock().store(chain) // locked: transaction0 -> transaction1 + .reset().set_input(&chain.at(0), 0).add_output(30).store(chain) // good replacement: transaction0 -> transaction2 + .reset().set_input(&chain.at(0), 1).add_output(40).store(chain) // not-locked: transaction0 -> transaction3 + .reset().set_input(&chain.at(0), 1).add_output(50).store(chain) // bad replacement: transaction0 -> transaction4 + .reset().set_input(&chain.at(0), 2).add_output(60).store(chain); // no double spending: transaction0 -> transaction5 + let mut pool = MemoryPool::new(); + pool.insert_verified(chain.at(1)); + pool.insert_verified(chain.at(3)); + // check locked double spends + match pool.check_double_spend(&chain.at(2)) { + DoubleSpendCheckResult::LockedDoubleSpend(hs) =>
assert!(hs.contains(&chain.at(1).inputs[0].previous_output.clone().into())), _ => panic!("unexpected"), } // check unlocked double spends match pool.check_double_spend(&chain.at(4)) { DoubleSpendCheckResult::DoubleSpend(hash1, hash2, index) => { assert_eq!(hash1, chain.at(3).hash()); assert_eq!(hash2, chain.at(0).hash()); assert_eq!(index, 1); }, _ => panic!("unexpected"), } // check no-double spends match pool.check_double_spend(&chain.at(5)) { DoubleSpendCheckResult::NoDoubleSpend => (), _ => panic!("unexpected"), } } } diff --git a/sync/src/local_node.rs b/sync/src/local_node.rs index ac7f81fb..5ec801a5 100644 --- a/sync/src/local_node.rs +++ b/sync/src/local_node.rs @@ -634,12 +634,12 @@ mod tests { let transaction_hash = transaction.hash(); let result = local_node.accept_transaction(transaction); - assert_eq!(result, Ok(transaction_hash)); + assert_eq!(result, Ok(transaction_hash.clone())); assert_eq!(executor.lock().take_tasks(), vec![Task::SendInventory(peer_index1, vec![InventoryVector { inv_type: InventoryType::MessageTx, - hash: "0791efccd035c5fe501023ff888106eba5eff533965de4a6e06400f623bcac34".into(), + hash: transaction_hash, }] )] ); diff --git a/sync/src/synchronization_chain.rs b/sync/src/synchronization_chain.rs index 1a1299f6..28a1ea4d 100644 --- a/sync/src/synchronization_chain.rs +++ b/sync/src/synchronization_chain.rs @@ -677,6 +677,13 @@ impl Chain { /// Insert transaction to memory pool pub fn insert_verified_transaction(&mut self, transaction: Transaction) { + // we have verified transaction, but possibly this transaction replaces + // existing transaction from memory pool + // => remove previous transactions first + for input in &transaction.inputs { + self.memory_pool.remove_by_prevout(&input.previous_output); + } + // now insert transaction itself self.memory_pool.insert_verified(transaction); } @@ -1269,4 +1276,20 @@ mod tests { headers[4].clone(), ]), HeadersIntersection::DeadEnd(0)); } + + #[test] + fn update_memory_pool_transaction() { + use test_data::{ChainBuilder, TransactionBuilder}; + + let data_chain = &mut ChainBuilder::new(); + TransactionBuilder::with_output(10).add_output(10).add_output(10).store(data_chain) // transaction0 + .reset().set_input(&data_chain.at(0), 0).add_output(20).lock().store(data_chain) // transaction0 -> transaction1 + .reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2 + + let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); + chain.insert_verified_transaction(data_chain.at(1)); + assert_eq!(chain.information().transactions.transactions_count, 1); + chain.insert_verified_transaction(data_chain.at(2)); + assert_eq!(chain.information().transactions.transactions_count, 1); // tx was replaced + } } diff --git a/sync/src/synchronization_verifier.rs b/sync/src/synchronization_verifier.rs index b6c7204b..c1c9492a 100644 --- a/sync/src/synchronization_verifier.rs +++ b/sync/src/synchronization_verifier.rs @@ -1,12 +1,13 @@ use std::thread; -use std::collections::VecDeque; +use std::collections::{VecDeque, HashSet}; use std::sync::Arc; use std::sync::mpsc::{channel, Sender, Receiver}; use chain::{Transaction, OutPoint, TransactionOutput}; use network::Magic; +use miner::{HashedOutPoint, DoubleSpendCheckResult}; use primitives::hash::H256; use synchronization_chain::ChainRef; -use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain}; +use verification::{self,
BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain}; use db::{SharedStore, IndexedBlock, PreviousTransactionOutputProvider, TransactionOutputObserver}; use time::get_time; @@ -57,12 +58,23 @@ pub struct AsyncVerifier { verification_worker_thread: Option>, } +/// Transaction output observer, which looks into storage && into memory pool struct ChainMemoryPoolTransactionOutputProvider { + /// Chain reference chain: ChainRef, + /// Previous outputs, for which we should return 'Not spent' value. + /// These are used when new version of transaction is received. + locked_outputs: Option>, } -#[derive(Default)] -struct EmptyTransactionOutputProvider { +impl VerificationTask { + /// Returns transaction reference if it is transaction verification task + pub fn transaction(&self) -> Option<&Transaction> { + match self { + &VerificationTask::VerifyTransaction(_, ref transaction) => Some(&transaction), + _ => None, + } + } } impl AsyncVerifier { @@ -86,8 +98,22 @@ impl AsyncVerifier { match task { VerificationTask::Stop => break, _ => { - let prevout_provider = ChainMemoryPoolTransactionOutputProvider::with_chain(chain.clone()); - execute_verification_task(&sink, &prevout_provider, &verifier, task) + let prevout_provider = if let Some(ref transaction) = task.transaction() { + match ChainMemoryPoolTransactionOutputProvider::for_transaction(chain.clone(), transaction) { + Err(e) => { + sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash()); + return; + }, + Ok(prevout_provider) => Some(prevout_provider), + } + } else { + None + }; + let prevout_provider = match prevout_provider { + Some(ref prevout_provider) => Some(prevout_provider), + None => None, + }; + execute_verification_task(&sink, prevout_provider, &verifier, task) }, } } @@ -137,23 +163,23 @@ impl SyncVerifier where T: VerificationSink { verifier: verifier, sink: sink, } - } } + } impl Verifier for SyncVerifier where T: VerificationSink { /// Verify block fn verify_block(&self, block: IndexedBlock) { - execute_verification_task(&self.sink, &EmptyTransactionOutputProvider::default(), &self.verifier, VerificationTask::VerifyBlock(block)) + execute_verification_task::(&self.sink, None, &self.verifier, VerificationTask::VerifyBlock(block)) } /// Verify transaction - fn verify_transaction(&self, height: u32, transaction: Transaction) { - execute_verification_task(&self.sink, &EmptyTransactionOutputProvider::default(), &self.verifier, VerificationTask::VerifyTransaction(height, transaction)) + fn verify_transaction(&self, _height: u32, _transaction: Transaction) { + unimplemented!() // sync verifier is currently only used for blocks verification } } /// Execute single verification task -fn execute_verification_task(sink: &Arc, tx_output_provider: &U, verifier: &ChainVerifier, task: VerificationTask) { +fn execute_verification_task(sink: &Arc, tx_output_provider: Option<&U>, verifier: &ChainVerifier, task: VerificationTask) { let mut tasks_queue: VecDeque = VecDeque::new(); tasks_queue.push_back(task); @@ -178,6 +204,7 @@ fn execute_verification_task { let time: u32 = get_time().sec as u32; + let tx_output_provider = tx_output_provider.expect("must be provided for transaction checks"); match verifier.verify_mempool_transaction(tx_output_provider, height, time, &transaction) { Ok(_) => sink.on_transaction_verification_success(transaction), Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash()), @@ -189,13 +216,36 @@ fn execute_verification_task Self { - 
ChainMemoryPoolTransactionOutputProvider { - chain: chain, + pub fn for_transaction(chain: ChainRef, transaction: &Transaction) -> Result { + // we have to check if there are another in-mempool transactions which spent same outputs here + match chain.read().memory_pool().check_double_spend(transaction) { + DoubleSpendCheckResult::DoubleSpend(_, hash, index) => Err(verification::TransactionError::UsingSpentOutput(hash, index)), + DoubleSpendCheckResult::NoDoubleSpend => Ok(ChainMemoryPoolTransactionOutputProvider { + chain: chain.clone(), + locked_outputs: None, + }), + DoubleSpendCheckResult::LockedDoubleSpend(locked_outputs) => Ok(ChainMemoryPoolTransactionOutputProvider { + chain: chain.clone(), + locked_outputs: Some(locked_outputs), + }), } } } +impl TransactionOutputObserver for ChainMemoryPoolTransactionOutputProvider { + fn is_spent(&self, prevout: &OutPoint) -> Option { + // check if this output is 'locked' by mempool transaction + if let Some(ref locked_outputs) = self.locked_outputs { + if locked_outputs.contains(&prevout.clone().into()) { + return Some(false); + } + } + + // check spending in storage + self.chain.read().storage().transaction_meta(&prevout.hash).and_then(|tm| tm.is_spent(prevout.index as usize)) + } +} + impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvider { fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { let chain = self.chain.read(); @@ -204,39 +254,16 @@ impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvi } } -impl TransactionOutputObserver for ChainMemoryPoolTransactionOutputProvider { - fn is_spent(&self, prevout: &OutPoint) -> Option { - let chain = self.chain.read(); - if chain.memory_pool().is_spent(prevout) { - return Some(true); - } - chain.storage().transaction_meta(&prevout.hash).and_then(|tm| tm.is_spent(prevout.index as usize)) - } -} - -impl PreviousTransactionOutputProvider for EmptyTransactionOutputProvider { - fn previous_transaction_output(&self, _prevout: &OutPoint) -> Option { - None - } -} - -impl TransactionOutputObserver for EmptyTransactionOutputProvider { - fn is_spent(&self, _prevout: &OutPoint) -> Option { - None - } -} - #[cfg(test)] pub mod tests { use std::sync::Arc; use std::collections::HashMap; - use parking_lot::RwLock; use chain::Transaction; - use synchronization_chain::{Chain, ChainRef}; + use synchronization_chain::Chain; use synchronization_client::CoreVerificationSink; use synchronization_executor::tests::DummyTaskExecutor; use primitives::hash::H256; - use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, ChainMemoryPoolTransactionOutputProvider}; + use super::{Verifier, BlockVerificationSink, TransactionVerificationSink}; use db::{self, IndexedBlock}; use test_data; @@ -283,16 +310,13 @@ pub mod tests { #[test] fn when_transaction_spends_output_twice() { - use db::TransactionOutputObserver; let tx1: Transaction = test_data::TransactionBuilder::with_default_input(0).into(); let tx2: Transaction = test_data::TransactionBuilder::with_default_input(1).into(); let out1 = tx1.inputs[0].previous_output.clone(); let out2 = tx2.inputs[0].previous_output.clone(); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); chain.memory_pool_mut().insert_verified(tx1); - let chain = ChainRef::new(RwLock::new(chain)); - let provider = ChainMemoryPoolTransactionOutputProvider::with_chain(chain); - assert!(provider.is_spent(&out1).unwrap_or_default()); - assert!(!provider.is_spent(&out2).unwrap_or_default()); + 
assert!(chain.memory_pool().is_spent(&out1)); + assert!(!chain.memory_pool().is_spent(&out2)); } } diff --git a/test-data/src/chain_builder.rs b/test-data/src/chain_builder.rs index 5a37f9b1..62d23f83 100644 --- a/test-data/src/chain_builder.rs +++ b/test-data/src/chain_builder.rs @@ -97,7 +97,7 @@ impl TransactionBuilder { index: output_index, }, script_sig: Bytes::new_with_len(0), - sequence: 0, + sequence: 0xffffffff, }); self } @@ -113,11 +113,17 @@ impl TransactionBuilder { index: output_index, }, script_sig: Bytes::new_with_len(0), - sequence: 0, + sequence: 0xffffffff, }]; self } + pub fn lock(mut self) -> Self { + self.transaction.inputs[0].sequence = 0; + self.transaction.lock_time = 500000; + self + } + pub fn store(self, chain: &mut ChainBuilder) -> Self { chain.transactions.push(self.transaction.clone()); self From 768912c759680bc201d2e9a14b1f8cc17d1fc057 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 14 Dec 2016 00:23:23 +0300 Subject: [PATCH 10/13] fix after merge --- sync/src/synchronization_verifier.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sync/src/synchronization_verifier.rs b/sync/src/synchronization_verifier.rs index 5c71eae9..e35aba2a 100644 --- a/sync/src/synchronization_verifier.rs +++ b/sync/src/synchronization_verifier.rs @@ -8,7 +8,7 @@ use miner::{HashedOutPoint, DoubleSpendCheckResult}; use primitives::hash::H256; use synchronization_chain::ChainRef; use verification::{self, BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain}; -use db::{SharedStore, IndexedBlock, PreviousTransactionOutputProvider, TransactionOutputObserver}; +use db::{SharedStore, PreviousTransactionOutputProvider, TransactionOutputObserver}; use time::get_time; /// Block verification events sink @@ -263,8 +263,9 @@ pub mod tests { use synchronization_client::CoreVerificationSink; use synchronization_executor::tests::DummyTaskExecutor; use primitives::hash::H256; + use chain::IndexedBlock; use super::{Verifier, BlockVerificationSink, TransactionVerificationSink}; - use db::{self, IndexedBlock}; + use db; use test_data; #[derive(Default)] From 4fdf56ccf47179f8a9adbab90d5fab1ee15f383a Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 13 Dec 2016 23:26:39 +0100 Subject: [PATCH 11/13] move overflow into separate error type --- verification/src/accept_block.rs | 12 ++++++++++-- verification/src/error.rs | 4 ++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/verification/src/accept_block.rs b/verification/src/accept_block.rs index 5e158a33..9dfaaf59 100644 --- a/verification/src/accept_block.rs +++ b/verification/src/accept_block.rs @@ -127,8 +127,16 @@ impl<'a> BlockRule for BlockCoinbaseClaim<'a> { let claim = self.block.transactions[0].raw.total_spends(); let (fees, overflow) = available.overflowing_sub(spends); - let (reward, overflow2) = fees.overflowing_add(block_reward_satoshi(self.height)); - if overflow || overflow2 || claim > reward { + if overflow { + return Err(Error::TransactionFeesOverflow); + } + + let (reward, overflow) = fees.overflowing_add(block_reward_satoshi(self.height)); + if overflow { + return Err(Error::TransactionFeeAndRewardOverflow); + } + + if claim > reward { Err(Error::CoinbaseOverspend { expected_max: reward, actual: claim }) } else { Ok(()) diff --git a/verification/src/error.rs b/verification/src/error.rs index 3bbfb1e6..72b2f8f0 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -36,6 +36,10 @@ pub enum Error { NonFinalBlock, /// Old version
block. OldVersionBlock, + /// Sum of the transaction fees in block + coinbase reward exceeds u64::max + TransactionFeeAndRewardOverflow, + /// Sum of the transaction fees in block exceeds u64::max + TransactionFeesOverflow, } #[derive(Debug, PartialEq)] From a9dcc0d6f7085b074b4df23d45a0fc30d2254dda Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 14 Dec 2016 02:08:07 +0300 Subject: [PATCH 12/13] fix bad pattern --- sync/src/synchronization_verifier.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sync/src/synchronization_verifier.rs b/sync/src/synchronization_verifier.rs index e35aba2a..26e53d5f 100644 --- a/sync/src/synchronization_verifier.rs +++ b/sync/src/synchronization_verifier.rs @@ -109,11 +109,7 @@ impl AsyncVerifier { } else { None }; - let prevout_provider = match prevout_provider { - Some(ref prevout_provider) => Some(prevout_provider), - None => None, - }; - execute_verification_task(&sink, prevout_provider, &verifier, task) + execute_verification_task(&sink, prevout_provider.as_ref(), &verifier, task) }, } } From be53bbdc3e14582718b1b1a446a973a7ab3a872a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 14 Dec 2016 14:22:02 +0300 Subject: [PATCH 13/13] added some more tests --- miner/src/lib.rs | 4 +- miner/src/memory_pool.rs | 162 +++++++++++++++++++-------- sync/src/synchronization_client.rs | 5 + sync/src/synchronization_verifier.rs | 82 +++++++++++--- 4 files changed, 190 insertions(+), 63 deletions(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 9226a690..c2f38adc 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -17,6 +17,6 @@ mod memory_pool; pub use block_assembler::{BlockAssembler, BlockTemplate}; pub use cpu_miner::find_solution; -pub use memory_pool::{MemoryPool, HashedOutPoint, DoubleSpendCheckResult, Information as MemoryPoolInformation, - OrderingStrategy as MemoryPoolOrderingStrategy}; +pub use memory_pool::{MemoryPool, HashedOutPoint, Information as MemoryPoolInformation, + OrderingStrategy as MemoryPoolOrderingStrategy, DoubleSpendCheckResult, NonFinalDoubleSpendSet}; pub use fee::{transaction_fee, transaction_fee_rate}; diff --git a/miner/src/memory_pool.rs b/miner/src/memory_pool.rs index faccaa49..fda41156 100644 --- a/miner/src/memory_pool.rs +++ b/miner/src/memory_pool.rs @@ -146,14 +146,26 @@ pub struct HashedOutPoint { out_point: OutPoint, } -#[derive(Debug, PartialEq, Eq, Clone)] +/// Result of checking double spend with the memory pool +#[derive(Debug, PartialEq)] pub enum DoubleSpendCheckResult { /// No double spend NoDoubleSpend, /// Input {self.1, self.2} of new transaction is already spent in previous final memory-pool transaction {self.0} DoubleSpend(H256, H256, u32), - /// Some inputs of new transaction are already spent by locked memory-pool transactions - LockedDoubleSpend(HashSet<HashedOutPoint>), + /// Some inputs of new transaction are already spent by non-final memory-pool transactions + NonFinalDoubleSpend(NonFinalDoubleSpendSet), +} + +/// Set of transaction outputs, which can be replaced if newer transaction +/// replaces non-final transaction in memory pool +#[derive(Debug, PartialEq)] +pub struct NonFinalDoubleSpendSet { + /// Double-spend outputs (outputs of newer transaction, which are also spent by nonfinal transactions of mempool) + pub double_spends: HashSet<HashedOutPoint>, + /// Outputs which will also be removed from memory pool in case of newer transaction insertion + /// (i.e.
outputs of nonfinal transactions && their descendants) + pub dependent_spends: HashSet<HashedOutPoint>, } impl From<OutPoint> for HashedOutPoint { @@ -411,37 +423,45 @@ impl Storage { } pub fn check_double_spend(&self, transaction: &Transaction) -> DoubleSpendCheckResult { - let mut locked_spends: HashSet<HashedOutPoint> = HashSet::new(); + let mut double_spends: HashSet<HashedOutPoint> = HashSet::new(); + let mut dependent_spends: HashSet<HashedOutPoint> = HashSet::new(); for input in &transaction.inputs { - let mut queue: VecDeque<OutPoint> = VecDeque::new(); - queue.push_back(input.previous_output.clone()); + // find transaction that spends the same output + let prevout: HashedOutPoint = input.previous_output.clone().into(); + if let Some(entry_hash) = self.by_previous_output.get(&prevout).cloned() { + // check if this is final transaction. If so, that's a potential double-spend error + let entry = self.by_hash.get(&entry_hash).expect("checked that it exists line above; qed"); + if entry.transaction.is_final() { + return DoubleSpendCheckResult::DoubleSpend(entry_hash, prevout.out_point.hash, prevout.out_point.index); + } + // else remember this double spend + double_spends.insert(prevout.clone()); + // and 'virtually' remove entry && all descendants from mempool + let mut queue: VecDeque<HashedOutPoint> = VecDeque::new(); + queue.push_back(prevout); + while let Some(dependent_prevout) = queue.pop_front() { + // if the same output is already spent with another in-pool transaction + if let Some(dependent_entry_hash) = self.by_previous_output.get(&dependent_prevout).cloned() { + let dependent_entry = self.by_hash.get(&dependent_entry_hash).expect("checked that it exists line above; qed"); + let dependent_outputs: Vec<_> = dependent_entry.transaction.outputs.iter().enumerate().map(|(idx, _)| OutPoint { + hash: dependent_entry_hash.clone(), + index: idx as u32, + }.into()).collect(); + dependent_spends.extend(dependent_outputs.clone()); + queue.extend(dependent_outputs); + } } } } if double_spends.is_empty() { DoubleSpendCheckResult::NoDoubleSpend } else { DoubleSpendCheckResult::NonFinalDoubleSpend(NonFinalDoubleSpendSet { double_spends: double_spends, dependent_spends: dependent_spends, }) } } @@ -649,7 +669,7 @@ impl MemoryPool { self.storage.remove_by_hash(h).map(|entry| entry.transaction) } - /// Checks double spend result + /// Checks if `transaction` spends some outputs, already spent by in-pool transactions.
pub fn check_double_spend(&self, transaction: &Transaction) -> DoubleSpendCheckResult { self.storage.check_double_spend(transaction) } @@ -1277,33 +1297,83 @@ mod tests { fn test_memory_pool_check_double_spend() { let chain = &mut ChainBuilder::new(); - TransactionBuilder::with_output(10).add_output(10).add_output(10).store(chain) // transaction0 - .reset().set_input(&chain.at(0), 0).add_output(20).lock().store(chain) // locked: transaction0 -> transaction1 - .reset().set_input(&chain.at(0), 0).add_output(30).store(chain) // good replacement: transaction0 -> transaction2 - .reset().set_input(&chain.at(0), 1).add_output(40).store(chain) // not-locked: transaction0 -> transaction3 - .reset().set_input(&chain.at(0), 1).add_output(50).store(chain) // bad replacement: transaction0 -> transaction4 - .reset().set_input(&chain.at(0), 2).add_output(60).store(chain); // no double spending: transaction0 -> transaction5 + TransactionBuilder::with_output(10).add_output(10).add_output(10).store(chain) // t0 + .reset().set_input(&chain.at(0), 0).add_output(20).lock().store(chain) // nonfinal: t0[0] -> t1 + .reset().set_input(&chain.at(1), 0).add_output(30).store(chain) // dependent: t0[0] -> t1[0] -> t2 + .reset().set_input(&chain.at(0), 0).add_output(40).store(chain) // good replacement: t0[0] -> t3 + .reset().set_input(&chain.at(0), 1).add_output(50).store(chain) // final: t0[1] -> t4 + .reset().set_input(&chain.at(0), 1).add_output(60).store(chain) // bad replacement: t0[1] -> t5 + .reset().set_input(&chain.at(0), 2).add_output(70).store(chain); // no double spend: t0[2] -> t6 + let mut pool = MemoryPool::new(); pool.insert_verified(chain.at(1)); - pool.insert_verified(chain.at(3)); - // check locked double spends - match pool.check_double_spend(&chain.at(2)) { - DoubleSpendCheckResult::LockedDoubleSpend(hs) => assert!(hs.contains(&chain.at(1).inputs[0].previous_output.clone().into())), - _ => panic!("unexpected"), - } - // check unlocked double spends - match pool.check_double_spend(&chain.at(4)) { - DoubleSpendCheckResult::DoubleSpend(hash1, hash2, index) => { - assert_eq!(hash1, chain.at(3).hash()); - assert_eq!(hash2, chain.at(0).hash()); - assert_eq!(index, 1); + pool.insert_verified(chain.at(2)); + pool.insert_verified(chain.at(4)); + // when output is spent by nonfinal transaction + match pool.check_double_spend(&chain.at(3)) { + DoubleSpendCheckResult::NonFinalDoubleSpend(set) => { + assert_eq!(set.double_spends.len(), 1); + assert!(set.double_spends.contains(&chain.at(1).inputs[0].previous_output.clone().into())); + assert_eq!(set.dependent_spends.len(), 2); + assert!(set.dependent_spends.contains(&OutPoint { + hash: chain.at(1).hash(), + index: 0, + }.into())); + assert!(set.dependent_spends.contains(&OutPoint { + hash: chain.at(2).hash(), + index: 0, + }.into())); }, _ => panic!("unexpected"), } - // check no-double spends + // when output is spent by final transaction match pool.check_double_spend(&chain.at(5)) { + DoubleSpendCheckResult::DoubleSpend(inpool_hash, prev_hash, prev_index) => { + assert_eq!(inpool_hash, chain.at(4).hash()); + assert_eq!(prev_hash, chain.at(0).hash()); + assert_eq!(prev_index, 1); + }, + _ => panic!("unexpected"), + } + // when output is not spent at all + match pool.check_double_spend(&chain.at(6)) { DoubleSpendCheckResult::NoDoubleSpend => (), _ => panic!("unexpected"), } } + + #[test] + fn test_memory_pool_check_double_spend_multiple_dependent_outputs() { + let chain = &mut ChainBuilder::new(); + + TransactionBuilder::with_output(100).store(chain) // t0 + 
.reset().set_input(&chain.at(0), 0).add_output(20).add_output(30).add_output(50).lock().store(chain) // nonfinal: t0[0] -> t1 + .reset().set_input(&chain.at(0), 0).add_output(40).store(chain); // good replacement: t0[0] -> t2 + + let mut pool = MemoryPool::new(); + pool.insert_verified(chain.at(1)); + + // when output is spent by nonfinal transaction + match pool.check_double_spend(&chain.at(2)) { + DoubleSpendCheckResult::NonFinalDoubleSpend(set) => { + assert_eq!(set.double_spends.len(), 1); + assert!(set.double_spends.contains(&chain.at(1).inputs[0].previous_output.clone().into())); + assert_eq!(set.dependent_spends.len(), 3); + assert!(set.dependent_spends.contains(&OutPoint { + hash: chain.at(1).hash(), + index: 0, + }.into())); + assert!(set.dependent_spends.contains(&OutPoint { + hash: chain.at(1).hash(), + index: 1, + }.into())); + assert!(set.dependent_spends.contains(&OutPoint { + hash: chain.at(1).hash(), + index: 2, + }.into())); + }, + _ => panic!("unexpected"), + } + + } } diff --git a/sync/src/synchronization_client.rs b/sync/src/synchronization_client.rs index 2dfd8db8..a2e03b16 100644 --- a/sync/src/synchronization_client.rs +++ b/sync/src/synchronization_client.rs @@ -2935,4 +2935,9 @@ pub mod tests { // should not panic sync.on_peer_transaction(1, test_data::TransactionBuilder::with_default_input(0).into()); } + + #[test] + fn when_transaction_replaces_locked_transaction() { + // TODO + } } diff --git a/sync/src/synchronization_verifier.rs b/sync/src/synchronization_verifier.rs index 26e53d5f..553f83d5 100644 --- a/sync/src/synchronization_verifier.rs +++ b/sync/src/synchronization_verifier.rs @@ -1,10 +1,10 @@ use std::thread; -use std::collections::{VecDeque, HashSet}; +use std::collections::VecDeque; use std::sync::Arc; use std::sync::mpsc::{channel, Sender, Receiver}; use chain::{Transaction, OutPoint, TransactionOutput, IndexedBlock}; use network::Magic; -use miner::{HashedOutPoint, DoubleSpendCheckResult}; +use miner::{DoubleSpendCheckResult, NonFinalDoubleSpendSet}; use primitives::hash::H256; use synchronization_chain::ChainRef; use verification::{self, BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain}; @@ -64,7 +64,7 @@ struct ChainMemoryPoolTransactionOutputProvider { chain: ChainRef, /// Previous outputs, for which we should return 'Not spent' value. /// These are used when new version of transaction is received. 
- locked_outputs: Option>, + nonfinal_spends: Option, } impl VerificationTask { @@ -214,15 +214,20 @@ fn execute_verification_task Result { // we have to check if there are another in-mempool transactions which spent same outputs here - match chain.read().memory_pool().check_double_spend(transaction) { + let check_result = chain.read().memory_pool().check_double_spend(transaction); + ChainMemoryPoolTransactionOutputProvider::for_double_spend_check_result(chain, check_result) + } + + pub fn for_double_spend_check_result(chain: ChainRef, check_result: DoubleSpendCheckResult) -> Result { + match check_result { DoubleSpendCheckResult::DoubleSpend(_, hash, index) => Err(verification::TransactionError::UsingSpentOutput(hash, index)), DoubleSpendCheckResult::NoDoubleSpend => Ok(ChainMemoryPoolTransactionOutputProvider { chain: chain.clone(), - locked_outputs: None, + nonfinal_spends: None, }), - DoubleSpendCheckResult::LockedDoubleSpend(locked_outputs) => Ok(ChainMemoryPoolTransactionOutputProvider { + DoubleSpendCheckResult::NonFinalDoubleSpend(nonfinal_spends) => Ok(ChainMemoryPoolTransactionOutputProvider { chain: chain.clone(), - locked_outputs: Some(locked_outputs), + nonfinal_spends: Some(nonfinal_spends), }), } } @@ -231,19 +236,32 @@ impl ChainMemoryPoolTransactionOutputProvider { impl TransactionOutputObserver for ChainMemoryPoolTransactionOutputProvider { fn is_spent(&self, prevout: &OutPoint) -> Option { // check if this output is 'locked' by mempool transaction - if let Some(ref locked_outputs) = self.locked_outputs { - if locked_outputs.contains(&prevout.clone().into()) { + if let Some(ref nonfinal_spends) = self.nonfinal_spends { + if nonfinal_spends.double_spends.contains(&prevout.clone().into()) { return Some(false); } } - // check spending in storage - self.chain.read().storage().transaction_meta(&prevout.hash).and_then(|tm| tm.is_spent(prevout.index as usize)) + // we can omit memory_pool check here, because it has been completed in `for_transaction` method + // => just check spending in storage + self.chain.read().storage() + .transaction_meta(&prevout.hash) + .and_then(|tm| tm.is_spent(prevout.index as usize)) } } impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvider { fn previous_transaction_output(&self, prevout: &OutPoint) -> Option { + // check if that is output of some transaction, which is vitually removed from memory pool + if let Some(ref nonfinal_spends) = self.nonfinal_spends { + if nonfinal_spends.dependent_spends.contains(&prevout.clone().into()) { + // transaction is trying to replace some nonfinal transaction + // + it is also depends on this transaction + // => this is definitely an error + return None; + } + } + let chain = self.chain.read(); chain.memory_pool().previous_transaction_output(prevout) .or_else(|| chain.storage().as_previous_transaction_output_provider().previous_transaction_output(prevout)) @@ -254,15 +272,16 @@ impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvi pub mod tests { use std::sync::Arc; use std::collections::HashMap; - use chain::Transaction; - use synchronization_chain::Chain; + use chain::{Transaction, OutPoint}; + use synchronization_chain::{Chain, ChainRef}; use synchronization_client::CoreVerificationSink; use synchronization_executor::tests::DummyTaskExecutor; use primitives::hash::H256; use chain::IndexedBlock; - use super::{Verifier, BlockVerificationSink, TransactionVerificationSink}; - use db; + use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, 
ChainMemoryPoolTransactionOutputProvider}; + use db::{self, TransactionOutputObserver, PreviousTransactionOutputProvider}; use test_data; + use parking_lot::RwLock; #[derive(Default)] pub struct DummyVerifier { @@ -316,4 +335,37 @@ pub mod tests { assert!(chain.memory_pool().is_spent(&out1)); assert!(!chain.memory_pool().is_spent(&out2)); } + + #[test] + fn when_transaction_depends_on_removed_nonfinal_transaction() { + let dchain = &mut test_data::ChainBuilder::new(); + + test_data::TransactionBuilder::with_output(10).store(dchain) // t0 + .reset().set_input(&dchain.at(0), 0).add_output(20).lock().store(dchain) // nonfinal: t0[0] -> t1 + .reset().set_input(&dchain.at(1), 0).add_output(30).store(dchain) // dependent: t0[0] -> t1[0] -> t2 + .reset().set_input(&dchain.at(0), 0).add_output(40).store(dchain); // good replacement: t0[0] -> t3 + + let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); + chain.memory_pool_mut().insert_verified(dchain.at(0)); + chain.memory_pool_mut().insert_verified(dchain.at(1)); + chain.memory_pool_mut().insert_verified(dchain.at(2)); + + // when inserting t3: + // check that is_spent(t0[0]) == Some(false) (as it is spent by nonfinal t1) + // check that is_spent(t1[0]) == None (as t1 is virtually removed) + // check that is_spent(t2[0]) == None (as t2 is virtually removed) + // check that previous_transaction_output(t0[0]) = Some(_) + // check that previous_transaction_output(t1[0]) = None (as t1 is virtually removed) + // check that previous_transaction_output(t2[0]) = None (as t2 is virtually removed) + // => + // if t3 is also depending on t1[0] || t2[0], it will be rejected by verification as missing inputs + let chain = ChainRef::new(RwLock::new(chain)); + let provider = ChainMemoryPoolTransactionOutputProvider::for_transaction(chain, &dchain.at(3)).unwrap(); + assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(false)); + assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None); + assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None); + assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(dchain.at(0).outputs[0].clone())); + assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None); + assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None); + } }
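For readers following the series, the replacement behaviour above hinges on the finality rule added to chain/src/transaction.rs in PATCH 09: a transaction with lock_time == 0 is final, and a non-zero lock_time is ignored once every input carries the maximum sequence number. A minimal, self-contained sketch of that rule, using simplified stand-in types (Input, Tx and SEQUENCE_FINAL are illustrative, not the crate's own):

const SEQUENCE_FINAL: u32 = 0xffffffff;

struct Input { sequence: u32 }

struct Tx { lock_time: u32, inputs: Vec<Input> }

impl Tx {
    // lock_time == 0 means final; otherwise the lock only applies while at
    // least one input carries a sequence number below the maximum.
    fn is_final(&self) -> bool {
        self.lock_time == 0 || self.inputs.iter().all(|input| input.sequence == SEQUENCE_FINAL)
    }
}

fn main() {
    // TransactionBuilder::lock() in the test data sets sequence = 0 and
    // lock_time = 500000, which is exactly what makes a pool entry replaceable.
    let non_final = Tx { lock_time: 500_000, inputs: vec![Input { sequence: 0 }] };
    let final_tx = Tx { lock_time: 500_000, inputs: vec![Input { sequence: SEQUENCE_FINAL }] };
    assert!(!non_final.is_final());
    assert!(final_tx.is_final());
}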
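The check_double_spend walk from PATCH 13 can be condensed the same way. The sketch below models only the classification: an output already spent by a final in-pool transaction is a hard double spend, while one spent by a non-final transaction marks that transaction and all of its in-pool descendants as replaceable. The two maps stand in for the pool's by_previous_output and by_hash indexes; all types here are simplified assumptions, not the crate's:

use std::collections::{HashMap, HashSet, VecDeque};

type Hash = u64;             // stand-in for H256
type OutPoint = (Hash, u32); // (transaction hash, output index)

struct PoolEntry { is_final: bool, outputs: u32 }

enum CheckResult {
    NoDoubleSpend,
    DoubleSpend(Hash, OutPoint),
    NonFinal { double_spends: HashSet<OutPoint>, dependent_spends: HashSet<OutPoint> },
}

fn check_double_spend(
    by_previous_output: &HashMap<OutPoint, Hash>, // outpoint -> hash of the pool tx spending it
    by_hash: &HashMap<Hash, PoolEntry>,           // pool tx hash -> entry
    inputs: &[OutPoint],                          // prevouts of the candidate transaction
) -> CheckResult {
    let mut double_spends = HashSet::new();
    let mut dependent_spends = HashSet::new();
    for prevout in inputs {
        if let Some(&spender) = by_previous_output.get(prevout) {
            // spent by a final pool transaction: hard conflict
            if by_hash[&spender].is_final {
                return CheckResult::DoubleSpend(spender, *prevout);
            }
            // spent by a non-final transaction: it and its descendants are replaceable
            double_spends.insert(*prevout);
            let mut queue: VecDeque<OutPoint> = VecDeque::new();
            queue.push_back(*prevout);
            while let Some(out) = queue.pop_front() {
                if let Some(&dep) = by_previous_output.get(&out) {
                    for index in 0..by_hash[&dep].outputs {
                        dependent_spends.insert((dep, index));
                        queue.push_back((dep, index));
                    }
                }
            }
        }
    }
    if double_spends.is_empty() {
        CheckResult::NoDoubleSpend
    } else {
        CheckResult::NonFinal { double_spends, dependent_spends }
    }
}

fn main() {
    let mut by_previous_output = HashMap::new();
    let mut by_hash = HashMap::new();
    // one non-final pool transaction (hash 1) spends outpoint (0, 0)
    by_previous_output.insert((0, 0), 1);
    by_hash.insert(1, PoolEntry { is_final: false, outputs: 1 });
    match check_double_spend(&by_previous_output, &by_hash, &[(0, 0)]) {
        CheckResult::NonFinal { dependent_spends, .. } => assert!(dependent_spends.contains(&(1, 0))),
        _ => panic!("expected a non-final (replaceable) double spend"),
    }
}

This mirrors why is_spent returns Some(false) for a double-spent output (the verifier should treat it as replaceable, not spent) while previous_transaction_output returns None for dependent outputs, so a replacement that also depends on the transaction it evicts fails verification with missing inputs.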