Merge branch 'master' into rpc_raw_continue

This commit is contained in:
Svyatoslav Nikolsky 2016-12-13 13:32:37 +03:00
commit 3cc7ec96d1
46 changed files with 2120 additions and 553 deletions

39
Cargo.lock generated
View File

@ -2,15 +2,13 @@
name = "verification"
version = "0.1.0"
dependencies = [
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"ethcore-devtools 1.3.0",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"scoped-pool 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",
"serialization 0.1.0",
"test-data 0.1.0",
@ -747,6 +745,17 @@ dependencies = [
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rayon"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex"
version = "0.1.80"
@ -848,26 +857,11 @@ dependencies = [
"semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "scoped-pool"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"variance 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "scoped-tls"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "scopeguard"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "script"
version = "0.1.0"
@ -1167,11 +1161,6 @@ name = "utf8-ranges"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "variance"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "vec_map"
version = "0.6.0"
@ -1287,6 +1276,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"
"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
"checksum rayon 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0f0783f5880c56f5a308e219ac9309dbe781e064741dd5def4c617c440890305"
"checksum rayon 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3b6a6e05e0e6b703e9f2ad266eb63f3712e693a17a2702b95a23de14ce8defa9"
"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f"
"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
"checksum rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
@ -1295,9 +1285,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
"checksum rustc-serialize 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "237546c689f20bb44980270c73c3b9edd0891c1be49cc1274406134a66d3957b"
"checksum rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c5f5376ea5e30ce23c03eb77cbe4962b988deead10910c372b226388b594c084"
"checksum scoped-pool 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "817a3a15e704545ce59ed2b5c60a5d32bda4d7869befb8b36667b658a6c00b43"
"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
"checksum scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "59a076157c1e2dc561d8de585151ee6965d910dd4dcb5dabb7ae3e83981a6c57"
"checksum semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)" = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac"
"checksum serde 0.8.19 (registry+https://github.com/rust-lang/crates.io-index)" = "58a19c0871c298847e6b68318484685cd51fa5478c0c905095647540031356e5"
"checksum serde_codegen 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)" = "da68810d845f8e33a80243c28794650397056cbe7aea4c9c7516f55d1061c94e"
@ -1328,7 +1316,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "36dff09cafb4ec7c8cf0023eb0b686cb6ce65499116a12201c9e11840ca01beb"
"checksum url 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "48ccf7bd87a81b769cf84ad556e034541fb90e1cd6d4bc375c822ed9500cd9d7"
"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
"checksum variance 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3abfc2be1fb59663871379ea884fd81de80c496f2274e021c01d6fe56cd77b05"
"checksum vec_map 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cac5efe5cb0fa14ec2f84f83c701c562ee63f6dcc680861b21d65c682adfb05f"
"checksum vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0795a11576d29ae80525a3fda315bf7b534f8feb9d34101e5fe63fb95bb2fd24"
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"

View File

@ -238,6 +238,14 @@ impl Transaction {
&self.outputs
}
pub fn is_empty(&self) -> bool {
self.inputs.is_empty() || self.outputs.is_empty()
}
pub fn is_null(&self) -> bool {
self.inputs.iter().any(|input| input.previous_output.is_null())
}
pub fn is_coinbase(&self) -> bool {
self.inputs.len() == 1 && self.inputs[0].previous_output.is_null()
}
@ -261,9 +269,7 @@ impl Transaction {
}
pub fn total_spends(&self) -> u64 {
self.outputs
.iter()
.fold(0u64, |acc, out| acc + out.value)
self.outputs.iter().map(|output| output.value).sum()
}
}

View File

@ -19,8 +19,29 @@ impl PreviousTransactionOutputProvider for IndexedBlock {
}
impl TransactionOutputObserver for IndexedBlock {
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
self.previous_transaction_output(prevout).map(|_output| false)
fn is_spent(&self, _prevout: &OutPoint) -> Option<bool> {
// the code below is valid, but commented out due to its poor performance
// we could optimize it by indexing all outputs once
// let tx: IndexedTransaction = { .. }
// let indexed_outputs: IndexedOutputs = tx.indexed_outputs();
// indexed_outputs.is_spent()
None
// if previous transaction output appears more than once then we can safely
// tell that it's spent (double spent)
//let spends = self.transactions.iter()
//.flat_map(|tx| &tx.raw.inputs)
//.filter(|input| &input.previous_output == prevout)
//.take(2)
//.count();
//match spends {
//0 => None,
//1 => Some(false),
//2 => Some(true),
//_ => unreachable!("spends <= 2; self.take(2); qed"),
//}
}
}

View File

@ -72,6 +72,7 @@ pub use error::{Error, ConsistencyError};
pub use kvdb::Database;
pub use transaction_provider::{TransactionProvider, PreviousTransactionOutputProvider};
pub use transaction_meta_provider::{TransactionMetaProvider, TransactionOutputObserver};
pub use transaction_meta::TransactionMeta;
pub use block_stapler::{BlockStapler, BlockInsertedChain};
pub use block_provider::{BlockProvider, BlockHeaderProvider};
pub use indexed_block::IndexedBlock;

View File

@ -2,11 +2,17 @@ use primitives::hash::H256;
use chain::OutPoint;
use transaction_meta::TransactionMeta;
pub trait TransactionOutputObserver {
/// Transaction output observers track if output has been spent
pub trait TransactionOutputObserver: Send + Sync {
/// Returns None if we have no information about previous output
/// Returns Some(false) if we know that output hasn't been spent
/// Returns Some(true) if we know that output has been spent
fn is_spent(&self, prevout: &OutPoint) -> Option<bool>;
}
pub trait TransactionMetaProvider {
/// get transaction metadata
/// Transaction meta provider stores transaction meta information
pub trait TransactionMetaProvider: Send + Sync {
/// Returns None if transaction with given hash does not exist
/// Otherwise returns transaction meta object
fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta>;
}

View File

@ -15,9 +15,9 @@ pub trait TransactionProvider {
fn transaction(&self, hash: &H256) -> Option<chain::Transaction>;
}
/// During transaction the only part of old transaction that we need is `TransactionOutput`.
/// During transaction verification the only part of old transaction that we need is `TransactionOutput`.
/// Structures like `IndexedBlock` or `MemoryPool` already have it in memory, so it would be
/// a shame to clone the whole transaction just to get single output.
pub trait PreviousTransactionOutputProvider {
pub trait PreviousTransactionOutputProvider: Send + Sync {
fn previous_transaction_output(&self, prevout: &chain::OutPoint) -> Option<chain::TransactionOutput>;
}

View File

@ -1,11 +1,10 @@
use primitives::hash::H256;
use db::{SharedStore, IndexedTransaction};
use chain::{OutPoint, TransactionOutput};
use db::{SharedStore, IndexedTransaction, PreviousTransactionOutputProvider};
use network::Magic;
use memory_pool::{MemoryPool, OrderingStrategy};
use verification::{
work_required, block_reward_satoshi, transaction_sigops,
StoreWithUnretainedOutputs, MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS
};
use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops};
pub use verification::constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
const BLOCK_VERSION: u32 = 0x20000000;
const BLOCK_HEADER_SIZE: u32 = 4 + 32 + 32 + 4 + 4 + 4;
@ -131,6 +130,89 @@ impl Default for BlockAssembler {
}
}
/// Iterator iterating over mempool transactions and yielding only those which fit the block
struct FittingTransactionsIterator<'a, T> {
/// Shared store is used to query previous transaction outputs from database
store: &'a PreviousTransactionOutputProvider,
/// Memory pool transactions iterator
iter: T,
/// Size policy decides if transaction size fits the block
block_size: SizePolicy,
/// Sigops policy decides if transaction sigops fit the block
sigops: SizePolicy,
/// Previous entries are needed to get previous transaction outputs
previous_entries: Vec<&'a Entry>,
/// True if block is already full
finished: bool,
}
impl<'a, T> FittingTransactionsIterator<'a, T> where T: Iterator<Item = &'a Entry> {
fn new(store: &'a PreviousTransactionOutputProvider, iter: T, max_block_size: u32, max_block_sigops: u32) -> Self {
FittingTransactionsIterator {
store: store,
iter: iter,
// reserve some space for header and transactions len field
block_size: SizePolicy::new(BLOCK_HEADER_SIZE + 4, max_block_size, 1_000, 50),
sigops: SizePolicy::new(0, max_block_sigops, 8, 50),
previous_entries: Vec::new(),
finished: false,
}
}
}
impl<'a, T> PreviousTransactionOutputProvider for FittingTransactionsIterator<'a, T> where T: Send + Sync {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.store.previous_transaction_output(prevout)
.or_else(|| {
self.previous_entries.iter()
.find(|e| e.hash == prevout.hash)
.and_then(|e| e.transaction.outputs.iter().nth(prevout.index as usize))
.cloned()
})
}
}
impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<Item = &'a Entry> + Send + Sync {
type Item = &'a Entry;
fn next(&mut self) -> Option<Self::Item> {
while !self.finished {
let entry = match self.iter.next() {
Some(entry) => entry,
None => {
self.finished = true;
return None;
}
};
let transaction_size = entry.size as u32;
let bip16_active = true;
let sigops_count = transaction_sigops(&entry.transaction, self, bip16_active) as u32;
let size_step = self.block_size.decide(transaction_size);
let sigops_step = self.sigops.decide(sigops_count);
match size_step.and(sigops_step) {
NextStep::Append => {
self.previous_entries.push(entry);
return Some(entry);
},
NextStep::FinishAndAppend => {
self.finished = true;
self.previous_entries.push(entry);
return Some(entry);
},
NextStep::Ignore => (),
NextStep::FinishAndIgnore => {
self.finished = true;
},
}
}
None
}
}
impl BlockAssembler {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, network: Magic) -> BlockTemplate {
// get best block
@ -141,13 +223,18 @@ impl BlockAssembler {
let nbits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), network);
let version = BLOCK_VERSION;
let mut block_size = SizePolicy::new(BLOCK_HEADER_SIZE, self.max_block_size, 1_000, 50);
let mut sigops = SizePolicy::new(0, self.max_block_sigops, 8, 50);
let mut coinbase_value = block_reward_satoshi(height);
let mut transactions = Vec::new();
// add priority transactions
BlockAssembler::fill_transactions(store, mempool, &mut block_size, &mut sigops, &mut coinbase_value, &mut transactions, OrderingStrategy::ByTransactionScore);
let mempool_iter = mempool.iter(OrderingStrategy::ByTransactionScore);
let tx_iter = FittingTransactionsIterator::new(store.as_previous_transaction_output_provider(), mempool_iter, self.max_block_size, self.max_block_sigops);
for entry in tx_iter {
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
coinbase_value += entry.miner_fee as u64;
let tx = IndexedTransaction::new(entry.hash.clone(), entry.transaction.clone());
transactions.push(tx);
}
BlockTemplate {
version: version,
@ -161,53 +248,14 @@ impl BlockAssembler {
sigop_limit: self.max_block_sigops,
}
}
fn fill_transactions(
store: &SharedStore,
mempool: &MemoryPool,
block_size: &mut SizePolicy,
sigops: &mut SizePolicy,
coinbase_value: &mut u64,
transactions: &mut Vec<IndexedTransaction>,
strategy: OrderingStrategy
) {
for entry in mempool.iter(strategy) {
let transaction_size = entry.size as u32;
let sigops_count = {
let txs: &[_] = &*transactions;
let unretained_store = StoreWithUnretainedOutputs::new(store, &txs);
let bip16_active = true;
transaction_sigops(&entry.transaction, &unretained_store, bip16_active) as u32
};
let size_step = block_size.decide(transaction_size);
let sigops_step = sigops.decide(sigops_count);
match size_step.and(sigops_step) {
NextStep::Append => {
let tx = IndexedTransaction::new(entry.hash.clone(), entry.transaction.clone());
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
*coinbase_value += entry.miner_fee as u64;
transactions.push(tx);
},
NextStep::FinishAndAppend => {
let tx = IndexedTransaction::new(entry.hash.clone(), entry.transaction.clone());
transactions.push(tx);
break;
},
NextStep::Ignore => (),
NextStep::FinishAndIgnore => {
break;
},
}
}
}
}
#[cfg(test)]
mod tests {
use super::{SizePolicy, NextStep};
use db::IndexedTransaction;
use verification::constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
use memory_pool::Entry;
use super::{SizePolicy, NextStep, FittingTransactionsIterator};
#[test]
fn test_size_policy() {
@ -239,4 +287,18 @@ mod tests {
assert_eq!(NextStep::FinishAndAppend.and(NextStep::Ignore), NextStep::FinishAndIgnore);
assert_eq!(NextStep::FinishAndAppend.and(NextStep::Append), NextStep::FinishAndAppend);
}
#[test]
fn test_fitting_transactions_iterator_no_transactions() {
let store: Vec<IndexedTransaction> = Vec::new();
let entries: Vec<Entry> = Vec::new();
let store_ref: &[_] = &store;
let iter = FittingTransactionsIterator::new(&store_ref, entries.iter(), MAX_BLOCK_SIZE as u32, MAX_BLOCK_SIGOPS as u32);
assert!(iter.collect::<Vec<_>>().is_empty());
}
#[test]
fn test_fitting_transactions_iterator_max_block_size_reached() {
}
}

View File

@ -30,7 +30,7 @@ pub use primitives::{hash, bytes};
pub use config::Config;
pub use net::Config as NetConfig;
pub use p2p::P2P;
pub use p2p::{P2P, Context};
pub use event_loop::{event_loop, forever};
pub use util::{PeerId, PeerInfo, InternetProtocol};
pub use util::{NodeTableError, PeerId, PeerInfo, InternetProtocol};
pub use protocol::{InboundSyncConnection, InboundSyncConnectionRef, OutboundSyncConnection, OutboundSyncConnectionRef, LocalSyncNode, LocalSyncNodeRef};

View File

@ -1,5 +1,6 @@
use std::{io, net, error, time};
use std::sync::Arc;
use std::net::SocketAddr;
use parking_lot::RwLock;
use futures::{Future, finished, failed, BoxFuture};
use futures::stream::Stream;
@ -12,7 +13,7 @@ use ns_dns_tokio::DnsResolver;
use message::{Payload, MessageResult, Message};
use message::common::Services;
use net::{connect, Connections, Channel, Config as NetConfig, accept_connection, ConnectionCounter};
use util::{NodeTable, Node, Direction};
use util::{NodeTable, Node, NodeTableError, Direction};
use session::{SessionFactory, SeednodeSessionFactory, NormalSessionFactory};
use {Config, PeerId};
use protocol::{LocalSyncNodeRef, InboundSyncConnectionRef, OutboundSyncConnectionRef};
@ -88,9 +89,21 @@ impl Context {
self.node_table.write().insert_many(nodes);
}
/// Adds node to table.
pub fn add_node(&self, addr: SocketAddr) -> Result<(), NodeTableError> {
trace!("Adding node {} to node table", &addr);
self.node_table.write().add(addr, self.config.connection.services)
}
/// Removes node from table.
pub fn remove_node(&self, addr: SocketAddr) -> Result<(), NodeTableError> {
trace!("Removing node {} from node table", &addr);
self.node_table.write().remove(&addr)
}
/// Every 10 seconds check if we have reached maximum number of outbound connections.
/// If not, connect to best peers.
pub fn autoconnect(context: Arc<Context>, handle: &Handle, config: NetConfig) {
pub fn autoconnect(context: Arc<Context>, handle: &Handle) {
let c = context.clone();
// every 10 seconds connect to new peers (if needed)
let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(10, 0), handle).expect("Failed to create interval")
@ -113,7 +126,7 @@ impl Context {
trace!("Creating {} more outbound connections", addresses.len());
for address in addresses {
Context::connect::<NormalSessionFactory>(context.clone(), address, config.clone());
Context::connect::<NormalSessionFactory>(context.clone(), address);
}
if let Err(_err) = context.node_table.read().save_to_file(&context.config.node_table_path) {
@ -174,13 +187,18 @@ impl Context {
}
/// Connect to socket using given context.
pub fn connect<T>(context: Arc<Context>, socket: net::SocketAddr, config: NetConfig) where T: SessionFactory {
pub fn connect<T>(context: Arc<Context>, socket: net::SocketAddr) where T: SessionFactory {
context.connection_counter.note_new_outbound_connection();
context.remote.clone().spawn(move |handle| {
context.pool.clone().spawn(Context::connect_future::<T>(context, socket, handle, &config))
let config = context.config.clone();
context.pool.clone().spawn(Context::connect_future::<T>(context, socket, handle, &config.connection))
})
}
pub fn connect_normal(context: Arc<Context>, socket: net::SocketAddr) {
Self::connect::<NormalSessionFactory>(context, socket)
}
pub fn accept_connection_future(context: Arc<Context>, stream: TcpStream, socket: net::SocketAddr, handle: &Handle, config: NetConfig) -> BoxedEmptyFuture {
accept_connection(stream, handle, &config, socket).then(move |result| {
match result {
@ -420,25 +438,25 @@ impl P2P {
self.connect_to_seednode(&resolver, seed);
}
Context::autoconnect(self.context.clone(), &self.event_loop_handle, self.config.connection.clone());
Context::autoconnect(self.context.clone(), &self.event_loop_handle);
try!(self.listen());
Ok(())
}
/// Attempts to connect to the specified node
pub fn connect<T>(&self, addr: net::SocketAddr) where T: SessionFactory {
Context::connect::<T>(self.context.clone(), addr, self.config.connection.clone());
Context::connect::<T>(self.context.clone(), addr);
}
pub fn connect_to_seednode(&self, resolver: &Resolver, seednode: &str) {
let owned_seednode = seednode.to_owned();
let context = self.context.clone();
let connection_config = self.config.connection.clone();
let dns_lookup = resolver.resolve(seednode).then(move |result| {
match result {
Ok(address) => match address.pick_one() {
Some(socket) => {
trace!("Dns lookup of seednode {} finished. Connecting to {}", owned_seednode, socket);
Context::connect::<SeednodeSessionFactory>(context, socket, connection_config);
Context::connect::<SeednodeSessionFactory>(context, socket);
},
None => {
trace!("Dns lookup of seednode {} resolved with no results", owned_seednode);
@ -454,10 +472,13 @@ impl P2P {
self.event_loop_handle.spawn(pool_work);
}
fn listen(&self) -> Result<(), Box<error::Error>> {
let server = try!(Context::listen(self.context.clone(), &self.event_loop_handle, self.config.connection.clone()));
self.event_loop_handle.spawn(server);
Ok(())
}
pub fn context(&self) -> &Arc<Context> {
&self.context
}
}

View File

@ -7,7 +7,7 @@ mod response_queue;
mod synchronizer;
pub use self::internet_protocol::InternetProtocol;
pub use self::node_table::{NodeTable, Node};
pub use self::node_table::{NodeTable, NodeTableError, Node};
pub use self::peer::{PeerId, PeerInfo, Direction};
pub use self::response_queue::{ResponseQueue, Responses};
pub use self::synchronizer::{Synchronizer, ConfigurableSynchronizer};

View File

@ -161,6 +161,9 @@ impl PartialOrd for Node {
}
}
#[derive(Debug)]
pub enum NodeTableError { AddressAlreadyAdded, NoAddressInTable }
#[derive(Default, Debug)]
pub struct NodeTable<T = RealTime> where T: Time {
/// Time source.
@ -219,6 +222,35 @@ impl<T> NodeTable<T> where T: Time {
}
}
pub fn exists(&self, addr: SocketAddr) -> bool {
self.by_addr.contains_key(&addr)
}
pub fn add(&mut self, addr: SocketAddr, services: Services) -> Result<(), NodeTableError> {
if self.exists(addr.clone()) {
Err(NodeTableError::AddressAlreadyAdded)
}
else {
self.insert(addr, services);
Ok(())
}
}
/// Tries to remove node with the specified socket address
/// from table, if exists.
/// Returns `Ok(())` if the node was removed, error otherwise
pub fn remove(&mut self, addr: &SocketAddr) -> Result<(), NodeTableError> {
let node = self.by_addr.remove(&addr);
match node {
Some(val) => {
self.by_time.remove(&val.clone().into());
self.by_score.remove(&val.into());
Ok(())
}
None => Err(NodeTableError::NoAddressInTable)
}
}
/// Inserts many new addresses into node table.
/// Used in `addr` request handler.
/// Discards all nodes with timestamp newer than current time.
@ -452,6 +484,43 @@ mod tests {
table.note_failure(&s1);
}
#[test]
fn add_node() {
let mut table = NodeTable::<ZeroTime>::default();
let add_result = table.add("127.0.0.1:8001".parse().unwrap(), Services::default());
assert!(add_result.is_ok())
}
#[test]
fn add_duplicate() {
let mut table = NodeTable::<ZeroTime>::default();
table.add("127.0.0.1:8001".parse().unwrap(), Services::default()).unwrap();
let add_result = table.add("127.0.0.1:8001".parse().unwrap(), Services::default());
assert!(add_result.is_err())
}
#[test]
fn remove() {
let mut table = NodeTable::<ZeroTime>::default();
table.add("127.0.0.1:8001".parse().unwrap(), Services::default()).unwrap();
let remove_result = table.remove(&"127.0.0.1:8001".parse().unwrap());
assert!(remove_result.is_ok());
assert_eq!(0, table.by_addr.len());
assert_eq!(0, table.by_score.len());
assert_eq!(0, table.by_time.len());
}
#[test]
fn remove_nonexistant() {
let mut table = NodeTable::<ZeroTime>::default();
let remove_result = table.remove(&"127.0.0.1:8001".parse().unwrap());
assert!(remove_result.is_err());
}
#[test]
fn test_save_and_load() {
let s0: SocketAddr = "127.0.0.1:8000".parse().unwrap();

View File

@ -37,14 +37,15 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
let local_sync_node = create_local_sync_node(&sync_handle, cfg.magic, db.clone());
let sync_connection_factory = create_sync_connection_factory(local_sync_node.clone());
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
let rpc_deps = rpc::Dependencies {
network: cfg.magic,
storage: db,
local_sync_node: local_sync_node,
p2p_context: p2p.context().clone(),
};
let _rpc_server = try!(rpc::new_http(cfg.rpc_config, rpc_deps));
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
try!(p2p.run().map_err(|_| "Failed to start p2p module"));
el.run(p2p::forever()).unwrap();
Ok(())

View File

@ -1,15 +1,18 @@
use std::net::SocketAddr;
use std::sync::Arc;
use rpc_apis::{self, ApiSet};
use ethcore_rpc::{Server, RpcServer, RpcServerError};
use network::Magic;
use std::io;
use sync;
use db;
use p2p;
pub struct Dependencies {
pub network: Magic,
pub local_sync_node: sync::LocalNodeRef,
pub storage: db::SharedStore,
pub p2p_context: Arc<p2p::Context>,
}
#[derive(Debug, PartialEq)]

View File

@ -11,6 +11,8 @@ pub enum Api {
Miner,
/// BlockChain-related methods
BlockChain,
/// Network
Network,
}
#[derive(Debug, PartialEq, Eq)]
@ -20,7 +22,7 @@ pub enum ApiSet {
impl Default for ApiSet {
fn default() -> Self {
ApiSet::List(vec![Api::Raw, Api::Miner, Api::BlockChain].into_iter().collect())
ApiSet::List(vec![Api::Raw, Api::Miner, Api::BlockChain, Api::Network].into_iter().collect())
}
}
@ -32,6 +34,7 @@ impl FromStr for Api {
"raw" => Ok(Api::Raw),
"miner" => Ok(Api::Miner),
"blockchain" => Ok(Api::BlockChain),
"network" => Ok(Api::Network),
api => Err(format!("Unknown api: {}", api)),
}
}
@ -53,6 +56,7 @@ pub fn setup_rpc<T: Extendable>(server: T, apis: ApiSet, deps: Dependencies) ->
Api::Raw => server.add_delegate(RawClient::new(RawClientCore::new(deps.local_sync_node.clone())).to_delegate()),
Api::Miner => server.add_delegate(MinerClient::new(MinerClientCore::new(deps.local_sync_node.clone())).to_delegate()),
Api::BlockChain => server.add_delegate(BlockChainClient::new(BlockChainClientCore::new(deps.network, deps.storage.clone())).to_delegate()),
Api::Network => server.add_delegate(NetworkClient::new(NetworkClientCore::new(deps.p2p_context.clone())).to_delegate()),
}
}
server

View File

@ -7,21 +7,23 @@ mod codes {
pub const TRANSACTION_OUTPUT_NOT_FOUND: i64 = -32097;
pub const TRANSACTION_OF_SIDE_BRANCH: i64 = -32098;
pub const BLOCK_NOT_FOUND: i64 = -32099;
pub const NODE_ALREADY_ADDED: i64 = -32150;
pub const NODE_NOT_ADDED: i64 = -32151;
}
use std::fmt;
use jsonrpc_core::{Error, ErrorCode, Value};
macro_rules! rpc_unimplemented {
() => (Err(::v1::helpers::errors::unimplemented(None)))
macro_rules! rpc_unimplemented {
() => (Err(::v1::helpers::errors::unimplemented(None)))
}
pub fn unimplemented(details: Option<String>) -> Error {
Error {
code: ErrorCode::InternalError,
message: "This request is not implemented yet. Please create an issue on Github repo.".into(),
data: details.map(Value::String),
}
pub fn unimplemented(details: Option<String>) -> Error {
Error {
code: ErrorCode::InternalError,
message: "This request is not implemented yet. Please create an issue on Github repo.".into(),
data: details.map(Value::String),
}
}
pub fn invalid_params<T: fmt::Debug>(param: &str, details: T) -> Error {
@ -79,3 +81,19 @@ pub fn transaction_of_side_branch<T: fmt::Debug>(data: T) -> Error {
data: Some(Value::String(format!("{:?}", data))),
}
}
pub fn node_already_added() -> Error {
Error {
code: ErrorCode::ServerError(codes::NODE_ALREADY_ADDED),
message: "Node already added to the node table".into(),
data: None,
}
}
pub fn node_not_added() -> Error {
Error {
code: ErrorCode::ServerError(codes::NODE_NOT_ADDED),
message: "Node not added to the node table".into(),
data: None,
}
}

View File

@ -14,6 +14,7 @@ use global_script::Script;
use chain::OutPoint;
use verification;
use ser::serialize;
use network::Magic;
use primitives::hash::H256 as GlobalH256;
use network::Magic;
@ -38,7 +39,7 @@ pub struct BlockChainClientCore {
impl BlockChainClientCore {
pub fn new(network: Magic, storage: db::SharedStore) -> Self {
assert!(storage.best_block().is_some());
BlockChainClientCore {
network: network,
storage: storage,
@ -76,14 +77,20 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
None => -1,
};
let block_size = block.size();
let median_time = verification::ChainVerifier::median_timestamp(self.storage.as_block_header_provider(), &block.header.raw);
// TODO: use real network
let median_time = verification::median_timestamp(
&block.header.raw,
self.storage.as_block_header_provider(),
Magic::Mainnet,
);
VerboseBlock {
confirmations: confirmations,
size: block_size as u32,
strippedsize: block_size as u32, // TODO: segwit
weight: block_size as u32, // TODO: segwit
height: height,
mediantime: median_time,
mediantime: Some(median_time),
difficulty: block.header.raw.bits.to_f64(),
chainwork: U256::default(), // TODO: read from storage
previousblockhash: Some(block.header.raw.previous_header_hash.clone().into()),
@ -423,7 +430,7 @@ pub mod tests {
merkleroot: "982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e".into(),
tx: vec!["982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e".into()],
time: 1231469665,
mediantime: None,
mediantime: Some(1231006505),
nonce: 2573394689,
bits: 486604799,
difficulty: 1.0,
@ -449,7 +456,7 @@ pub mod tests {
merkleroot: "d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into(),
tx: vec!["d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into()],
time: 1231469744,
mediantime: None,
mediantime: Some(1231469665),
nonce: 1639830024,
bits: 486604799,
difficulty: 1.0,

View File

@ -1,7 +1,9 @@
mod blockchain;
mod miner;
mod raw;
mod network;
pub use self::blockchain::{BlockChainClient, BlockChainClientCore};
pub use self::miner::{MinerClient, MinerClientCore};
pub use self::raw::{RawClient, RawClientCore};
pub use self::network::{NetworkClient, NetworkClientCore};

View File

@ -0,0 +1,68 @@
use std::sync::Arc;
use std::net::SocketAddr;
use v1::traits::Network as NetworkRpc;
use v1::types::AddNodeOperation;
use jsonrpc_core::Error;
use v1::helpers::errors;
use p2p;
pub trait NetworkApi : Send + Sync + 'static {
fn add_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError>;
fn remove_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError>;
fn connect(&self, socket_addr: SocketAddr);
}
impl<T> NetworkRpc for NetworkClient<T> where T: NetworkApi {
fn add_node(&self, node: String, operation: AddNodeOperation) -> Result<(), Error> {
let addr = try!(node.parse().map_err(
|_| errors::invalid_params("node", "Invalid socket address format, should be ip:port (127.0.0.1:8008)")));
match operation {
AddNodeOperation::Add => {
self.api.add_node(addr).map_err(|_| errors::node_already_added())
},
AddNodeOperation::Remove => {
self.api.remove_node(addr).map_err(|_| errors::node_not_added())
},
AddNodeOperation::OneTry => {
self.api.connect(addr);
Ok(())
}
}
}
}
pub struct NetworkClient<T: NetworkApi> {
api: T,
}
impl<T> NetworkClient<T> where T: NetworkApi {
pub fn new(api: T) -> Self {
NetworkClient {
api: api,
}
}
}
pub struct NetworkClientCore {
p2p: Arc<p2p::Context>,
}
impl NetworkClientCore {
pub fn new(p2p: Arc<p2p::Context>) -> Self {
NetworkClientCore { p2p: p2p }
}
}
impl NetworkApi for NetworkClientCore {
	/// Delegates to the p2p context's node table.
	fn add_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError> {
		self.p2p.add_node(socket_addr)
	}

	/// Delegates to the p2p context's node table.
	fn remove_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError> {
		self.p2p.remove_node(socket_addr)
	}

	/// Kicks off a single outgoing connection attempt; any failure is not
	/// reported back through this call (the function returns no result).
	fn connect(&self, socket_addr: SocketAddr) {
		p2p::Context::connect_normal(self.p2p.clone(), socket_addr);
	}
}

View File

@ -7,6 +7,8 @@ pub mod types;
pub use self::traits::Raw;
pub use self::traits::Miner;
pub use self::traits::BlockChain;
pub use self::traits::Network;
pub use self::impls::{RawClient, RawClientCore};
pub use self::impls::{MinerClient, MinerClientCore};
pub use self::impls::{BlockChainClient, BlockChainClientCore};
pub use self::impls::{NetworkClient, NetworkClientCore};

View File

@ -1,7 +1,9 @@
mod blockchain;
mod miner;
mod raw;
mod network;

pub use self::blockchain::BlockChain;
pub use self::miner::Miner;
pub use self::raw::Raw;
pub use self::network::Network;

View File

@ -0,0 +1,14 @@
use jsonrpc_core::Error;
use v1::types::AddNodeOperation;
build_rpc_trait! {
	/// Parity-bitcoin network interface
	pub trait Network {
		/// Add a node, remove a node, or make a single connection attempt ("onetry").
		/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "add"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/
		/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "remove"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/
		/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "onetry"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/
		#[rpc(name = "addnode")]
		fn add_node(&self, String, AddNodeOperation) -> Result<(), Error>;
	}
}

View File

@ -10,6 +10,7 @@ mod hash;
mod script;
mod transaction;
mod uint;
mod nodes;
pub use self::address::Address;
pub use self::block::RawBlock;
@ -25,3 +26,4 @@ pub use self::transaction::{RawTransaction, Transaction, TransactionInput, Trans
TransactionInputScript, TransactionOutputScript, SignedTransactionInput, GetRawTransactionResponse,
SignedTransactionOutput};
pub use self::uint::U256;
pub use self::nodes::AddNodeOperation;

31
rpc/src/v1/types/nodes.rs Normal file
View File

@ -0,0 +1,31 @@
use serde::{Deserialize, Deserializer};
/// Operation selector for the `addnode` RPC, deserialized from the
/// JSON strings "add", "remove" and "onetry".
#[derive(Debug, PartialEq)]
pub enum AddNodeOperation {
	// "add": persistently add the node
	Add,
	// "remove": remove a previously added node
	Remove,
	// "onetry": a single connection attempt, nothing stored
	OneTry,
}
impl Deserialize for AddNodeOperation {
	/// Deserializes an `AddNodeOperation` from the strings
	/// "add", "remove" or "onetry" (serde 0.x visitor API).
	fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error> where D: Deserializer {
		use serde::de::Visitor;

		// Maps the accepted string values onto the enum variants.
		struct DummyVisitor;

		impl Visitor for DummyVisitor {
			type Value = AddNodeOperation;

			fn visit_str<E>(&mut self, value: &str) -> Result<AddNodeOperation, E> where E: ::serde::de::Error {
				match value {
					"add" => Ok(AddNodeOperation::Add),
					"remove" => Ok(AddNodeOperation::Remove),
					"onetry" => Ok(AddNodeOperation::OneTry),
					// fixed: the message previously referenced `ScriptType`,
					// a copy-paste from another deserializer
					_ => Err(E::invalid_value(&format!("unknown AddNodeOperation variant: {}", value))),
				}
			}
		}

		deserializer.deserialize(DummyVisitor)
	}
}

View File

@ -49,7 +49,7 @@ use std::sync::Arc;
use parking_lot::RwLock;
use tokio_core::reactor::Handle;
use network::Magic;
use verification::ChainVerifier;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
/// Sync errors.
#[derive(Debug)]

View File

@ -341,7 +341,7 @@ mod tests {
use synchronization_verifier::tests::DummyVerifier;
use tokio_core::reactor::{Core, Handle};
use primitives::bytes::Bytes;
use verification::ChainVerifier;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
struct DummyOutboundSyncConnection;

View File

@ -29,7 +29,7 @@ use synchronization_verifier::{Verifier, VerificationSink, BlockVerificationSink
use compact_block_builder::build_compact_block;
use hash_queue::HashPosition;
use miner::transaction_fee_rate;
use verification::ChainVerifier;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use time;
use std::time::Duration;
use miner::{BlockAssembler, BlockTemplate};
@ -1827,7 +1827,7 @@ pub mod tests {
use synchronization_verifier::tests::DummyVerifier;
use synchronization_server::ServerTaskIndex;
use primitives::hash::H256;
use verification::ChainVerifier;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use p2p::event_loop;
use test_data;
use db::{self, BlockHeaderProvider};

View File

@ -6,7 +6,7 @@ use chain::{Transaction, OutPoint, TransactionOutput};
use network::Magic;
use primitives::hash::H256;
use synchronization_chain::ChainRef;
use verification::{ChainVerifier, Verify as VerificationVerify, Chain};
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain};
use db::{SharedStore, IndexedBlock, PreviousTransactionOutputProvider, TransactionOutputObserver};
use time::get_time;
@ -178,7 +178,7 @@ fn execute_verification_task<T: VerificationSink, U: PreviousTransactionOutputPr
},
VerificationTask::VerifyTransaction(height, transaction) => {
let time: u32 = get_time().sec as u32;
match verifier.verify_transaction(tx_output_provider, height, time, &transaction, 1) {
match verifier.verify_mempool_transaction(tx_output_provider, height, time, &transaction) {
Ok(_) => sink.on_transaction_verification_success(transaction),
Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash()),
}

View File

@ -4,11 +4,9 @@ version = "0.1.0"
authors = ["Nikolay Volf <nikvolf@gmail.com>"]
[dependencies]
byteorder = "0.5"
parking_lot = "0.3"
time = "0.1"
log = "0.3"
scoped-pool = "1.0"
rayon = "0.5"
ethcore-devtools = { path = "../devtools" }
primitives = { path = "../primitives" }

View File

@ -0,0 +1,137 @@
use network::{Magic, ConsensusParams};
use db::PreviousTransactionOutputProvider;
use sigops::transaction_sigops;
use work::block_reward_satoshi;
use duplex_store::DuplexTransactionOutputProvider;
use canon::CanonBlock;
use constants::MAX_BLOCK_SIGOPS;
use error::Error;
/// Flexible verification of ordered block
///
/// Bundles the per-block acceptance rules (finality, sigop limit,
/// coinbase claim) so they can be built once and checked together.
pub struct BlockAcceptor<'a> {
	pub finality: BlockFinality<'a>,
	pub sigops: BlockSigops<'a>,
	pub coinbase_claim: BlockCoinbaseClaim<'a>,
}

impl<'a> BlockAcceptor<'a> {
	pub fn new(store: &'a PreviousTransactionOutputProvider, network: Magic, block: CanonBlock<'a>, height: u32) -> Self {
		let params = network.consensus_params();
		BlockAcceptor {
			finality: BlockFinality::new(block, height),
			sigops: BlockSigops::new(block, store, params, MAX_BLOCK_SIGOPS),
			coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
		}
	}

	/// Runs every rule in order; the first failing rule's error is returned.
	pub fn check(&self) -> Result<(), Error> {
		try!(self.finality.check());
		try!(self.sigops.check());
		try!(self.coinbase_claim.check());
		Ok(())
	}
}
/// A single block-level verification rule.
trait BlockRule {
	/// If verification fails returns an error
	fn check(&self) -> Result<(), Error>;
}
pub struct BlockFinality<'a> {
block: CanonBlock<'a>,
height: u32,
}
impl<'a> BlockFinality<'a> {
fn new(block: CanonBlock<'a>, height: u32) -> Self {
BlockFinality {
block: block,
height: height,
}
}
}
impl<'a> BlockRule for BlockFinality<'a> {
fn check(&self) -> Result<(), Error> {
if self.block.is_final(self.height) {
Ok(())
} else {
Err(Error::NonFinalBlock)
}
}
}
/// Rule: the total number of signature operations in the block must not
/// exceed `max_sigops`.
pub struct BlockSigops<'a> {
	block: CanonBlock<'a>,
	store: &'a PreviousTransactionOutputProvider,
	consensus_params: ConsensusParams,
	max_sigops: usize,
}

impl<'a> BlockSigops<'a> {
	fn new(block: CanonBlock<'a>, store: &'a PreviousTransactionOutputProvider, consensus_params: ConsensusParams, max_sigops: usize) -> Self {
		BlockSigops {
			block: block,
			store: store,
			consensus_params: consensus_params,
			max_sigops: max_sigops,
		}
	}
}

impl<'a> BlockRule for BlockSigops<'a> {
	fn check(&self) -> Result<(), Error> {
		// Previous outputs are resolved from both the store and this block itself.
		let store = DuplexTransactionOutputProvider::new(self.store, &*self.block);
		// BIP16 (P2SH) sigop counting activates by block timestamp.
		let bip16_active = self.block.header.raw.time >= self.consensus_params.bip16_time;
		let sigops = self.block.transactions.iter()
			.map(|tx| transaction_sigops(&tx.raw, &store, bip16_active))
			.sum::<usize>();

		if sigops > self.max_sigops {
			Err(Error::MaximumSigops)
		} else {
			Ok(())
		}
	}
}
/// Rule: the coinbase must not claim more than the block reward plus the
/// fees of all other transactions in the block.
pub struct BlockCoinbaseClaim<'a> {
	block: CanonBlock<'a>,
	store: &'a PreviousTransactionOutputProvider,
	height: u32,
}

impl<'a> BlockCoinbaseClaim<'a> {
	fn new(block: CanonBlock<'a>, store: &'a PreviousTransactionOutputProvider, height: u32) -> Self {
		BlockCoinbaseClaim {
			block: block,
			store: store,
			height: height,
		}
	}
}

impl<'a> BlockRule for BlockCoinbaseClaim<'a> {
	fn check(&self) -> Result<(), Error> {
		// Previous outputs are resolved from both the store and this block itself.
		let store = DuplexTransactionOutputProvider::new(self.store, &*self.block);

		// Total input value of all non-coinbase transactions.
		// Unknown prevouts count as 0 — missing inputs are rejected by other rules.
		let available = self.block.transactions.iter()
			.skip(1)
			.flat_map(|tx| tx.raw.inputs.iter())
			.map(|input| store.previous_transaction_output(&input.previous_output).map(|o| o.value).unwrap_or(0))
			.sum::<u64>();

		// Total output value of all non-coinbase transactions.
		let spends = self.block.transactions.iter()
			.skip(1)
			.map(|tx| tx.raw.total_spends())
			.sum::<u64>();

		// What the coinbase transaction actually pays out.
		let claim = self.block.transactions[0].raw.total_spends();

		// fees = available - spends; an overflow on either step means the block
		// overspends somewhere, which also invalidates the coinbase claim.
		let (fees, overflow) = available.overflowing_sub(spends);
		let (reward, overflow2) = fees.overflowing_add(block_reward_satoshi(self.height));
		if overflow || overflow2 || claim > reward {
			Err(Error::CoinbaseOverspend { expected_max: reward, actual: claim })
		} else {
			Ok(())
		}
	}
}

View File

@ -0,0 +1,62 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use db::SharedStore;
use network::Magic;
use error::Error;
use canon::CanonBlock;
use accept_block::BlockAcceptor;
use accept_header::HeaderAcceptor;
use accept_transaction::TransactionAcceptor;
use duplex_store::DuplexTransactionOutputProvider;
/// Aggregates all acceptance checks for a canonical (ordered) block:
/// block-level rules, header rules and per-transaction rules.
pub struct ChainAcceptor<'a> {
	pub block: BlockAcceptor<'a>,
	pub header: HeaderAcceptor<'a>,
	pub transactions: Vec<TransactionAcceptor<'a>>,
}

impl<'a> ChainAcceptor<'a> {
	pub fn new(store: &'a SharedStore, network: Magic, block: CanonBlock<'a>, height: u32) -> Self {
		trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
		// Combines the database prevout provider with outputs created inside this block.
		let prevouts = DuplexTransactionOutputProvider::new(store.as_previous_transaction_output_provider(), block.raw());
		ChainAcceptor {
			block: BlockAcceptor::new(store.as_previous_transaction_output_provider(), network, block, height),
			header: HeaderAcceptor::new(store.as_block_header_provider(), network, block.header(), height),
			transactions: block.transactions()
				.into_iter()
				.map(|tx| TransactionAcceptor::new(
					store.as_transaction_meta_provider(),
					prevouts,
					// the block itself acts as the spent-output observer here
					block.raw(),
					network,
					tx,
					block.hash(),
					height,
					block.header.raw.time
				))
				.collect(),
		}
	}

	/// Full check: block rules, header rules, then all transactions
	/// (including script evaluation).
	pub fn check(&self) -> Result<(), Error> {
		try!(self.block.check());
		try!(self.header.check());
		try!(self.check_transactions_with_eval(true));
		Ok(())
	}

	/// backwards test compatibility
	/// TODO: get rid of this
	pub fn check_with_eval(&self, eval: bool) -> Result<(), Error> {
		try!(self.block.check());
		try!(self.header.check());
		try!(self.check_transactions_with_eval(eval));
		Ok(())
	}

	// Verifies all transactions in parallel via rayon: each worker folds its
	// share into one Result, then the partial results are reduced with `and`.
	// NOTE(review): with work-stealing, when several transactions fail, the
	// reported (index, error) may not be the lowest-index failure — confirm
	// callers do not depend on that.
	fn check_transactions_with_eval(&self, eval: bool) -> Result<(), Error> {
		self.transactions.par_iter()
			.enumerate()
			.fold(|| Ok(()), |result, (index, tx)| result.and_then(|_| tx.check_with_eval(eval).map_err(|err| Error::Transaction(index, err))))
			.reduce(|| Ok(()), |acc, check| acc.and(check))
	}
}

View File

@ -0,0 +1,117 @@
use network::Magic;
use db::BlockHeaderProvider;
use canon::CanonHeader;
use constants::MIN_BLOCK_VERSION;
use error::Error;
use work::work_required;
use timestamp::median_timestamp;
/// Aggregated header acceptance rules: version, proof-of-work and
/// median-timestamp checks.
pub struct HeaderAcceptor<'a> {
	pub version: HeaderVersion<'a>,
	pub work: HeaderWork<'a>,
	pub median_timestamp: HeaderMedianTimestamp<'a>,
}

impl<'a> HeaderAcceptor<'a> {
	pub fn new(store: &'a BlockHeaderProvider, network: Magic, header: CanonHeader<'a>, height: u32) -> Self {
		HeaderAcceptor {
			// TODO: check last 1000 blocks instead of hardcoding the value
			version: HeaderVersion::new(header, MIN_BLOCK_VERSION),
			work: HeaderWork::new(header, store, height, network),
			median_timestamp: HeaderMedianTimestamp::new(header, store, network),
		}
	}

	/// Runs every rule in order; the first failing rule's error is returned.
	pub fn check(&self) -> Result<(), Error> {
		try!(self.version.check());
		try!(self.work.check());
		try!(self.median_timestamp.check());
		Ok(())
	}
}
/// A single header-level verification rule.
pub trait HeaderRule {
	// Returns an error when the header violates this rule.
	fn check(&self) -> Result<(), Error>;
}
pub struct HeaderVersion<'a> {
header: CanonHeader<'a>,
min_version: u32,
}
impl<'a> HeaderVersion<'a> {
fn new(header: CanonHeader<'a>, min_version: u32) -> Self {
HeaderVersion {
header: header,
min_version: min_version,
}
}
}
impl<'a> HeaderRule for HeaderVersion<'a> {
fn check(&self) -> Result<(), Error> {
if self.header.raw.version < self.min_version {
Err(Error::OldVersionBlock)
} else {
Ok(())
}
}
}
/// Rule: the header's `bits` must exactly match the work required at this
/// height, as derived from chain history.
pub struct HeaderWork<'a> {
	header: CanonHeader<'a>,
	store: &'a BlockHeaderProvider,
	height: u32,
	network: Magic,
}

impl<'a> HeaderWork<'a> {
	fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, network: Magic) -> Self {
		HeaderWork {
			header: header,
			store: store,
			height: height,
			network: network,
		}
	}
}

impl<'a> HeaderRule for HeaderWork<'a> {
	fn check(&self) -> Result<(), Error> {
		let previous_header_hash = self.header.raw.previous_header_hash.clone();
		let time = self.header.raw.time;
		// Required work is computed from the parent chain, height and network.
		let work = work_required(previous_header_hash, time, self.height, self.store, self.network);
		if work == self.header.raw.bits {
			Ok(())
		} else {
			Err(Error::Difficulty)
		}
	}
}
/// Rule: the header timestamp must be strictly greater than the median
/// timestamp of past blocks.
pub struct HeaderMedianTimestamp<'a> {
	header: CanonHeader<'a>,
	store: &'a BlockHeaderProvider,
	network: Magic,
}

impl<'a> HeaderMedianTimestamp<'a> {
	fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, network: Magic) -> Self {
		HeaderMedianTimestamp { header: header, store: store, network: network }
	}
}

impl<'a> HeaderRule for HeaderMedianTimestamp<'a> {
	fn check(&self) -> Result<(), Error> {
		let median = median_timestamp(&self.header.raw, self.store, self.network);
		// Timestamps at or below the median are rejected.
		if self.header.raw.time > median {
			Ok(())
		} else {
			Err(Error::Timestamp)
		}
	}
}

View File

@ -0,0 +1,392 @@
use primitives::hash::H256;
use db::{TransactionMetaProvider, PreviousTransactionOutputProvider, TransactionOutputObserver};
use network::{Magic, ConsensusParams};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner};
use duplex_store::{DuplexTransactionOutputProvider};
use sigops::transaction_sigops;
use canon::CanonTransaction;
use constants::{COINBASE_MATURITY, MAX_BLOCK_SIGOPS};
use error::TransactionError;
/// Acceptance rules for a transaction verified as part of an ordered block.
pub struct TransactionAcceptor<'a> {
	pub bip30: TransactionBip30<'a>,
	pub missing_inputs: TransactionMissingInputs<'a>,
	pub maturity: TransactionMaturity<'a>,
	pub overspent: TransactionOverspent<'a>,
	pub double_spent: TransactionDoubleSpend<'a>,
	pub eval: TransactionEval<'a>,
}

impl<'a> TransactionAcceptor<'a> {
	pub fn new(
		// in case of block validation, it's only current block,
		meta_store: &'a TransactionMetaProvider,
		// previous transaction outputs
		// in case of block validation, that's database and currently processed block
		prevout_store: DuplexTransactionOutputProvider<'a>,
		// in case of block validation, that's database and currently processed block
		spent_store: &'a TransactionOutputObserver,
		network: Magic,
		transaction: CanonTransaction<'a>,
		block_hash: &'a H256,
		height: u32,
		time: u32,
	) -> Self {
		trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
		let params = network.consensus_params();
		TransactionAcceptor {
			bip30: TransactionBip30::new_for_sync(transaction, meta_store, params.clone(), block_hash, height),
			missing_inputs: TransactionMissingInputs::new(transaction, prevout_store),
			maturity: TransactionMaturity::new(transaction, meta_store, height),
			overspent: TransactionOverspent::new(transaction, prevout_store),
			double_spent: TransactionDoubleSpend::new(transaction, spent_store),
			eval: TransactionEval::new(transaction, prevout_store, params, height, time),
		}
	}

	/// Runs every rule, including script evaluation; the first failure wins.
	pub fn check(&self) -> Result<(), TransactionError> {
		try!(self.bip30.check());
		try!(self.missing_inputs.check());
		try!(self.maturity.check());
		try!(self.overspent.check());
		try!(self.double_spent.check());
		try!(self.eval.check());
		Ok(())
	}

	/// backwards test compatibility
	/// TODO: get rid of this
	pub fn check_with_eval(&self, eval: bool) -> Result<(), TransactionError> {
		try!(self.bip30.check());
		try!(self.missing_inputs.check());
		try!(self.maturity.check());
		try!(self.overspent.check());
		try!(self.double_spent.check());
		if eval {
			try!(self.eval.check());
		}
		Ok(())
	}
}
/// Acceptance rules for a transaction entering the memory pool.
/// Unlike block validation, the sigop limit is checked per transaction here.
pub struct MemoryPoolTransactionAcceptor<'a> {
	pub bip30: TransactionBip30<'a>,
	pub missing_inputs: TransactionMissingInputs<'a>,
	pub maturity: TransactionMaturity<'a>,
	pub overspent: TransactionOverspent<'a>,
	pub sigops: TransactionSigops<'a>,
	pub double_spent: TransactionDoubleSpend<'a>,
	pub eval: TransactionEval<'a>,
}

impl<'a> MemoryPoolTransactionAcceptor<'a> {
	pub fn new(
		// TODO: in case of memory pool it should be db and memory pool
		meta_store: &'a TransactionMetaProvider,
		// in case of memory pool it should be db and memory pool
		prevout_store: DuplexTransactionOutputProvider<'a>,
		// in case of memory pool it should be db and memory pool
		spent_store: &'a TransactionOutputObserver,
		network: Magic,
		transaction: CanonTransaction<'a>,
		height: u32,
		time: u32,
	) -> Self {
		trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
		let params = network.consensus_params();
		MemoryPoolTransactionAcceptor {
			bip30: TransactionBip30::new_for_mempool(transaction, meta_store),
			missing_inputs: TransactionMissingInputs::new(transaction, prevout_store),
			maturity: TransactionMaturity::new(transaction, meta_store, height),
			overspent: TransactionOverspent::new(transaction, prevout_store),
			sigops: TransactionSigops::new(transaction, prevout_store, params.clone(), MAX_BLOCK_SIGOPS, time),
			double_spent: TransactionDoubleSpend::new(transaction, spent_store),
			eval: TransactionEval::new(transaction, prevout_store, params, height, time),
		}
	}

	/// Runs every rule except BIP30 (disabled, see TODO); first failure wins.
	pub fn check(&self) -> Result<(), TransactionError> {
		// TODO: b82 fails, when this is enabled, fix this
		//try!(self.bip30.check());
		try!(self.missing_inputs.check());
		try!(self.maturity.check());
		try!(self.overspent.check());
		try!(self.sigops.check());
		try!(self.double_spent.check());
		try!(self.eval.check());
		Ok(())
	}
}
/// A single transaction-level verification rule.
pub trait TransactionRule {
	// Returns an error when the transaction violates this rule.
	fn check(&self) -> Result<(), TransactionError>;
}
/// Rule (BIP30): reject a transaction whose hash duplicates an existing,
/// not-fully-spent transaction, unless whitelisted as a consensus exception.
pub struct TransactionBip30<'a> {
	transaction: CanonTransaction<'a>,
	store: &'a TransactionMetaProvider,
	// true when the containing block is a whitelisted BIP30 exception
	exception: bool,
}

impl<'a> TransactionBip30<'a> {
	// Block-sync flavour: exception status depends on the block hash/height.
	fn new_for_sync(
		transaction: CanonTransaction<'a>,
		store: &'a TransactionMetaProvider,
		consensus_params: ConsensusParams,
		block_hash: &'a H256,
		height: u32
	) -> Self {
		let exception = consensus_params.is_bip30_exception(block_hash, height);
		TransactionBip30 {
			transaction: transaction,
			store: store,
			exception: exception,
		}
	}

	// Mempool flavour: no block context, so never an exception.
	fn new_for_mempool(transaction: CanonTransaction<'a>, store: &'a TransactionMetaProvider) -> Self {
		TransactionBip30 {
			transaction: transaction,
			store: store,
			exception: false,
		}
	}
}

impl<'a> TransactionRule for TransactionBip30<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// we allow optionals here, cause previous output may be a part of current block
		// yet, we do not need to check current block, cause duplicated transactions
		// in the same block are also forbidden
		//
		// update*
		// TODO:
		// There is a potential consensus failure here, cause transaction before this one
		// may have fully spent the output, and we, by checking only storage, have no knowladge
		// of it
		match self.store.transaction_meta(&self.transaction.hash) {
			Some(ref meta) if !meta.is_fully_spent() && !self.exception => {
				Err(TransactionError::UnspentTransactionWithTheSameHash)
			},
			_ => Ok(())
		}
	}
}
/// Rule: every non-null input must reference a known previous output.
pub struct TransactionMissingInputs<'a> {
	transaction: CanonTransaction<'a>,
	store: DuplexTransactionOutputProvider<'a>,
}

impl<'a> TransactionMissingInputs<'a> {
	fn new(transaction: CanonTransaction<'a>, store: DuplexTransactionOutputProvider<'a>) -> Self {
		TransactionMissingInputs { transaction: transaction, store: store }
	}
}

impl<'a> TransactionRule for TransactionMissingInputs<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// Report the first input whose previous output cannot be resolved.
		for (index, input) in self.transaction.raw.inputs.iter().enumerate() {
			if input.previous_output.is_null() {
				// Null prevouts are not looked up at all.
				continue;
			}
			if self.store.previous_transaction_output(&input.previous_output).is_none() {
				return Err(TransactionError::Input(index));
			}
		}
		Ok(())
	}
}
/// Rule: coinbase outputs may only be spent once they are at least
/// `COINBASE_MATURITY` blocks deep.
pub struct TransactionMaturity<'a> {
	transaction: CanonTransaction<'a>,
	store: &'a TransactionMetaProvider,
	height: u32,
}

impl<'a> TransactionMaturity<'a> {
	fn new(transaction: CanonTransaction<'a>, store: &'a TransactionMetaProvider, height: u32) -> Self {
		TransactionMaturity {
			transaction: transaction,
			store: store,
			height: height,
		}
	}
}

impl<'a> TransactionRule for TransactionMaturity<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// TODO: this is should also fail when we are trying to spend current block coinbase
		let immature_spend = self.transaction.raw.inputs.iter()
			.any(|input| match self.store.transaction_meta(&input.previous_output.hash) {
				// a coinbase prevout is immature until COINBASE_MATURITY blocks have passed
				Some(ref meta) if meta.is_coinbase() && self.height < meta.height() + COINBASE_MATURITY => true,
				_ => false,
			});
		if immature_spend {
			Err(TransactionError::Maturity)
		} else {
			Ok(())
		}
	}
}
/// Rule: a non-coinbase transaction must not spend more than its inputs provide.
pub struct TransactionOverspent<'a> {
	transaction: CanonTransaction<'a>,
	store: DuplexTransactionOutputProvider<'a>,
}

impl<'a> TransactionOverspent<'a> {
	fn new(transaction: CanonTransaction<'a>, store: DuplexTransactionOutputProvider<'a>) -> Self {
		TransactionOverspent { transaction: transaction, store: store }
	}
}

impl<'a> TransactionRule for TransactionOverspent<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// Coinbase transactions create value and are exempt from this rule.
		if self.transaction.raw.is_coinbase() {
			return Ok(());
		}

		// Unknown prevouts count as 0 — missing inputs are rejected elsewhere.
		let mut available = 0u64;
		for input in self.transaction.raw.inputs.iter() {
			available += self.store.previous_transaction_output(&input.previous_output)
				.map(|o| o.value)
				.unwrap_or(0);
		}

		match self.transaction.raw.total_spends() > available {
			true => Err(TransactionError::Overspend),
			false => Ok(()),
		}
	}
}
/// Rule: the transaction's signature-operation count must not exceed
/// `max_sigops` (with P2SH counting once BIP16 is active).
pub struct TransactionSigops<'a> {
	transaction: CanonTransaction<'a>,
	store: DuplexTransactionOutputProvider<'a>,
	consensus_params: ConsensusParams,
	max_sigops: usize,
	time: u32,
}

impl<'a> TransactionSigops<'a> {
	fn new(transaction: CanonTransaction<'a>, store: DuplexTransactionOutputProvider<'a>, consensus_params: ConsensusParams, max_sigops: usize, time: u32) -> Self {
		TransactionSigops {
			transaction: transaction,
			store: store,
			consensus_params: consensus_params,
			max_sigops: max_sigops,
			time: time,
		}
	}
}

impl<'a> TransactionRule for TransactionSigops<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// BIP16 (P2SH) sigop counting activates by timestamp.
		let bip16_active = self.time >= self.consensus_params.bip16_time;
		let sigops = transaction_sigops(&self.transaction.raw, &self.store, bip16_active);
		if sigops > self.max_sigops {
			Err(TransactionError::MaxSigops)
		} else {
			Ok(())
		}
	}
}
/// Rule: each input's script must evaluate successfully against the script
/// of the output it spends. Coinbase transactions are exempt.
pub struct TransactionEval<'a> {
	transaction: CanonTransaction<'a>,
	store: DuplexTransactionOutputProvider<'a>,
	verify_p2sh: bool,
	verify_clocktime: bool,
}

impl<'a> TransactionEval<'a> {
	fn new(
		transaction: CanonTransaction<'a>,
		store: DuplexTransactionOutputProvider<'a>,
		params: ConsensusParams,
		height: u32,
		time: u32,
	) -> Self {
		// BIP16 (P2SH) activates by timestamp; BIP65 (CLTV) by block height.
		let verify_p2sh = time >= params.bip16_time;
		let verify_clocktime = height >= params.bip65_height;

		TransactionEval {
			transaction: transaction,
			store: store,
			verify_p2sh: verify_p2sh,
			verify_clocktime: verify_clocktime,
		}
	}
}

impl<'a> TransactionRule for TransactionEval<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		if self.transaction.raw.is_coinbase() {
			return Ok(());
		}

		let signer: TransactionInputSigner = self.transaction.raw.clone().into();

		let mut checker = TransactionSignatureChecker {
			signer: signer,
			input_index: 0,
		};

		for (index, input) in self.transaction.raw.inputs.iter().enumerate() {
			// every input must reference a resolvable previous output
			let output = self.store.previous_transaction_output(&input.previous_output)
				.ok_or_else(|| TransactionError::UnknownReference(input.previous_output.hash.clone()))?;

			checker.input_index = index;

			let input: Script = input.script_sig.clone().into();
			let output: Script = output.script_pubkey.into();

			let flags = VerificationFlags::default()
				.verify_p2sh(self.verify_p2sh)
				.verify_clocktimeverify(self.verify_clocktime);

			// any script failure is mapped to a Signature error for this input index
			try!(verify_script(&input, &output, &flags, &checker).map_err(|_| TransactionError::Signature(index)));
		}

		Ok(())
	}
}
/// Rule: none of the transaction's inputs may spend an already-spent output.
pub struct TransactionDoubleSpend<'a> {
	transaction: CanonTransaction<'a>,
	store: &'a TransactionOutputObserver,
}

impl<'a> TransactionDoubleSpend<'a> {
	fn new(transaction: CanonTransaction<'a>, store: &'a TransactionOutputObserver) -> Self {
		TransactionDoubleSpend { transaction: transaction, store: store }
	}
}

impl<'a> TransactionRule for TransactionDoubleSpend<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// An unknown spent-state (None) is treated as "not spent".
		let spent_input = self.transaction.raw.inputs.iter()
			.find(|input| self.store.is_spent(&input.previous_output).unwrap_or(false));

		match spent_input {
			Some(input) => Err(TransactionError::UsingSpentOutput(
				input.previous_output.hash.clone(),
				input.previous_output.index
			)),
			None => Ok(()),
		}
	}
}

83
verification/src/canon.rs Normal file
View File

@ -0,0 +1,83 @@
use std::ops;
use primitives::hash::H256;
use db::{IndexedBlock, IndexedTransaction, IndexedBlockHeader};
/// Blocks whose parents are known to be in the chain
#[derive(Clone, Copy)]
pub struct CanonBlock<'a> {
	block: &'a IndexedBlock,
}

impl<'a> CanonBlock<'a> {
	/// Wraps a block whose position in the chain is already established.
	pub fn new(block: &'a IndexedBlock) -> Self {
		CanonBlock {
			block: block,
		}
	}

	/// Hash of this block; the borrow lives as long as the underlying block.
	pub fn hash<'b>(&'b self) -> &'a H256 where 'a: 'b {
		&self.block.header.hash
	}

	/// The underlying indexed block.
	pub fn raw<'b>(&'b self) -> &'a IndexedBlock where 'a: 'b {
		self.block
	}

	/// Canonical view of this block's header.
	pub fn header<'b>(&'b self) -> CanonHeader<'a> where 'a: 'b {
		CanonHeader::new(&self.block.header)
	}

	/// Canonical views of all transactions, in block order.
	pub fn transactions<'b>(&'b self) -> Vec<CanonTransaction<'a>> where 'a: 'b {
		self.block.transactions.iter().map(CanonTransaction::new).collect()
	}
}

impl<'a> ops::Deref for CanonBlock<'a> {
	type Target = IndexedBlock;

	fn deref(&self) -> &Self::Target {
		self.block
	}
}
/// Header of a canonical (chain-ordered) block.
#[derive(Clone, Copy)]
pub struct CanonHeader<'a> {
	header: &'a IndexedBlockHeader,
}

impl<'a> CanonHeader<'a> {
	/// Wraps an indexed header of a canonical block.
	pub fn new(header: &'a IndexedBlockHeader) -> Self {
		CanonHeader {
			header: header,
		}
	}
}

impl<'a> ops::Deref for CanonHeader<'a> {
	type Target = IndexedBlockHeader;

	fn deref(&self) -> &Self::Target {
		self.header
	}
}
/// Transaction belonging to a canonical (chain-ordered) block.
#[derive(Clone, Copy)]
pub struct CanonTransaction<'a> {
	transaction: &'a IndexedTransaction,
}

impl<'a> CanonTransaction<'a> {
	/// Wraps an indexed transaction of a canonical block.
	pub fn new(transaction: &'a IndexedTransaction) -> Self {
		CanonTransaction {
			transaction: transaction,
		}
	}
}

impl<'a> ops::Deref for CanonTransaction<'a> {
	type Target = IndexedTransaction;

	fn deref(&self) -> &Self::Target {
		self.transaction
	}
}

View File

@ -1,21 +1,17 @@
//! Bitcoin chain verifier
use std::collections::BTreeSet;
use scoped_pool::Pool;
use hash::H256;
use db::{self, BlockLocation, PreviousTransactionOutputProvider, BlockHeaderProvider, TransactionOutputObserver};
use network::{Magic, ConsensusParams};
use db::{self, IndexedBlockHeader, BlockLocation, PreviousTransactionOutputProvider, BlockHeaderProvider, TransactionOutputObserver};
use network::Magic;
use error::{Error, TransactionError};
use sigops::{StoreWithUnretainedOutputs, transaction_sigops};
use {Verify, chain, utils};
const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
const COINBASE_MATURITY: u32 = 100; // 2 hours
pub const MAX_BLOCK_SIZE: usize = 1_000_000;
pub const MAX_BLOCK_SIGOPS: usize = 20_000;
const TRANSACTIONS_VERIFY_THREADS: usize = 8;
const TRANSACTIONS_VERIFY_PARALLEL_THRESHOLD: usize = 32;
use {Verify, chain};
use canon::{CanonBlock, CanonTransaction};
use duplex_store::{DuplexTransactionOutputProvider, NoopStore};
use verify_chain::ChainVerifier;
use verify_header::HeaderVerifier;
use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
#[derive(PartialEq, Debug)]
/// Block verification chain
@ -31,24 +27,20 @@ pub enum Chain {
/// Verification result
pub type VerificationResult = Result<Chain, Error>;
pub struct ChainVerifier {
pub struct BackwardsCompatibleChainVerifier {
store: db::SharedStore,
skip_pow: bool,
skip_sig: bool,
network: Magic,
consensus_params: ConsensusParams,
pool: Pool,
}
impl ChainVerifier {
impl BackwardsCompatibleChainVerifier {
pub fn new(store: db::SharedStore, network: Magic) -> Self {
ChainVerifier {
BackwardsCompatibleChainVerifier {
store: store,
skip_pow: false,
skip_sig: false,
network: network,
consensus_params: network.consensus_params(),
pool: Pool::new(TRANSACTIONS_VERIFY_THREADS),
}
}
@ -64,313 +56,75 @@ impl ChainVerifier {
self
}
pub fn verify_p2sh(&self, time: u32) -> bool {
time >= self.consensus_params.bip16_time
}
pub fn verify_clocktimeverify(&self, height: u32) -> bool {
height >= self.consensus_params.bip65_height
}
/// Returns number of block signature operations.
/// NOTE: This function expects all previous blocks to be already in database.
fn block_sigops(&self, block: &db::IndexedBlock) -> usize {
// strict pay-to-script-hash signature operations count toward block
// signature operations limit is enforced with BIP16
let store = StoreWithUnretainedOutputs::new(&self.store, block);
let bip16_active = self.verify_p2sh(block.header.raw.time);
block.transactions.iter().map(|tx| transaction_sigops(&tx.raw, &store, bip16_active)).sum()
}
fn ordered_verify(&self, block: &db::IndexedBlock, at_height: u32) -> Result<(), Error> {
if !block.is_final(at_height) {
return Err(Error::NonFinalBlock);
}
// transaction verification including number of signature operations checking
if self.block_sigops(block) > MAX_BLOCK_SIGOPS {
return Err(Error::MaximumSigops);
}
let block_hash = block.hash();
// check that difficulty matches the adjusted level
//if let Some(work) = self.work_required(block, at_height) {
if at_height != 0 && !self.skip_pow {
let work = utils::work_required(
block.header.raw.previous_header_hash.clone(),
block.header.raw.time,
at_height,
self.store.as_block_header_provider(),
self.network
);
if !self.skip_pow && work != block.header.raw.bits {
trace!(target: "verification", "pow verification error at height: {}", at_height);
trace!(target: "verification", "expected work: {:?}, got {:?}", work, block.header.raw.bits);
return Err(Error::Difficulty);
}
}
let coinbase_spends = block.transactions[0].raw.total_spends();
// bip30
for (tx_index, tx) in block.transactions.iter().enumerate() {
if let Some(meta) = self.store.transaction_meta(&tx.hash) {
if !meta.is_fully_spent() && !self.consensus_params.is_bip30_exception(&block_hash, at_height) {
return Err(Error::Transaction(tx_index, TransactionError::UnspentTransactionWithTheSameHash));
}
}
}
let unretained_store = StoreWithUnretainedOutputs::new(&self.store, block);
let mut total_unspent = 0u64;
for (tx_index, tx) in block.transactions.iter().enumerate().skip(1) {
let mut total_claimed: u64 = 0;
for input in &tx.raw.inputs {
// Coinbase maturity check
if let Some(previous_meta) = self.store.transaction_meta(&input.previous_output.hash) {
// check if it exists only
// it will fail a little later if there is no transaction at all
if previous_meta.is_coinbase() &&
(at_height < COINBASE_MATURITY || at_height - COINBASE_MATURITY < previous_meta.height()) {
return Err(Error::Transaction(tx_index, TransactionError::Maturity));
}
}
let previous_output = unretained_store.previous_transaction_output(&input.previous_output)
.expect("missing tx, out of order verification or malformed db");
total_claimed += previous_output.value;
}
let total_spends = tx.raw.total_spends();
if total_claimed < total_spends {
return Err(Error::Transaction(tx_index, TransactionError::Overspend));
}
// total_claimed is greater than total_spends, checked above and returned otherwise, cannot overflow; qed
total_unspent += total_claimed - total_spends;
}
let expected_max = utils::block_reward_satoshi(at_height) + total_unspent;
if coinbase_spends > expected_max{
return Err(Error::CoinbaseOverspend { expected_max: expected_max, actual: coinbase_spends });
}
Ok(())
}
/// Runs script/signature verification for a single in-block transaction.
///
/// `prevout_provider` supplies previous outputs whose parent transactions
/// live in the same block / mempool rather than the store. `sequence` is
/// the transaction's position within the block; position 0 (the coinbase)
/// is skipped entirely.
pub fn verify_transaction<T>(
	&self,
	prevout_provider: &T,
	height: u32,
	time: u32,
	transaction: &chain::Transaction,
	sequence: usize
) -> Result<(), TransactionError> where T: PreviousTransactionOutputProvider + TransactionOutputObserver {
	use script::{
		TransactionInputSigner,
		TransactionSignatureChecker,
		VerificationFlags,
		Script,
		verify_script,
	};

	// the coinbase (position 0) spends no previous outputs; nothing to verify
	if sequence == 0 {
		return Ok(());
	}

	// must not be coinbase (sequence = 0 is returned above)
	// note: the MisplacedCoinbase variant no longer carries the position
	// payload after the error-enum rework, so it is constructed bare here
	if transaction.is_coinbase() { return Err(TransactionError::MisplacedCoinbase); }

	// previous outputs are looked up in the store first, then in the
	// in-flight set (block / mempool)
	let unretained_store = StoreWithUnretainedOutputs::new(&self.store, prevout_provider);
	for (input_index, input) in transaction.inputs().iter().enumerate() {
		// signature verification
		let signer: TransactionInputSigner = transaction.clone().into();
		let paired_output = match unretained_store.previous_transaction_output(&input.previous_output) {
			Some(output) => output,
			_ => return Err(TransactionError::UnknownReference(input.previous_output.hash.clone()))
		};

		// unwrap_or(false) is actually not right!
		// but can be here because of two reasons
		// - this function is not responsible for checking if previous transactions
		// in currently processed block / mempool already spent this output
		// - if we process transactions from mempool we shouldn't care if transactions before it
		// spent this output, cause they may not make their way into the block due to their size
		// or sigops limit
		if prevout_provider.is_spent(&input.previous_output).unwrap_or(false) {
			return Err(TransactionError::UsingSpentOutput(input.previous_output.hash.clone(), input.previous_output.index))
		}

		let checker = TransactionSignatureChecker {
			signer: signer,
			input_index: input_index,
		};
		let input: Script = input.script_sig.clone().into();
		let output: Script = paired_output.script_pubkey.into();

		// p2sh and cltv verification flags depend on activation time / height
		let flags = VerificationFlags::default()
			.verify_p2sh(self.verify_p2sh(time))
			.verify_clocktimeverify(self.verify_clocktimeverify(height));

		// for tests only, skips as late as possible
		if self.skip_sig { continue; }

		if let Err(e) = verify_script(&input, &output, &flags, &checker) {
			trace!(target: "verification", "transaction signature verification failure: {:?}", e);
			trace!(target: "verification", "input:\n{}", input);
			trace!(target: "verification", "output:\n{}", output);
			// todo: log error here
			return Err(TransactionError::Signature(input_index))
		}
	}

	Ok(())
}
/// Pre-verifies a block header: proof-of-work, the future-timestamp bound
/// and the median-time-past rule.
///
/// `hash` must be the hash of `header`. Median-timestamp validation is
/// skipped on testnet (see TODO below) and when ancestors are unavailable.
pub fn verify_block_header(
	&self,
	block_header_provider: &BlockHeaderProvider,
	hash: &H256,
	header: &chain::BlockHeader
) -> Result<(), Error> {
	// target difficulty threshold
	if !self.skip_pow && !utils::is_valid_proof_of_work(self.network.max_bits(), header.bits, hash) {
		return Err(Error::Pow);
	}

	// check if block timestamp is not far in the future
	if utils::age(header.time) < -BLOCK_MAX_FUTURE {
		return Err(Error::FuturisticTimestamp);
	}

	// the timestamp must be strictly greater than the median of the last
	// 11 ancestors; if the ancestors can't be fetched the check is skipped
	if let Some(median_timestamp) = ChainVerifier::median_timestamp(block_header_provider, header) {
		// TODO: make timestamp validation on testnet work...
		if self.network != Magic::Testnet && median_timestamp >= header.time {
			trace!(
				target: "verification", "median timestamp verification failed, median: {}, current: {}",
				median_timestamp,
				header.time
			);
			return Err(Error::Timestamp);
		}
	}

	Ok(())
}
/// Full block verification: structural pre-verification (size, merkle root,
/// coinbase shape), header checks, acceptance checks, then per-transaction
/// script verification (parallelised above a threshold), and finally the
/// ordered (chain-context) checks.
fn verify_block(&self, block: &db::IndexedBlock) -> VerificationResult {
	use task::Task;
	let hash = block.hash();

	// There should be at least 1 transaction
	if block.transactions.is_empty() {
		return Err(Error::Empty);
	}

	// block header checks
	try!(self.verify_block_header(self.store.as_block_header_provider(), &hash, &block.header.raw));

	// todo: serialized_size function is at least suboptimal
	let size = block.size();
	if size > MAX_BLOCK_SIZE {
		return Err(Error::Size(size))
	}

	// verify merkle root
	if block.merkle_root() != block.header.raw.merkle_root_hash {
		return Err(Error::MerkleRoot);
	}

	let first_tx = &block.transactions[0].raw;
	// check first transaction is a coinbase transaction
	if !first_tx.is_coinbase() {
		return Err(Error::Coinbase)
	}

	// check that coinbase has a valid signature
	// is_coinbase() = true above guarantees that there is at least one input
	let coinbase_script_len = first_tx.inputs[0].script_sig.len();
	if coinbase_script_len < 2 || coinbase_script_len > 100 {
		return Err(Error::CoinbaseSignatureLength(coinbase_script_len));
	}

	let current_time = ::time::get_time().sec as u32;
	// first run pre-verification
	let chain_verifier = ChainVerifier::new(block, self.network, current_time);
	try!(chain_verifier.check_with_pow(!self.skip_pow));

	// check pre-verified header location
	// TODO: now this function allows full verification for sidechain block
	// it should allow full verification only for canon blocks
	let location = match self.store.accepted_location(&block.header.raw) {
		Some(location) => location,
		None => return Ok(Chain::Orphan),
	};

	if block.transactions.len() > TRANSACTIONS_VERIFY_PARALLEL_THRESHOLD {
		// todo: might use on-stack vector (smallvec/elastic array)
		let mut transaction_tasks: Vec<Task> = Vec::with_capacity(TRANSACTIONS_VERIFY_THREADS);
		let mut last = 0;
		for num_task in 0..TRANSACTIONS_VERIFY_THREADS {
			let from = last;
			last = from + ::std::cmp::max(1, block.transactions.len() / TRANSACTIONS_VERIFY_THREADS);
			if num_task == TRANSACTIONS_VERIFY_THREADS - 1 { last = block.transactions.len(); };
			transaction_tasks.push(Task::new(block, location.height(), from, last));
		}

		// now do full verification
		let canon_block = CanonBlock::new(block);
		let chain_acceptor = ChainAcceptor::new(&self.store, self.network, canon_block, location.height());
		try!(chain_acceptor.check_with_eval(!self.skip_sig));

		self.pool.scoped(|scope| {
			for task in transaction_tasks.iter_mut() {
				scope.execute(move || task.progress(self))
			}
			self.store.flush();
		});

		for task in transaction_tasks.into_iter() {
			if let Err((index, tx_err)) = task.result() {
				return Err(Error::Transaction(index, tx_err));
			}
		}
	}
	else {
		for (index, tx) in block.transactions.iter().enumerate() {
			if let Err(tx_err) = self.verify_transaction(block, location.height(), block.header.raw.time, &tx.raw, index) {
				return Err(Error::Transaction(index, tx_err));
			}
		}
	}

	// todo: pre-process projected block number once verification is parallel!
	// NOTE(review): a bad merge had left a second, unreachable pair of
	// `Main(_)` / `Side(_)` arms after these two; they have been removed.
	match location {
		BlockLocation::Main(block_number) => {
			try!(self.ordered_verify(block, block_number));
			Ok(Chain::Main)
		},
		BlockLocation::Side(block_number) => {
			try!(self.ordered_verify(block, block_number));
			Ok(Chain::Side)
		},
	}
}
pub fn median_timestamp(block_header_provider: &BlockHeaderProvider, header: &chain::BlockHeader) -> Option<u32> {
let mut timestamps = BTreeSet::new();
let mut block_ref = header.previous_header_hash.clone().into();
// TODO: optimize it, so it does not make 11 redundant queries each time
for _ in 0..11 {
let previous_header = match block_header_provider.block_header(block_ref) {
Some(h) => h,
None => { break; }
};
timestamps.insert(previous_header.time);
block_ref = previous_header.previous_header_hash.into();
}
pub fn verify_block_header(
&self,
_block_header_provider: &BlockHeaderProvider,
hash: &H256,
header: &chain::BlockHeader
) -> Result<(), Error> {
// let's do only preverifcation
// TODO: full verification
let current_time = ::time::get_time().sec as u32;
let header = IndexedBlockHeader::new(hash.clone(), header.clone());
let header_verifier = HeaderVerifier::new(&header, self.network, current_time);
header_verifier.check_with_pow(!self.skip_pow)
}
if timestamps.len() > 2 {
let timestamps: Vec<_> = timestamps.into_iter().collect();
Some(timestamps[timestamps.len() / 2])
}
else { None }
/// Verifies a transaction received via the memory pool.
///
/// Runs stateless pre-verification (`MemoryPoolTransactionVerifier`)
/// followed by full acceptance checks against the store and the supplied
/// previous-output provider.
pub fn verify_mempool_transaction<T>(
	&self,
	prevout_provider: &T,
	height: u32,
	time: u32,
	transaction: &chain::Transaction,
) -> Result<(), TransactionError> where T: PreviousTransactionOutputProvider + TransactionOutputObserver {
	let indexed_tx = transaction.clone().into();
	// let's do preverification first
	let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx);
	try!(tx_verifier.check());

	let canon_tx = CanonTransaction::new(&indexed_tx);
	// now let's do full verification
	// NoopStore never resolves an output, so all previous-output lookups
	// fall through to `prevout_provider`
	let noop = NoopStore;
	let prevouts = DuplexTransactionOutputProvider::new(prevout_provider, &noop);
	let tx_acceptor = MemoryPoolTransactionAcceptor::new(
		self.store.as_transaction_meta_provider(),
		prevouts,
		prevout_provider,
		self.network,
		canon_tx,
		height,
		time
	);
	tx_acceptor.check()
}
}
impl Verify for ChainVerifier {
impl Verify for BackwardsCompatibleChainVerifier {
fn verify(&self, block: &db::IndexedBlock) -> VerificationResult {
let result = self.verify_block(block);
trace!(
@ -390,7 +144,7 @@ mod tests {
use network::Magic;
use devtools::RandomTempPath;
use {script, test_data};
use super::ChainVerifier;
use super::BackwardsCompatibleChainVerifier as ChainVerifier;
use super::super::{Verify, Chain, Error, TransactionError};
#[test]
@ -441,9 +195,13 @@ mod tests {
let genesis_coinbase = genesis.transactions()[0].hash();
let block = test_data::block_builder()
.transaction().coinbase().build()
.transaction()
.coinbase()
.output().value(1).build()
.build()
.transaction()
.input().hash(genesis_coinbase).build()
.output().value(2).build()
.build()
.merkled_header().parent(genesis.hash()).build()
.build();
@ -478,9 +236,13 @@ mod tests {
let reference_tx = genesis.transactions()[1].hash();
let block = test_data::block_builder()
.transaction().coinbase().build()
.transaction()
.coinbase()
.output().value(2).build()
.build()
.transaction()
.input().hash(reference_tx).build()
.output().value(1).build()
.build()
.merkled_header().parent(genesis.hash()).build()
.build();
@ -512,7 +274,10 @@ mod tests {
let first_tx_hash = genesis.transactions()[1].hash();
let block = test_data::block_builder()
.transaction().coinbase().build()
.transaction()
.coinbase()
.output().value(2).build()
.build()
.transaction()
.input().hash(first_tx_hash).build()
.output().value(30).build()
@ -547,17 +312,23 @@ mod tests {
.build();
storage.insert_block(&genesis).expect("Genesis should be inserted with no errors");
let genesis_coinbase = genesis.transactions()[1].hash();
let first_tx_hash = genesis.transactions()[1].hash();
let block = test_data::block_builder()
.transaction().coinbase().build()
.transaction()
.input().hash(genesis_coinbase).build()
.output().value(30).build()
.output().value(20).build()
.coinbase()
.output().value(2).build()
.build()
.transaction()
.input().hash(first_tx_hash).build()
.output().value(19).build()
.output().value(31).build()
.build()
.derived_transaction(1, 0)
.output().value(35).build()
.output().value(20).build()
.build()
.derived_transaction(1, 1)
.output().value(20).build()
.build()
.merkled_header().parent(genesis.hash()).build()
.build();
@ -637,7 +408,7 @@ mod tests {
}
let mut builder_tx2 = script::Builder::default();
for _ in 0..11000 {
for _ in 0..11001 {
builder_tx2 = builder_tx2.push_opcode(script::Opcode::OP_CHECKSIG)
}

View File

@ -0,0 +1,21 @@
//! Consensus constants

/// Maximum allowed clock drift into the future for a block timestamp (2 hours).
pub const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
/// Number of confirmations a coinbase output needs before it may be spent.
pub const COINBASE_MATURITY: u32 = 100; // 100 blocks
pub const MAX_BLOCK_SIZE: usize = 1_000_000;
pub const MAX_BLOCK_SIGOPS: usize = 20_000;
/// Bounds on the coinbase script signature length.
pub const MIN_COINBASE_SIZE: usize = 2;
pub const MAX_COINBASE_SIZE: usize = 100;
pub const MIN_BLOCK_VERSION: u32 = 0;

/// Difficulty may move by at most this factor per retargeting period.
pub const RETARGETING_FACTOR: u32 = 4;
/// Target interval between blocks: 10 minutes.
pub const TARGET_SPACING_SECONDS: u32 = 10 * 60;
pub const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
/// Target retargeting period: 2 weeks.
pub const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;

// The upper and lower bounds for retargeting timespan
pub const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR;
pub const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR;

// Target number of blocks between retargets, 2 weeks, 2016
pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS;

View File

@ -0,0 +1,35 @@
//! Transaction validation rules that require sophisticated
//! (more-than-one-source) previous transaction lookups
use chain::{OutPoint, TransactionOutput};
use db::PreviousTransactionOutputProvider;
/// Chains two previous-output providers: lookups consult `first` and fall
/// back to `second` on a miss.
#[derive(Clone, Copy)]
pub struct DuplexTransactionOutputProvider<'a> {
	first: &'a PreviousTransactionOutputProvider,
	second: &'a PreviousTransactionOutputProvider,
}

impl<'a> DuplexTransactionOutputProvider<'a> {
	/// Creates a provider that consults `first` before `second`.
	pub fn new(first: &'a PreviousTransactionOutputProvider, second: &'a PreviousTransactionOutputProvider) -> Self {
		DuplexTransactionOutputProvider {
			first: first,
			second: second,
		}
	}
}
impl<'a> PreviousTransactionOutputProvider for DuplexTransactionOutputProvider<'a> {
	/// Looks up the previous output in the first provider, falling back to
	/// the second one on a miss.
	fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
		match self.first.previous_transaction_output(prevout) {
			Some(output) => Some(output),
			None => self.second.previous_transaction_output(prevout),
		}
	}
}
/// A previous-output provider that never resolves anything; used where the
/// duplex interface requires a second provider but none is available.
pub struct NoopStore;

impl PreviousTransactionOutputProvider for NoopStore {
	fn previous_transaction_output(&self, _prevout: &OutPoint) -> Option<TransactionOutput> {
		None
	}
}

View File

@ -5,6 +5,8 @@ use primitives::hash::H256;
pub enum Error {
/// has an equal duplicate in the chain
Duplicate,
/// Contains duplicated transactions
DuplicatedTransactions,
/// No transactions in block
Empty,
/// Invalid proof-of-work (Block hash does not satisfy nBits)
@ -32,11 +34,25 @@ pub enum Error {
Size(usize),
/// Block transactions are not final.
NonFinalBlock,
/// Old version block.
OldVersionBlock,
}
#[derive(Debug, PartialEq)]
/// Possible transactions verification errors
pub enum TransactionError {
/// Transaction has no inputs or no outputs
Empty,
/// Transaction is not coinbase transaction but has null inputs
NullNonCoinbase,
/// Coinbase signature is not in the range 2-100
CoinbaseSignatureLength(usize),
/// Transaction size exceeds block size limit
MaxSize,
/// Transaction has more sigops than is allowed
MaxSigops,
/// Transaction is a part of memory pool, but is a coinbase
MemoryPoolCoinbase,
/// Not found corresponding output for transaction input
Input(usize),
/// Referenced coinbase output for the transaction input is not mature enough
@ -54,7 +70,7 @@ pub enum TransactionError {
/// Too many signature operations once p2sh operations included
SigopsP2SH(usize),
/// Coinbase transaction is found at position that is not 0
MisplacedCoinbase(usize),
MisplacedCoinbase,
/// Not fully spent transaction with the same hash already exists, bip30.
UnspentTransactionWithTheSameHash,
/// Using output that is surely spent

View File

@ -1,11 +1,60 @@
//! Bitcoin blocks verification
//! Bitcoin consensus verification
//!
//! Full block verification consists of two phases:
//! - pre-verification
//! - full-verification
//!
//! In this library, pre-verification is done by `VerifyXXX` structures
//! Full-verification is done by `AcceptXXX` structures
//!
//! Use cases:
//!
//! --> A. on_new_block:
//!
//! A.1 VerifyHeader
//! A.2 VerifyBlock,
//! A.3 VerifyTransaction for each tx
//!
//! A.4.a if it is block from canon chain
//! A.4.a.1 AcceptHeader
//! A.4.a.2 AcceptBlock
//! A.4.a.3 AcceptTransaction for each tx
//!
//! A.4.b if it is block from side chain becoming canon
//! decanonize old canon chain blocks
//! canonize new canon chain blocks (without currently processed block)
//! A.4.b.1 AcceptHeader for each header
//! A.4.b.2 AcceptBlock for each block
//! A.4.b.3 AcceptTransaction for each tx in each block
//! A.4.b.4 AcceptHeader
//! A.4.b.5 AcceptBlock
//! A.4.b.6 AcceptTransaction for each tx
//! if any step failed, revert chain back to old canon
//!
//! A.4.c if it is block from side chain do nothing
//!
//! --> B. on_memory_pool_transaction
//!
//! B.1 VerifyMemoryPoolTransaction
//! B.2 AcceptMemoryPoolTransaction
//!
//! --> C. on_block_header
//!
//! C.1 VerifyHeader
//! C.2 AcceptHeader (?)
//!
//! --> D. after successful chain_reorganization
//!
//! D.1 AcceptMemoryPoolTransaction on each tx in memory pool
//!
//! --> E. D might be super inefficient when memory pool is large
//! so instead we might want to call AcceptMemoryPoolTransaction on each tx
//! that is inserted into assembled block
extern crate byteorder;
extern crate parking_lot;
extern crate time;
#[macro_use]
extern crate log;
extern crate scoped_pool;
extern crate rayon;
extern crate db;
extern crate chain;
@ -19,18 +68,47 @@ extern crate ethcore_devtools as devtools;
#[cfg(test)]
extern crate test_data;
mod chain_verifier;
pub mod constants;
mod canon;
mod duplex_store;
mod error;
mod sigops;
mod task;
mod utils;
mod timestamp;
mod work;
// pre-verification
mod verify_block;
mod verify_chain;
mod verify_header;
mod verify_transaction;
// full verification
mod accept_block;
mod accept_chain;
mod accept_header;
mod accept_transaction;
// backwards compatibility
mod chain_verifier;
pub use primitives::{uint, hash, compact};
pub use chain_verifier::{Chain, ChainVerifier, VerificationResult, MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
pub use canon::{CanonBlock, CanonHeader, CanonTransaction};
pub use accept_block::BlockAcceptor;
pub use accept_chain::ChainAcceptor;
pub use accept_header::HeaderAcceptor;
pub use accept_transaction::{TransactionAcceptor, MemoryPoolTransactionAcceptor};
pub use verify_block::BlockVerifier;
pub use verify_chain::ChainVerifier as ChainVerifier;
pub use verify_header::HeaderVerifier;
pub use verify_transaction::{TransactionVerifier, MemoryPoolTransactionVerifier};
pub use chain_verifier::{Chain, BackwardsCompatibleChainVerifier, VerificationResult};
pub use error::{Error, TransactionError};
pub use sigops::{transaction_sigops, StoreWithUnretainedOutputs};
pub use utils::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_hash, block_reward_satoshi};
pub use sigops::transaction_sigops;
pub use timestamp::median_timestamp;
pub use work::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_hash, block_reward_satoshi};
/// Interface for block verification
pub trait Verify : Send + Sync {

View File

@ -1,29 +1,11 @@
use chain::{Transaction, TransactionOutput, OutPoint};
use db::{PreviousTransactionOutputProvider, SharedStore};
use chain::Transaction;
use db::PreviousTransactionOutputProvider;
use script::Script;
pub struct StoreWithUnretainedOutputs<'a, T> where T: 'a {
store: &'a SharedStore,
outputs: &'a T,
}
impl<'a, T> StoreWithUnretainedOutputs<'a, T> where T: PreviousTransactionOutputProvider {
pub fn new(store: &'a SharedStore, outputs: &'a T) -> Self {
StoreWithUnretainedOutputs {
store: store,
outputs: outputs,
}
}
}
impl<'a, T> PreviousTransactionOutputProvider for StoreWithUnretainedOutputs<'a, T> where T: PreviousTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.store.transaction(&prevout.hash)
.and_then(|tx| tx.outputs.into_iter().nth(prevout.index as usize))
.or_else(|| self.outputs.previous_transaction_output(prevout))
}
}
/// Counts signature operations in given transaction
/// bip16_active flag indicates if we should also count signature operations
/// in previous transactions. If one of the previous transaction outputs is
/// missing, we simply ignore that fact and just carry on counting
pub fn transaction_sigops(
transaction: &Transaction,
store: &PreviousTransactionOutputProvider,
@ -38,17 +20,21 @@ pub fn transaction_sigops(
return output_sigops;
}
let input_sigops: usize = transaction.inputs.iter().map(|input| {
let input_script: Script = input.script_sig.clone().into();
let mut sigops = input_script.sigops_count(false);
if bip16_active {
let previous_output = store.previous_transaction_output(&input.previous_output)
.expect("missing tx, out of order verification or malformed db");
let prevout_script: Script = previous_output.script_pubkey.into();
sigops += input_script.pay_to_script_hash_sigops(&prevout_script);
}
sigops
}).sum();
let mut input_sigops = 0usize;
let mut bip16_sigops = 0usize;
input_sigops + output_sigops
for input in &transaction.inputs {
let input_script: Script = input.script_sig.clone().into();
input_sigops += input_script.sigops_count(false);
if bip16_active {
let previous_output = match store.previous_transaction_output(&input.previous_output) {
Some(output) => output,
None => continue,
};
let prevout_script: Script = previous_output.script_pubkey.into();
bip16_sigops += input_script.pay_to_script_hash_sigops(&prevout_script);
}
}
input_sigops + output_sigops + bip16_sigops
}

View File

@ -1,38 +0,0 @@
use chain_verifier::ChainVerifier;
use super::TransactionError;
use db::IndexedBlock;
/// A slice `[from, to)` of a block's transactions scheduled for
/// verification on a worker thread.
pub struct Task<'a> {
	block: &'a IndexedBlock,
	block_height: u32,
	from: usize,
	to: usize,
	result: Result<(), TransactionCheckError>,
}

/// Failing transaction index paired with its error.
type TransactionCheckError = (usize, TransactionError);

impl<'a> Task<'a> {
	pub fn new(block: &'a IndexedBlock, block_height: u32, from: usize, to: usize) -> Self {
		Task {
			block: block,
			block_height: block_height,
			from: from,
			to: to,
			result: Ok(()),
		}
	}

	/// Verifies every transaction in the assigned range, recording the first
	/// failure. The previous version unconditionally executed
	/// `self.result = Ok(())` after the loop, overwriting any recorded error
	/// and reporting failed transactions as successful; we now stop at the
	/// first error and keep it.
	pub fn progress(&mut self, verifier: &ChainVerifier) {
		for index in self.from..self.to {
			if let Err(e) = verifier.verify_transaction(self.block, self.block_height, self.block.header.raw.time, &self.block.transactions[index].raw, index) {
				self.result = Err((index, e));
				return;
			}
		}
		self.result = Ok(());
	}

	pub fn result(self) -> Result<(), TransactionCheckError> {
		self.result
	}
}

View File

@ -0,0 +1,34 @@
use std::collections::BTreeSet;
use chain::BlockHeader;
use db::BlockHeaderProvider;
use network::Magic;
/// Returns the median timestamp of up to 11 direct ancestors of `header`.
///
/// A later header is expected to carry a timestamp greater than this
/// median. Returns 0 when no ancestor headers are available, and always
/// returns 0 on testnet (timestamp validation is intentionally disabled
/// there for now).
pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider, network: Magic) -> u32 {
	// TODO: timestamp validation on testnet is broken
	if network == Magic::Testnet {
		return 0;
	}

	let ancestors = 11;
	// NOTE(review): BTreeSet both sorts and deduplicates, so ancestors that
	// share a timestamp collapse into one entry, which can shift the median
	// compared to a multiset-based implementation — confirm this is intended.
	let mut timestamps = BTreeSet::new();
	let mut block_ref = header.previous_header_hash.clone().into();
	// walk the previous-header chain, stopping early if an ancestor is missing
	for _ in 0..ancestors {
		let previous_header = match store.block_header(block_ref) {
			Some(h) => h,
			None => break,
		};
		timestamps.insert(previous_header.time);
		block_ref = previous_header.previous_header_hash.into();
	}
	if timestamps.is_empty() {
		return 0;
	}
	let timestamps = timestamps.into_iter().collect::<Vec<_>>();
	timestamps[timestamps.len() / 2]
}

View File

@ -0,0 +1,214 @@
use std::collections::HashSet;
use db::IndexedBlock;
use sigops::transaction_sigops;
use duplex_store::NoopStore;
use error::{Error, TransactionError};
use constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
/// Aggregates all stateless (pre-verification) block rules.
pub struct BlockVerifier<'a> {
	pub empty: BlockEmpty<'a>,
	pub coinbase: BlockCoinbase<'a>,
	pub serialized_size: BlockSerializedSize<'a>,
	pub extra_coinbases: BlockExtraCoinbases<'a>,
	pub transactions_uniqueness: BlockTransactionsUniqueness<'a>,
	pub sigops: BlockSigops<'a>,
	pub merkle_root: BlockMerkleRoot<'a>,
}

impl<'a> BlockVerifier<'a> {
	/// Builds the complete rule set for `block` with consensus limits.
	pub fn new(block: &'a IndexedBlock) -> Self {
		BlockVerifier {
			empty: BlockEmpty::new(block),
			coinbase: BlockCoinbase::new(block),
			serialized_size: BlockSerializedSize::new(block, MAX_BLOCK_SIZE),
			extra_coinbases: BlockExtraCoinbases::new(block),
			transactions_uniqueness: BlockTransactionsUniqueness::new(block),
			sigops: BlockSigops::new(block, MAX_BLOCK_SIGOPS),
			merkle_root: BlockMerkleRoot::new(block),
		}
	}

	/// Runs every rule in order, returning the first failure.
	pub fn check(&self) -> Result<(), Error> {
		try!(self.empty.check());
		try!(self.coinbase.check());
		try!(self.serialized_size.check());
		try!(self.extra_coinbases.check());
		try!(self.transactions_uniqueness.check());
		try!(self.sigops.check());
		try!(self.merkle_root.check());
		Ok(())
	}
}
/// A single stateless block validation rule.
trait BlockRule {
	/// Returns `Ok(())` when the block satisfies the rule.
	fn check(&self) -> Result<(), Error>;
}
pub struct BlockEmpty<'a> {
block: &'a IndexedBlock,
}
impl<'a> BlockEmpty<'a> {
fn new(block: &'a IndexedBlock) -> Self {
BlockEmpty {
block: block,
}
}
}
impl<'a> BlockRule for BlockEmpty<'a> {
fn check(&self) -> Result<(), Error> {
if self.block.transactions.is_empty() {
Err(Error::Empty)
} else {
Ok(())
}
}
}
pub struct BlockSerializedSize<'a> {
block: &'a IndexedBlock,
max_size: usize,
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: &'a IndexedBlock, max_size: usize) -> Self {
BlockSerializedSize {
block: block,
max_size: max_size,
}
}
}
impl<'a> BlockRule for BlockSerializedSize<'a> {
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
if size > self.max_size {
Err(Error::Size(size))
} else {
Ok(())
}
}
}
pub struct BlockCoinbase<'a> {
block: &'a IndexedBlock,
}
impl<'a> BlockCoinbase<'a> {
fn new(block: &'a IndexedBlock) -> Self {
BlockCoinbase {
block: block,
}
}
}
impl<'a> BlockRule for BlockCoinbase<'a> {
fn check(&self) -> Result<(), Error> {
if self.block.transactions.first().map(|tx| tx.raw.is_coinbase()).unwrap_or(false) {
Ok(())
} else {
Err(Error::Coinbase)
}
}
}
pub struct BlockExtraCoinbases<'a> {
block: &'a IndexedBlock,
}
impl<'a> BlockExtraCoinbases<'a> {
fn new(block: &'a IndexedBlock) -> Self {
BlockExtraCoinbases {
block: block,
}
}
}
impl<'a> BlockRule for BlockExtraCoinbases<'a> {
fn check(&self) -> Result<(), Error> {
let misplaced = self.block.transactions.iter()
.skip(1)
.position(|tx| tx.raw.is_coinbase());
match misplaced {
Some(index) => Err(Error::Transaction(index + 1, TransactionError::MisplacedCoinbase)),
None => Ok(()),
}
}
}
pub struct BlockTransactionsUniqueness<'a> {
block: &'a IndexedBlock,
}
impl<'a> BlockTransactionsUniqueness<'a> {
fn new(block: &'a IndexedBlock) -> Self {
BlockTransactionsUniqueness {
block: block,
}
}
}
impl<'a> BlockRule for BlockTransactionsUniqueness<'a> {
fn check(&self) -> Result<(), Error> {
let hashes = self.block.transactions.iter().map(|tx| tx.hash.clone()).collect::<HashSet<_>>();
if hashes.len() == self.block.transactions.len() {
Ok(())
} else {
Err(Error::DuplicatedTransactions)
}
}
}
/// Bounds the total number of signature operations in the block.
pub struct BlockSigops<'a> {
	block: &'a IndexedBlock,
	max_sigops: usize,
}

impl<'a> BlockSigops<'a> {
	fn new(block: &'a IndexedBlock, max_sigops: usize) -> Self {
		BlockSigops { block: block, max_sigops: max_sigops }
	}
}

impl<'a> BlockRule for BlockSigops<'a> {
	fn check(&self) -> Result<(), Error> {
		// We cannot know if bip16 is enabled at this point (previous outputs
		// are unavailable), so bip16 sigop counting is disabled via NoopStore.
		let mut sigops = 0usize;
		for tx in &self.block.transactions {
			sigops += transaction_sigops(&tx.raw, &NoopStore, false);
		}
		if sigops > self.max_sigops {
			Err(Error::MaximumSigops)
		} else {
			Ok(())
		}
	}
}
/// Verifies that the header's merkle root matches the transactions.
pub struct BlockMerkleRoot<'a> {
	block: &'a IndexedBlock,
}

impl<'a> BlockMerkleRoot<'a> {
	fn new(block: &'a IndexedBlock) -> Self {
		BlockMerkleRoot { block: block }
	}
}

impl<'a> BlockRule for BlockMerkleRoot<'a> {
	fn check(&self) -> Result<(), Error> {
		if self.block.merkle_root() != self.block.header.raw.merkle_root_hash {
			Err(Error::MerkleRoot)
		} else {
			Ok(())
		}
	}
}

View File

@ -0,0 +1,47 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use db::IndexedBlock;
use network::Magic;
use error::Error;
use verify_block::BlockVerifier;
use verify_header::HeaderVerifier;
use verify_transaction::TransactionVerifier;
/// Pre-verification of a whole block: block-level rules, header rules and
/// stateless per-transaction rules.
pub struct ChainVerifier<'a> {
	pub block: BlockVerifier<'a>,
	pub header: HeaderVerifier<'a>,
	pub transactions: Vec<TransactionVerifier<'a>>,
}

impl<'a> ChainVerifier<'a> {
	pub fn new(block: &'a IndexedBlock, network: Magic, current_time: u32) -> Self {
		trace!(target: "verification", "Block pre-verification {}", block.hash().to_reversed_str());
		ChainVerifier {
			block: BlockVerifier::new(block),
			header: HeaderVerifier::new(&block.header, network, current_time),
			transactions: block.transactions.iter().map(TransactionVerifier::new).collect(),
		}
	}

	/// Runs block, header and per-transaction rules, first failure wins.
	pub fn check(&self) -> Result<(), Error> {
		try!(self.block.check());
		try!(self.header.check());
		try!(self.check_transactions());
		Ok(())
	}

	/// backwards test compatibility
	/// TODO: get rid of this
	pub fn check_with_pow(&self, pow: bool) -> Result<(), Error> {
		try!(self.block.check());
		try!(self.header.check_with_pow(pow));
		try!(self.check_transactions());
		Ok(())
	}

	// Verifies all transactions in parallel with rayon.
	// NOTE(review): when several transactions fail, which error surfaces
	// depends on rayon's chunking/reduction order, not necessarily the
	// lowest index — confirm callers don't rely on a deterministic error.
	fn check_transactions(&self) -> Result<(), Error> {
		self.transactions.par_iter()
			.enumerate()
			.fold(|| Ok(()), |result, (index, tx)| result.and_then(|_| tx.check().map_err(|err| Error::Transaction(index, err))))
			.reduce(|| Ok(()), |acc, check| acc.and(check))
	}
}

View File

@ -0,0 +1,90 @@
use primitives::compact::Compact;
use db::IndexedBlockHeader;
use network::Magic;
use work::is_valid_proof_of_work;
use error::Error;
use constants::BLOCK_MAX_FUTURE;
/// Aggregates stateless header rules: proof-of-work and the
/// future-timestamp bound.
pub struct HeaderVerifier<'a> {
	pub proof_of_work: HeaderProofOfWork<'a>,
	pub timestamp: HeaderTimestamp<'a>,
}

impl<'a> HeaderVerifier<'a> {
	/// `current_time` is the local clock used for the future-timestamp bound.
	pub fn new(header: &'a IndexedBlockHeader, network: Magic, current_time: u32) -> Self {
		HeaderVerifier {
			proof_of_work: HeaderProofOfWork::new(header, network),
			timestamp: HeaderTimestamp::new(header, current_time, BLOCK_MAX_FUTURE as u32),
		}
	}

	/// Runs all header rules, returning the first failure.
	pub fn check(&self) -> Result<(), Error> {
		try!(self.proof_of_work.check());
		try!(self.timestamp.check());
		Ok(())
	}

	/// backwards test compatibility
	/// TODO: get rid of this
	pub fn check_with_pow(&self, pow: bool) -> Result<(), Error> {
		if pow {
			try!(self.proof_of_work.check());
		}
		try!(self.timestamp.check());
		Ok(())
	}
}
/// A single stateless header validation rule.
pub trait HeaderRule {
	/// Returns `Ok(())` when the header satisfies the rule.
	fn check(&self) -> Result<(), Error>;
}
pub struct HeaderProofOfWork<'a> {
header: &'a IndexedBlockHeader,
max_work_bits: Compact,
}
impl<'a> HeaderProofOfWork<'a> {
fn new(header: &'a IndexedBlockHeader, network: Magic) -> Self {
HeaderProofOfWork {
header: header,
max_work_bits: network.max_bits(),
}
}
}
impl<'a> HeaderRule for HeaderProofOfWork<'a> {
fn check(&self) -> Result<(), Error> {
if is_valid_proof_of_work(self.max_work_bits, self.header.raw.bits, &self.header.hash) {
Ok(())
} else {
Err(Error::Pow)
}
}
}
/// Rejects headers whose timestamp is more than `max_future` seconds ahead
/// of the local clock (`current_time`).
pub struct HeaderTimestamp<'a> {
	header: &'a IndexedBlockHeader,
	current_time: u32,
	max_future: u32,
}

impl<'a> HeaderTimestamp<'a> {
	fn new(header: &'a IndexedBlockHeader, current_time: u32, max_future: u32) -> Self {
		HeaderTimestamp {
			header: header,
			current_time: current_time,
			max_future: max_future,
		}
	}
}

impl<'a> HeaderRule for HeaderTimestamp<'a> {
	fn check(&self) -> Result<(), Error> {
		// saturating_add avoids a debug-build overflow panic (and a
		// release-build wraparound that would wrongly reject headers) when
		// current_time is close to u32::MAX
		if self.header.raw.time > self.current_time.saturating_add(self.max_future) {
			Err(Error::FuturisticTimestamp)
		} else {
			Ok(())
		}
	}
}

View File

@ -0,0 +1,206 @@
use std::ops;
use serialization::Serializable;
use db::IndexedTransaction;
use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;
use constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS, MIN_COINBASE_SIZE, MAX_COINBASE_SIZE};
/// Stateless pre-verification rules for a transaction that is part of a block.
pub struct TransactionVerifier<'a> {
	pub empty: TransactionEmpty<'a>,
	pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
	pub oversized_coinbase: TransactionOversizedCoinbase<'a>,
}

impl<'a> TransactionVerifier<'a> {
	pub fn new(transaction: &'a IndexedTransaction) -> Self {
		trace!(target: "verification", "Tx pre-verification {}", transaction.hash.to_reversed_str());
		TransactionVerifier {
			empty: TransactionEmpty::new(transaction),
			null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
			oversized_coinbase: TransactionOversizedCoinbase::new(transaction, MIN_COINBASE_SIZE..MAX_COINBASE_SIZE),
		}
	}

	/// Runs all rules in order, returning the first failure.
	pub fn check(&self) -> Result<(), TransactionError> {
		try!(self.empty.check());
		try!(self.null_non_coinbase.check());
		try!(self.oversized_coinbase.check());
		Ok(())
	}
}
/// Stateless pre-verification rules for a transaction received via the
/// memory pool; stricter than in-block rules (coinbases are rejected and
/// size / sigops are bounded per transaction).
pub struct MemoryPoolTransactionVerifier<'a> {
	pub empty: TransactionEmpty<'a>,
	pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
	pub is_coinbase: TransactionMemoryPoolCoinbase<'a>,
	pub size: TransactionSize<'a>,
	pub sigops: TransactionSigops<'a>,
}

impl<'a> MemoryPoolTransactionVerifier<'a> {
	pub fn new(transaction: &'a IndexedTransaction) -> Self {
		trace!(target: "verification", "Mempool-Tx pre-verification {}", transaction.hash.to_reversed_str());
		MemoryPoolTransactionVerifier {
			empty: TransactionEmpty::new(transaction),
			null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
			is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
			size: TransactionSize::new(transaction, MAX_BLOCK_SIZE),
			sigops: TransactionSigops::new(transaction, MAX_BLOCK_SIGOPS),
		}
	}

	/// Runs all rules in order, returning the first failure.
	pub fn check(&self) -> Result<(), TransactionError> {
		try!(self.empty.check());
		try!(self.null_non_coinbase.check());
		try!(self.is_coinbase.check());
		try!(self.size.check());
		try!(self.sigops.check());
		Ok(())
	}
}
/// A single stateless transaction validation rule.
trait TransactionRule {
	/// Returns `Ok(())` when the transaction satisfies the rule.
	fn check(&self) -> Result<(), TransactionError>;
}
/// Rejects transactions that are considered empty by `Transaction::is_empty`.
pub struct TransactionEmpty<'a> {
	transaction: &'a IndexedTransaction,
}

impl<'a> TransactionEmpty<'a> {
	fn new(transaction: &'a IndexedTransaction) -> Self {
		TransactionEmpty { transaction: transaction }
	}
}

impl<'a> TransactionRule for TransactionEmpty<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		match self.transaction.raw.is_empty() {
			true => Err(TransactionError::Empty),
			false => Ok(()),
		}
	}
}
/// Rejects non-coinbase transactions whose inputs all have null previous outputs.
pub struct TransactionNullNonCoinbase<'a> {
	transaction: &'a IndexedTransaction,
}

impl<'a> TransactionNullNonCoinbase<'a> {
	fn new(transaction: &'a IndexedTransaction) -> Self {
		TransactionNullNonCoinbase { transaction: transaction }
	}
}

impl<'a> TransactionRule for TransactionNullNonCoinbase<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		let raw = &self.transaction.raw;
		// Only a coinbase may spend null previous outputs.
		if raw.is_null() && !raw.is_coinbase() {
			return Err(TransactionError::NullNonCoinbase);
		}
		Ok(())
	}
}
/// Bounds the coinbase signature-script length.
pub struct TransactionOversizedCoinbase<'a> {
	transaction: &'a IndexedTransaction,
	size_range: ops::Range<usize>,
}

impl<'a> TransactionOversizedCoinbase<'a> {
	fn new(transaction: &'a IndexedTransaction, size_range: ops::Range<usize>) -> Self {
		TransactionOversizedCoinbase {
			transaction: transaction,
			size_range: size_range,
		}
	}
}

impl<'a> TransactionRule for TransactionOversizedCoinbase<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// The rule only constrains coinbase transactions.
		if !self.transaction.raw.is_coinbase() {
			return Ok(());
		}
		// NOTE: both bounds are treated as inclusive — `size_range.end` is NOT
		// exclusive as in a standard `ops::Range` iteration; the accepted
		// lengths are start..=end as the comparison below shows.
		let script_len = self.transaction.raw.inputs[0].script_sig.len();
		if script_len >= self.size_range.start && script_len <= self.size_range.end {
			Ok(())
		} else {
			Err(TransactionError::CoinbaseSignatureLength(script_len))
		}
	}
}
/// Rejects coinbase transactions — they are never valid in the memory pool.
pub struct TransactionMemoryPoolCoinbase<'a> {
	transaction: &'a IndexedTransaction,
}

impl<'a> TransactionMemoryPoolCoinbase<'a> {
	fn new(transaction: &'a IndexedTransaction) -> Self {
		TransactionMemoryPoolCoinbase { transaction: transaction }
	}
}

impl<'a> TransactionRule for TransactionMemoryPoolCoinbase<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		if self.transaction.raw.is_coinbase() {
			return Err(TransactionError::MemoryPoolCoinbase);
		}
		Ok(())
	}
}
/// Caps the serialized transaction size at `max_size` bytes.
pub struct TransactionSize<'a> {
	transaction: &'a IndexedTransaction,
	max_size: usize,
}

impl<'a> TransactionSize<'a> {
	fn new(transaction: &'a IndexedTransaction, max_size: usize) -> Self {
		TransactionSize {
			transaction: transaction,
			max_size: max_size,
		}
	}
}

impl<'a> TransactionRule for TransactionSize<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		let size = self.transaction.raw.serialized_size();
		if size <= self.max_size {
			Ok(())
		} else {
			Err(TransactionError::MaxSize)
		}
	}
}
/// Caps the transaction's signature-operation count at `max_sigops`.
pub struct TransactionSigops<'a> {
	transaction: &'a IndexedTransaction,
	max_sigops: usize,
}

impl<'a> TransactionSigops<'a> {
	fn new(transaction: &'a IndexedTransaction, max_sigops: usize) -> Self {
		TransactionSigops {
			transaction: transaction,
			max_sigops: max_sigops,
		}
	}
}

impl<'a> TransactionRule for TransactionSigops<'a> {
	fn check(&self) -> Result<(), TransactionError> {
		// NoopStore: previous outputs are not resolved here, so only the
		// accurately-countable sigops are included (P2SH flag disabled).
		match transaction_sigops(&self.transaction.raw, &NoopStore, false) {
			n if n > self.max_sigops => Err(TransactionError::MaxSigops),
			_ => Ok(()),
		}
	}
}

View File

@ -5,17 +5,10 @@ use primitives::uint::U256;
use network::Magic;
use db::{BlockHeaderProvider, BlockRef};
const RETARGETING_FACTOR: u32 = 4;
const TARGET_SPACING_SECONDS: u32 = 10 * 60;
const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;
// The upper and lower bounds for retargeting timespan
const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR;
const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR;
// Target number of blocks, 2 weeks, 2016
pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS;
use constants::{
DOUBLE_SPACING_SECONDS,
TARGET_TIMESPAN_SECONDS, MIN_TIMESPAN, MAX_TIMESPAN, RETARGETING_INTERVAL
};
pub fn is_retarget_height(height: u32) -> bool {
height % RETARGETING_INTERVAL == 0
@ -63,7 +56,9 @@ pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
/// Returns work required for given header
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Magic) -> Compact {
assert!(height != 0, "cannot calculate required work for genesis block");
if height == 0 {
return network.max_bits();
}
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
@ -140,10 +135,6 @@ pub fn block_reward_satoshi(block_height: u32) -> u64 {
res
}
/// Seconds elapsed between now (wall clock) and `protocol_time`;
/// negative when `protocol_time` lies in the future.
pub fn age(protocol_time: u32) -> i64 {
	let now_sec = ::time::get_time().sec;
	now_sec - protocol_time as i64
}
#[cfg(test)]
mod tests {
use primitives::hash::H256;