Merge branch 'master' into rpc_raw_continue

Svyatoslav Nikolsky 2016-12-14 14:36:31 +03:00
commit e09401c7e9
55 changed files with 966 additions and 351 deletions

View File

@ -68,7 +68,7 @@ impl Block {
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|t| t.is_final(height, self.block_header.time))
self.transactions.iter().all(|t| t.is_final_in_block(height, self.block_header.time))
}
}

chain/src/constants.rs (new file)
View File

@ -0,0 +1,22 @@
// Below flags apply in the context of BIP 68
// If this flag is set, CTxIn::nSequence is NOT interpreted as a
// relative lock-time.
pub const SEQUENCE_LOCKTIME_DISABLE_FLAG: u32 = 1u32 << 31;
// Setting nSequence to this value for every input in a transaction
// disables nLockTime.
pub const SEQUENCE_FINAL: u32 = 0xffffffff;
// If CTxIn::nSequence encodes a relative lock-time and this flag
// is set, the relative lock-time has units of 512 seconds,
// otherwise it specifies blocks with a granularity of 1.
pub const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = (1 << 22);
// If CTxIn::nSequence encodes a relative lock-time, this mask is
// applied to extract that lock-time from the sequence field.
pub const SEQUENCE_LOCKTIME_MASK: u32 = 0x0000ffff;
/// Threshold for `nLockTime`: below this value it is interpreted as block number,
/// otherwise as UNIX timestamp.
pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC
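Together these constants decode a BIP 68 relative lock-time from an input's sequence field. A minimal sketch of that decoding (the helper below is illustrative, not part of this crate):

// Illustrative sketch: decode a BIP 68 relative lock-time from nSequence.
// Returns None when the disable flag is set (no relative lock-time applies).
fn decode_relative_lock_time(sequence: u32) -> Option<(u32, bool)> {
    if sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG != 0 {
        return None;
    }
    let value = sequence & SEQUENCE_LOCKTIME_MASK;
    // true => lock-time in units of 512 seconds; false => in blocks
    let time_based = sequence & SEQUENCE_LOCKTIME_TYPE_FLAG != 0;
    Some((value, time_based))
}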

View File

@ -0,0 +1,77 @@
use std::{io, cmp};
use hash::H256;
use ser::{
Serializable, serialized_list_size,
Deserializable, Reader, Error as ReaderError
};
use block::Block;
use transaction::Transaction;
use merkle_root::merkle_root;
use indexed_header::IndexedBlockHeader;
use indexed_transaction::IndexedTransaction;
#[derive(Debug, Clone)]
pub struct IndexedBlock {
pub header: IndexedBlockHeader,
pub transactions: Vec<IndexedTransaction>,
}
impl From<Block> for IndexedBlock {
fn from(block: Block) -> Self {
let Block { block_header, transactions } = block;
IndexedBlock {
header: block_header.into(),
transactions: transactions.into_iter().map(Into::into).collect(),
}
}
}
impl cmp::PartialEq for IndexedBlock {
fn eq(&self, other: &Self) -> bool {
self.header.hash == other.header.hash
}
}
impl IndexedBlock {
pub fn new(header: IndexedBlockHeader, transactions: Vec<IndexedTransaction>) -> Self {
IndexedBlock {
header: header,
transactions: transactions,
}
}
pub fn hash(&self) -> &H256 {
&self.header.hash
}
pub fn to_raw_block(self) -> Block {
Block::new(self.header.raw, self.transactions.into_iter().map(|tx| tx.raw).collect())
}
pub fn size(&self) -> usize {
let header_size = self.header.raw.serialized_size();
let transactions = self.transactions.iter().map(|tx| &tx.raw).collect::<Vec<_>>();
let txs_size = serialized_list_size::<Transaction, &Transaction>(&transactions);
header_size + txs_size
}
pub fn merkle_root(&self) -> H256 {
merkle_root(&self.transactions.iter().map(|tx| &tx.hash).collect::<Vec<&H256>>())
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|tx| tx.raw.is_final_in_block(height, self.header.raw.time))
}
}
impl Deserializable for IndexedBlock {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read {
let block = IndexedBlock {
header: try!(reader.read()),
transactions: try!(reader.read_list()),
};
Ok(block)
}
}
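A short usage sketch of IndexedBlock (assuming a chain::Block value `block` is in scope): the conversion computes the header hash once, and PartialEq afterwards compares hashes only.

// Sketch: the From<Block> conversion precomputes the header hash.
let indexed: IndexedBlock = block.into();
assert_eq!(indexed.hash(), &indexed.header.hash);
// Equality is hash-based, so a clone compares equal without touching transactions.
let copy = indexed.clone();
assert!(indexed == copy);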

View File

@ -0,0 +1,48 @@
use std::{io, cmp};
use hash::H256;
use ser::{Deserializable, Reader, Error as ReaderError};
use block_header::BlockHeader;
use read_and_hash::ReadAndHash;
#[derive(Debug, Clone)]
pub struct IndexedBlockHeader {
pub hash: H256,
pub raw: BlockHeader,
}
impl From<BlockHeader> for IndexedBlockHeader {
fn from(header: BlockHeader) -> Self {
IndexedBlockHeader {
hash: header.hash(),
raw: header,
}
}
}
impl IndexedBlockHeader {
pub fn new(hash: H256, header: BlockHeader) -> Self {
IndexedBlockHeader {
hash: hash,
raw: header,
}
}
}
impl cmp::PartialEq for IndexedBlockHeader {
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash
}
}
impl Deserializable for IndexedBlockHeader {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read {
let data = try!(reader.read_and_hash::<BlockHeader>());
// TODO: use len
let header = IndexedBlockHeader {
raw: data.data,
hash: data.hash,
};
Ok(header)
}
}

View File

@ -0,0 +1,60 @@
use std::{cmp, io, borrow};
use hash::H256;
use ser::{Deserializable, Reader, Error as ReaderError};
use transaction::Transaction;
use read_and_hash::ReadAndHash;
#[derive(Debug, Clone)]
pub struct IndexedTransaction {
pub hash: H256,
pub raw: Transaction,
}
impl From<Transaction> for IndexedTransaction {
fn from(tx: Transaction) -> Self {
IndexedTransaction {
hash: tx.hash(),
raw: tx,
}
}
}
impl IndexedTransaction {
pub fn new(hash: H256, transaction: Transaction) -> Self {
IndexedTransaction {
hash: hash,
raw: transaction,
}
}
}
impl cmp::PartialEq for IndexedTransaction {
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash
}
}
impl Deserializable for IndexedTransaction {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read {
let data = try!(reader.read_and_hash::<Transaction>());
// TODO: use len
let tx = IndexedTransaction {
raw: data.data,
hash: data.hash,
};
Ok(tx)
}
}
pub struct IndexedTransactionsRef<'a, T> where T: 'a {
pub transactions: &'a [T],
}
impl<'a, T> IndexedTransactionsRef<'a, T> where T: borrow::Borrow<IndexedTransaction> {
pub fn new(transactions: &'a [T]) -> Self {
IndexedTransactionsRef {
transactions: transactions,
}
}
}

View File

@ -4,27 +4,34 @@ extern crate primitives;
extern crate bitcrypto as crypto;
extern crate serialization as ser;
pub mod constants;
mod block;
mod block_header;
mod merkle_root;
mod transaction;
/// `IndexedBlock` extension
mod read_and_hash;
mod indexed_block;
mod indexed_header;
mod indexed_transaction;
pub trait RepresentH256 {
fn h256(&self) -> primitives::hash::H256;
fn h256(&self) -> hash::H256;
}
pub use rustc_serialize::hex;
pub use primitives::{hash, bytes, uint, compact};
pub use self::block::Block;
pub use self::block_header::BlockHeader;
pub use self::merkle_root::merkle_root;
pub use self::merkle_root::merkle_node_hash;
pub use self::transaction::{
Transaction, TransactionInput, TransactionOutput, OutPoint,
SEQUENCE_LOCKTIME_DISABLE_FLAG, SEQUENCE_FINAL,
SEQUENCE_LOCKTIME_TYPE_FLAG, SEQUENCE_LOCKTIME_MASK,
LOCKTIME_THRESHOLD
};
pub use block::Block;
pub use block_header::BlockHeader;
pub use merkle_root::{merkle_root, merkle_node_hash};
pub use transaction::{Transaction, TransactionInput, TransactionOutput, OutPoint};
pub use read_and_hash::{ReadAndHash, HashedData};
pub use indexed_block::IndexedBlock;
pub use indexed_header::IndexedBlockHeader;
pub use indexed_transaction::{IndexedTransaction, IndexedTransactionsRef};
pub type ShortTransactionID = hash::H48;

View File

@ -0,0 +1,33 @@
use std::io;
use hash::H256;
use crypto::{DHash256, Digest};
use ser::{Reader, Error as ReaderError, Deserializable};
pub struct HashedData<T> {
pub size: usize,
pub hash: H256,
pub data: T,
}
pub trait ReadAndHash {
fn read_and_hash<T>(&mut self) -> Result<HashedData<T>, ReaderError> where T: Deserializable;
}
impl<R> ReadAndHash for Reader<R> where R: io::Read {
fn read_and_hash<T>(&mut self) -> Result<HashedData<T>, ReaderError> where T: Deserializable {
let mut size = 0usize;
let mut hasher = DHash256::new();
let data = self.read_with_proxy(|bytes| {
size += bytes.len();
hasher.input(bytes);
})?;
let result = HashedData {
hash: hasher.finish(),
data: data,
size: size,
};
Ok(result)
}
}
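A usage sketch of ReadAndHash (assuming `bytes` holds the 80 serialized bytes of a block header, and the crate-internal names used above): the reader hashes exactly the bytes consumed during deserialization, so the result matches hashing the value after the fact.

// Sketch: deserialize a BlockHeader while double-SHA256 hashing the consumed bytes.
let mut reader = Reader::from_read(&bytes[..]);
let hashed = reader.read_and_hash::<BlockHeader>().expect("valid header bytes");
assert_eq!(hashed.size, 80); // a serialized block header is always 80 bytes
assert_eq!(hashed.hash, hashed.data.hash()); // same as hashing after deserialization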

View File

@ -11,28 +11,7 @@ use ser::{
};
use crypto::dhash256;
use hash::H256;
// Below flags apply in the context of BIP 68
// If this flag is set, CTxIn::nSequence is NOT interpreted as a
// relative lock-time.
pub const SEQUENCE_LOCKTIME_DISABLE_FLAG: u32 = 1u32 << 31;
// Setting nSequence to this value for every input in a transaction
// disables nLockTime.
pub const SEQUENCE_FINAL: u32 = 0xffffffff;
// If CTxIn::nSequence encodes a relative lock-time and this flag
// is set, the relative lock-time has units of 512 seconds,
// otherwise it specifies blocks with a granularity of 1.
pub const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = (1 << 22);
// If CTxIn::nSequence encodes a relative lock-time, this mask is
// applied to extract that lock-time from the sequence field.
pub const SEQUENCE_LOCKTIME_MASK: u32 = 0x0000ffff;
/// Threshold for `nLockTime`: below this value it is interpreted as block number,
/// otherwise as UNIX timestamp.
pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC
use constants::{SEQUENCE_FINAL, LOCKTIME_THRESHOLD};
#[derive(Debug, PartialEq, Eq, Clone, Default)]
pub struct OutPoint {
@ -250,7 +229,17 @@ impl Transaction {
self.inputs.len() == 1 && self.inputs[0].previous_output.is_null()
}
pub fn is_final(&self, block_height: u32, block_time: u32) -> bool {
pub fn is_final(&self) -> bool {
// if lock_time is 0, transaction is final
if self.lock_time == 0 {
return true;
}
// setting all sequence numbers to 0xffffffff disables the time lock, so if you want to use locktime,
// at least one input must have a sequence number below the maximum.
self.inputs.iter().all(TransactionInput::is_final)
}
pub fn is_final_in_block(&self, block_height: u32, block_time: u32) -> bool {
if self.lock_time == 0 {
return true;
}
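The rename separates two questions: is_final() asks whether a transaction is final with no block context at all (zero lock_time, or every input at SEQUENCE_FINAL), while is_final_in_block(height, time) evaluates the lock against a concrete block. A usage sketch, where `tx` is a Transaction and `next_block_height`/`median_time_past` are assumed to be supplied by the caller:

// Sketch: context-free finality vs. finality within a specific block.
if tx.is_final() {
    // zero lock_time, or all inputs at SEQUENCE_FINAL: final everywhere
} else if tx.is_final_in_block(next_block_height, median_time_past) {
    // lock_time is set, but it has already passed for this block
}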

View File

@ -2,11 +2,11 @@ extern crate crypto as rcrypto;
extern crate primitives;
extern crate siphasher;
pub use rcrypto::digest::Digest;
use std::hash::Hasher;
use rcrypto::sha1::Sha1;
use rcrypto::sha2::Sha256;
use rcrypto::ripemd160::Ripemd160;
use rcrypto::digest::Digest;
use siphasher::sip::SipHasher24;
use primitives::hash::{H32, H160, H256};
@ -72,6 +72,12 @@ impl DHash256 {
pub fn new() -> Self {
DHash256::default()
}
pub fn finish(mut self) -> H256 {
let mut result = H256::default();
self.result(&mut *result);
result
}
}
impl Digest for DHash256 {

View File

@ -1,7 +1,7 @@
use primitives::hash::H256;
use super::{BlockLocation, IndexedBlock};
use chain;
use chain::{self, IndexedBlock};
use error::Error;
use super::BlockLocation;
#[derive(Debug, PartialEq)]
pub struct Reorganization {

db/src/impls.rs (new file)
View File

@ -0,0 +1,49 @@
use std::borrow::Borrow;
use chain::{OutPoint, TransactionOutput, IndexedTransactionsRef, IndexedTransaction, IndexedBlock};
use transaction_provider::PreviousTransactionOutputProvider;
use transaction_meta_provider::TransactionOutputObserver;
impl<'a, T> PreviousTransactionOutputProvider for IndexedTransactionsRef<'a, T>
where T: Borrow<IndexedTransaction> + Send + Sync {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.transactions.iter()
.map(Borrow::borrow)
.find(|tx| tx.hash == prevout.hash)
.and_then(|tx| tx.raw.outputs.get(prevout.index as usize))
.cloned()
}
}
impl PreviousTransactionOutputProvider for IndexedBlock {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
let txs = IndexedTransactionsRef::new(&self.transactions);
txs.previous_transaction_output(prevout)
}
}
impl TransactionOutputObserver for IndexedBlock {
fn is_spent(&self, _prevout: &OutPoint) -> Option<bool> {
// the code below is valid, but commented out due to its poor performance
// we could optimize it by indexing all outputs once
// let tx: IndexedTransaction = { .. }
// let indexed_outputs: IndexedOutputs = tx.indexed_outputs();
// indexed_outputs.is_spent()
None
// if a previous transaction output appears more than once, then we can safely
// tell that it's spent (double spend)
//let spends = self.transactions.iter()
//.flat_map(|tx| &tx.raw.inputs)
//.filter(|input| &input.previous_output == prevout)
//.take(2)
//.count();
//match spends {
//0 => None,
//1 => Some(false),
//2 => Some(true),
//_ => unreachable!("spends <= 2; self.take(2); qed"),
//}
}
}
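A usage sketch of the IndexedTransactionsRef impl above (assuming two transactions `tx0` and `tx1`, with `tx1` spending `tx0`'s first output):

// Sketch: resolve a previous output against an in-memory transaction list.
let txs: Vec<IndexedTransaction> = vec![tx0.into(), tx1.into()];
let provider = IndexedTransactionsRef::new(&txs);
let prevout = OutPoint { hash: txs[0].hash.clone(), index: 0 };
assert!(provider.previous_transaction_output(&prevout).is_some());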

View File

@ -1,89 +0,0 @@
use primitives::hash::H256;
use chain::{Block, OutPoint, TransactionOutput, merkle_root, Transaction};
use serialization::{Serializable, serialized_list_size};
use indexed_header::IndexedBlockHeader;
use indexed_transaction::IndexedTransaction;
use {TransactionOutputObserver, PreviousTransactionOutputProvider};
#[derive(Debug, Clone)]
pub struct IndexedBlock {
pub header: IndexedBlockHeader,
pub transactions: Vec<IndexedTransaction>,
}
impl PreviousTransactionOutputProvider for IndexedBlock {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
let txs: &[_] = &self.transactions;
txs.previous_transaction_output(prevout)
}
}
impl TransactionOutputObserver for IndexedBlock {
fn is_spent(&self, _prevout: &OutPoint) -> Option<bool> {
// the code below is valid, but commented out due to its poor performance
// we could optimize it by indexing all outputs once
// let tx: IndexedTransaction = { .. }
// let indexed_outputs: IndexedOutputs = tx.indexed_outputs();
// indexed_outputs.is_spent()
None
// if a previous transaction output appears more than once, then we can safely
// tell that it's spent (double spend)
//let spends = self.transactions.iter()
//.flat_map(|tx| &tx.raw.inputs)
//.filter(|input| &input.previous_output == prevout)
//.take(2)
//.count();
//match spends {
//0 => None,
//1 => Some(false),
//2 => Some(true),
//_ => unreachable!("spends <= 2; self.take(2); qed"),
//}
}
}
impl From<Block> for IndexedBlock {
fn from(block: Block) -> Self {
let Block { block_header, transactions } = block;
IndexedBlock {
header: block_header.into(),
transactions: transactions.into_iter().map(Into::into).collect(),
}
}
}
impl IndexedBlock {
pub fn new(header: IndexedBlockHeader, transactions: Vec<IndexedTransaction>) -> Self {
IndexedBlock {
header: header,
transactions: transactions,
}
}
pub fn hash(&self) -> &H256 {
&self.header.hash
}
pub fn to_raw_block(self) -> Block {
Block::new(self.header.raw, self.transactions.into_iter().map(|tx| tx.raw).collect())
}
pub fn size(&self) -> usize {
let header_size = self.header.raw.serialized_size();
let transactions = self.transactions.iter().map(|tx| &tx.raw).collect::<Vec<_>>();
let txs_size = serialized_list_size::<Transaction, &Transaction>(&transactions);
header_size + txs_size
}
pub fn merkle_root(&self) -> H256 {
merkle_root(&self.transactions.iter().map(|tx| &tx.hash).collect::<Vec<&H256>>())
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|tx| tx.raw.is_final(height, self.header.raw.time))
}
}

View File

@ -1,26 +0,0 @@
use primitives::hash::H256;
use chain::BlockHeader;
#[derive(Debug, Clone)]
pub struct IndexedBlockHeader {
pub hash: H256,
pub raw: BlockHeader,
}
impl From<BlockHeader> for IndexedBlockHeader {
fn from(header: BlockHeader) -> Self {
IndexedBlockHeader {
hash: header.hash(),
raw: header,
}
}
}
impl IndexedBlockHeader {
pub fn new(hash: H256, header: BlockHeader) -> Self {
IndexedBlockHeader {
hash: hash,
raw: header,
}
}
}

View File

@ -1,43 +0,0 @@
use std::cmp;
use primitives::hash::H256;
use chain::{Transaction, OutPoint, TransactionOutput};
use PreviousTransactionOutputProvider;
#[derive(Debug, Clone)]
pub struct IndexedTransaction {
pub hash: H256,
pub raw: Transaction,
}
impl From<Transaction> for IndexedTransaction {
fn from(tx: Transaction) -> Self {
IndexedTransaction {
hash: tx.hash(),
raw: tx,
}
}
}
impl IndexedTransaction {
pub fn new(hash: H256, transaction: Transaction) -> Self {
IndexedTransaction {
hash: hash,
raw: transaction,
}
}
}
impl cmp::PartialEq for IndexedTransaction {
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash
}
}
impl<'a> PreviousTransactionOutputProvider for &'a [IndexedTransaction] {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.iter()
.find(|tx| tx.hash == prevout.hash)
.and_then(|tx| tx.raw.outputs.get(prevout.index as usize))
.cloned()
}
}

View File

@ -28,9 +28,7 @@ mod transaction_provider;
mod transaction_meta_provider;
mod error;
mod update_context;
mod indexed_block;
mod indexed_header;
mod indexed_transaction;
mod impls;
#[derive(Debug, Clone)]
pub enum BlockRef {
@ -75,9 +73,6 @@ pub use transaction_meta_provider::{TransactionMetaProvider, TransactionOutputOb
pub use transaction_meta::TransactionMeta;
pub use block_stapler::{BlockStapler, BlockInsertedChain};
pub use block_provider::{BlockProvider, BlockHeaderProvider};
pub use indexed_block::IndexedBlock;
pub use indexed_header::IndexedBlockHeader;
pub use indexed_transaction::IndexedTransaction;
#[cfg(feature="dev")]
pub use test_storage::TestStorage;

View File

@ -4,14 +4,13 @@ use std::fs;
use std::path::Path;
use kvdb::{Database, DatabaseConfig};
use byteorder::{LittleEndian, ByteOrder};
use primitives::hash::H256;
use primitives::bytes::Bytes;
use super::{BlockRef, BestBlock, BlockLocation, IndexedBlock};
use serialization::{serialize, deserialize};
use chain;
use parking_lot::RwLock;
use lru_cache::LruCache;
use primitives::hash::H256;
use primitives::bytes::Bytes;
use chain::{self, IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use serialization::{serialize, deserialize};
use transaction_meta::TransactionMeta;
use error::{Error, ConsistencyError, MetaError};
use update_context::UpdateContext;
@ -19,9 +18,7 @@ use block_provider::{BlockProvider, BlockHeaderProvider};
use transaction_provider::{TransactionProvider, PreviousTransactionOutputProvider};
use transaction_meta_provider::TransactionMetaProvider;
use block_stapler::{BlockStapler, BlockInsertedChain, Reorganization};
use indexed_header::IndexedBlockHeader;
use indexed_transaction::IndexedTransaction;
use super::{BlockRef, BestBlock, BlockLocation};
pub const COL_COUNT: u32 = 10;
pub const COL_META: u32 = 0;

View File

@ -3,9 +3,9 @@
use super::{
BlockRef, Store, Error, BestBlock, BlockLocation, BlockInsertedChain, BlockProvider,
BlockStapler, TransactionMetaProvider, TransactionProvider, PreviousTransactionOutputProvider,
IndexedBlock, BlockHeaderProvider,
BlockHeaderProvider,
};
use chain::{self, Block};
use chain::{self, Block, IndexedBlock};
use primitives::hash::H256;
use serialization;
use chain::bytes::Bytes;

View File

@ -1,13 +1,13 @@
use std::io;
use hash::H32;
use ser::{Deserializable, Reader, Error as ReaderError};
use chain;
use chain::IndexedBlock;
#[derive(Debug, PartialEq)]
pub struct Block {
pub magic: H32,
pub block_size: u32,
pub block: chain::Block,
pub block: IndexedBlock,
}
impl Deserializable for Block {

View File

@ -1,6 +1,7 @@
use std::collections::HashSet;
use primitives::hash::H256;
use chain::{OutPoint, TransactionOutput};
use db::{SharedStore, IndexedTransaction, PreviousTransactionOutputProvider};
use chain::{OutPoint, TransactionOutput, IndexedTransaction};
use db::{SharedStore, PreviousTransactionOutputProvider};
use network::Magic;
use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops};
@ -102,10 +103,6 @@ impl SizePolicy {
self.finish_counter += 1;
}
if fits {
self.current_size += size;
}
match (fits, finish) {
(true, true) => NextStep::FinishAndAppend,
(true, false) => NextStep::Append,
@ -113,6 +110,10 @@ impl SizePolicy {
(false, false) => NextStep::Ignore,
}
}
fn apply(&mut self, size: u32) {
self.current_size += size;
}
}
/// Block assembler
@ -136,25 +137,34 @@ struct FittingTransactionsIterator<'a, T> {
store: &'a PreviousTransactionOutputProvider,
/// Memory pool transactions iterator
iter: T,
/// New block height
block_height: u32,
/// New block time
block_time: u32,
/// Size policy decides if transactions size fits the block
block_size: SizePolicy,
/// Sigops policy decides if transactions sigops fits the block
sigops: SizePolicy,
/// Previous entries are needed to get previous transaction outputs
previous_entries: Vec<&'a Entry>,
/// Hashes of ignored entries
ignored: HashSet<H256>,
/// True if block is already full
finished: bool,
}
impl<'a, T> FittingTransactionsIterator<'a, T> where T: Iterator<Item = &'a Entry> {
fn new(store: &'a PreviousTransactionOutputProvider, iter: T, max_block_size: u32, max_block_sigops: u32) -> Self {
fn new(store: &'a PreviousTransactionOutputProvider, iter: T, max_block_size: u32, max_block_sigops: u32, block_height: u32, block_time: u32) -> Self {
FittingTransactionsIterator {
store: store,
iter: iter,
block_height: block_height,
block_time: block_time,
// reserve some space for the header and the transactions length field
block_size: SizePolicy::new(BLOCK_HEADER_SIZE + 4, max_block_size, 1_000, 50),
sigops: SizePolicy::new(0, max_block_sigops, 8, 50),
previous_entries: Vec::new(),
ignored: HashSet::new(),
finished: false,
}
}
@ -192,18 +202,34 @@ impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<It
let size_step = self.block_size.decide(transaction_size);
let sigops_step = self.sigops.decide(sigops_count);
// both of the next checks could be performed above, but that would break the finishing logic
// skip the transaction if it is not yet final in this block
if !entry.transaction.is_final_in_block(self.block_height, self.block_time) {
continue;
}
// check if any parent transaction has been ignored
if !self.ignored.is_empty() && entry.transaction.inputs.iter().any(|input| self.ignored.contains(&input.previous_output.hash)) {
continue;
}
match size_step.and(sigops_step) {
NextStep::Append => {
self.block_size.apply(transaction_size);
self.sigops.apply(transaction_size);
self.previous_entries.push(entry);
return Some(entry);
},
NextStep::FinishAndAppend => {
self.finished = true;
self.block_size.apply(transaction_size);
self.sigops.apply(transaction_size);
self.previous_entries.push(entry);
return Some(entry);
},
NextStep::Ignore => (),
NextStep::FinishAndIgnore => {
self.ignored.insert(entry.hash.clone());
self.finished = true;
},
}
@ -227,7 +253,7 @@ impl BlockAssembler {
let mut transactions = Vec::new();
let mempool_iter = mempool.iter(OrderingStrategy::ByTransactionScore);
let tx_iter = FittingTransactionsIterator::new(store.as_previous_transaction_output_provider(), mempool_iter, self.max_block_size, self.max_block_sigops);
let tx_iter = FittingTransactionsIterator::new(store.as_previous_transaction_output_provider(), mempool_iter, self.max_block_size, self.max_block_sigops, height, time);
for entry in tx_iter {
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
@ -252,7 +278,7 @@ impl BlockAssembler {
#[cfg(test)]
mod tests {
use db::IndexedTransaction;
use chain::{IndexedTransaction, IndexedTransactionsRef};
use verification::constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
use memory_pool::Entry;
use super::{SizePolicy, NextStep, FittingTransactionsIterator};
@ -260,18 +286,18 @@ mod tests {
#[test]
fn test_size_policy() {
let mut size_policy = SizePolicy::new(0, 1000, 200, 3);
assert_eq!(size_policy.decide(100), NextStep::Append);
assert_eq!(size_policy.decide(500), NextStep::Append);
assert_eq!(size_policy.decide(100), NextStep::Append); size_policy.apply(100);
assert_eq!(size_policy.decide(500), NextStep::Append); size_policy.apply(500);
assert_eq!(size_policy.decide(600), NextStep::Ignore);
assert_eq!(size_policy.decide(200), NextStep::Append);
assert_eq!(size_policy.decide(200), NextStep::Append); size_policy.apply(200);
assert_eq!(size_policy.decide(300), NextStep::Ignore);
assert_eq!(size_policy.decide(300), NextStep::Ignore);
// this transaction will make counter + buffer > max size
assert_eq!(size_policy.decide(1), NextStep::Append);
assert_eq!(size_policy.decide(1), NextStep::Append); size_policy.apply(1);
// so now only 3 more transactions may be accepted / ignored
assert_eq!(size_policy.decide(1), NextStep::Append);
assert_eq!(size_policy.decide(1), NextStep::Append); size_policy.apply(1);
assert_eq!(size_policy.decide(1000), NextStep::Ignore);
assert_eq!(size_policy.decide(1), NextStep::FinishAndAppend);
assert_eq!(size_policy.decide(1), NextStep::FinishAndAppend); size_policy.apply(1);
// we should not call decide again after it returned finish...
// but we can, let's check if result is ok
assert_eq!(size_policy.decide(1000), NextStep::FinishAndIgnore);
@ -291,14 +317,24 @@ mod tests {
#[test]
fn test_fitting_transactions_iterator_no_transactions() {
let store: Vec<IndexedTransaction> = Vec::new();
let store_ref = IndexedTransactionsRef::new(&store);
let entries: Vec<Entry> = Vec::new();
let store_ref: &[_] = &store;
let iter = FittingTransactionsIterator::new(&store_ref, entries.iter(), MAX_BLOCK_SIZE as u32, MAX_BLOCK_SIGOPS as u32);
let iter = FittingTransactionsIterator::new(&store_ref, entries.iter(), MAX_BLOCK_SIZE as u32, MAX_BLOCK_SIGOPS as u32, 0, 0);
assert!(iter.collect::<Vec<_>>().is_empty());
}
#[test]
fn test_fitting_transactions_iterator_max_block_size_reached() {
}
#[test]
fn test_fitting_transactions_iterator_ignored_parent() {
// TODO
}
#[test]
fn test_fitting_transactions_iterator_locked_transaction() {
// TODO
}
}

View File

@ -17,5 +17,6 @@ mod memory_pool;
pub use block_assembler::{BlockAssembler, BlockTemplate};
pub use cpu_miner::find_solution;
pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy};
pub use memory_pool::{MemoryPool, HashedOutPoint, Information as MemoryPoolInformation,
OrderingStrategy as MemoryPoolOrderingStrategy, DoubleSpendCheckResult, NonFinalDoubleSpendSet};
pub use fee::{transaction_fee, transaction_fee_rate};

View File

@ -141,11 +141,33 @@ struct ByPackageScoreOrderedEntry {
}
#[derive(Debug, PartialEq, Eq, Clone)]
struct HashedOutPoint {
/// Transasction output point
pub struct HashedOutPoint {
/// Transaction output point
out_point: OutPoint,
}
/// Result of checking a double spend within the memory pool
#[derive(Debug, PartialEq)]
pub enum DoubleSpendCheckResult {
/// No double spend
NoDoubleSpend,
/// Input {self.1, self.2} of the new transaction is already spent by a final memory-pool transaction {self.0}
DoubleSpend(H256, H256, u32),
/// Some inputs of the new transaction are already spent by non-final memory-pool transactions
NonFinalDoubleSpend(NonFinalDoubleSpendSet),
}
/// Set of transaction outputs which can be replaced if a newer transaction
/// replaces a non-final transaction in the memory pool
#[derive(Debug, PartialEq)]
pub struct NonFinalDoubleSpendSet {
/// Double-spend outputs (previous outputs spent by the newer transaction which are also spent by non-final mempool transactions)
pub double_spends: HashSet<HashedOutPoint>,
/// Outputs which will also be removed from the memory pool if the newer transaction is inserted
/// (i.e. outputs of the non-final transactions && their descendants)
pub dependent_spends: HashSet<HashedOutPoint>,
}
impl From<OutPoint> for HashedOutPoint {
fn from(out_point: OutPoint) -> Self {
HashedOutPoint {
@ -400,6 +422,49 @@ impl Storage {
})
}
pub fn check_double_spend(&self, transaction: &Transaction) -> DoubleSpendCheckResult {
let mut double_spends: HashSet<HashedOutPoint> = HashSet::new();
let mut dependent_spends: HashSet<HashedOutPoint> = HashSet::new();
for input in &transaction.inputs {
// find transaction that spends the same output
let prevout: HashedOutPoint = input.previous_output.clone().into();
if let Some(entry_hash) = self.by_previous_output.get(&prevout).cloned() {
// check if this is a final transaction; if so, that's a potential double-spend error
let entry = self.by_hash.get(&entry_hash).expect("checked that it exists line above; qed");
if entry.transaction.is_final() {
return DoubleSpendCheckResult::DoubleSpend(entry_hash, prevout.out_point.hash, prevout.out_point.index);
}
// else remember this double spend
double_spends.insert(prevout.clone());
// and 'virtually' remove entry && all descendants from mempool
let mut queue: VecDeque<HashedOutPoint> = VecDeque::new();
queue.push_back(prevout);
while let Some(dependent_prevout) = queue.pop_front() {
// if the same output is already spent with another in-pool transaction
if let Some(dependent_entry_hash) = self.by_previous_output.get(&dependent_prevout).cloned() {
let dependent_entry = self.by_hash.get(&dependent_entry_hash).expect("checked that it exists line above; qed");
let dependent_outputs: Vec<_> = dependent_entry.transaction.outputs.iter().enumerate().map(|(idx, _)| OutPoint {
hash: dependent_entry_hash.clone(),
index: idx as u32,
}.into()).collect();
dependent_spends.extend(dependent_outputs.clone());
queue.extend(dependent_outputs);
}
}
}
}
if double_spends.is_empty() {
DoubleSpendCheckResult::NoDoubleSpend
} else {
DoubleSpendCheckResult::NonFinalDoubleSpend(NonFinalDoubleSpendSet {
double_spends: double_spends,
dependent_spends: dependent_spends,
})
}
}
pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<Transaction>> {
let mut queue: VecDeque<OutPoint> = VecDeque::new();
let mut removed: Vec<Transaction> = Vec::new();
@ -407,7 +472,7 @@ impl Storage {
while let Some(prevout) = queue.pop_front() {
if let Some(entry_hash) = self.by_previous_output.get(&prevout.clone().into()).cloned() {
let entry = self.remove_by_hash(&entry_hash).expect("checket that it exists line above; qed");
let entry = self.remove_by_hash(&entry_hash).expect("checked that it exists line above; qed");
queue.extend(entry.transaction.outputs.iter().enumerate().map(|(idx, _)| OutPoint {
hash: entry_hash.clone(),
index: idx as u32,
@ -604,6 +669,11 @@ impl MemoryPool {
self.storage.remove_by_hash(h).map(|entry| entry.transaction)
}
/// Checks if `transaction` spends some outputs, already spent by inpool transactions.
pub fn check_double_spend(&self, transaction: &Transaction) -> DoubleSpendCheckResult {
self.storage.check_double_spend(transaction)
}
/// Removes transaction (and all its descendants) which has spent given output
pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<Transaction>> {
self.storage.remove_by_prevout(prevout)
@ -795,7 +865,7 @@ impl<'a> Iterator for MemoryPoolIterator<'a> {
mod tests {
use chain::{Transaction, OutPoint};
use heapsize::HeapSizeOf;
use super::{MemoryPool, OrderingStrategy};
use super::{MemoryPool, OrderingStrategy, DoubleSpendCheckResult};
use test_data::{ChainBuilder, TransactionBuilder};
fn to_memory_pool(chain: &mut ChainBuilder) -> MemoryPool {
@ -1222,4 +1292,88 @@ mod tests {
assert_eq!(pool.remove_by_prevout(&OutPoint { hash: chain.hash(0), index: 0 }), Some(vec![chain.at(1), chain.at(2)]));
assert_eq!(pool.information().transactions_count, 2);
}
#[test]
fn test_memory_pool_check_double_spend() {
let chain = &mut ChainBuilder::new();
TransactionBuilder::with_output(10).add_output(10).add_output(10).store(chain) // t0
.reset().set_input(&chain.at(0), 0).add_output(20).lock().store(chain) // nonfinal: t0[0] -> t1
.reset().set_input(&chain.at(1), 0).add_output(30).store(chain) // dependent: t0[0] -> t1[0] -> t2
.reset().set_input(&chain.at(0), 0).add_output(40).store(chain) // good replacement: t0[0] -> t3
.reset().set_input(&chain.at(0), 1).add_output(50).store(chain) // final: t0[1] -> t4
.reset().set_input(&chain.at(0), 1).add_output(60).store(chain) // bad replacement: t0[1] -> t5
.reset().set_input(&chain.at(0), 2).add_output(70).store(chain); // no double spend: t0[2] -> t6
let mut pool = MemoryPool::new();
pool.insert_verified(chain.at(1));
pool.insert_verified(chain.at(2));
pool.insert_verified(chain.at(4));
// when output is spent by nonfinal transaction
match pool.check_double_spend(&chain.at(3)) {
DoubleSpendCheckResult::NonFinalDoubleSpend(set) => {
assert_eq!(set.double_spends.len(), 1);
assert!(set.double_spends.contains(&chain.at(1).inputs[0].previous_output.clone().into()));
assert_eq!(set.dependent_spends.len(), 2);
assert!(set.dependent_spends.contains(&OutPoint {
hash: chain.at(1).hash(),
index: 0,
}.into()));
assert!(set.dependent_spends.contains(&OutPoint {
hash: chain.at(2).hash(),
index: 0,
}.into()));
},
_ => panic!("unexpected"),
}
// when output is spent by final transaction
match pool.check_double_spend(&chain.at(5)) {
DoubleSpendCheckResult::DoubleSpend(inpool_hash, prev_hash, prev_index) => {
assert_eq!(inpool_hash, chain.at(4).hash());
assert_eq!(prev_hash, chain.at(0).hash());
assert_eq!(prev_index, 1);
},
_ => panic!("unexpected"),
}
// when output is not spent at all
match pool.check_double_spend(&chain.at(6)) {
DoubleSpendCheckResult::NoDoubleSpend => (),
_ => panic!("unexpected"),
}
}
#[test]
fn test_memory_pool_check_double_spend_multiple_dependent_outputs() {
let chain = &mut ChainBuilder::new();
TransactionBuilder::with_output(100).store(chain) // t0
.reset().set_input(&chain.at(0), 0).add_output(20).add_output(30).add_output(50).lock().store(chain) // nonfinal: t0[0] -> t1
.reset().set_input(&chain.at(0), 0).add_output(40).store(chain); // good replacement: t0[0] -> t2
let mut pool = MemoryPool::new();
pool.insert_verified(chain.at(1));
// when output is spent by nonfinal transaction
match pool.check_double_spend(&chain.at(2)) {
DoubleSpendCheckResult::NonFinalDoubleSpend(set) => {
assert_eq!(set.double_spends.len(), 1);
assert!(set.double_spends.contains(&chain.at(1).inputs[0].previous_output.clone().into()));
assert_eq!(set.dependent_spends.len(), 3);
assert!(set.dependent_spends.contains(&OutPoint {
hash: chain.at(1).hash(),
index: 0,
}.into()));
assert!(set.dependent_spends.contains(&OutPoint {
hash: chain.at(1).hash(),
index: 1,
}.into()));
assert!(set.dependent_spends.contains(&OutPoint {
hash: chain.at(1).hash(),
index: 2,
}.into()));
},
_ => panic!("unexpected"),
}
}
}

View File

@ -32,5 +32,5 @@ pub use config::Config;
pub use net::Config as NetConfig;
pub use p2p::{P2P, Context};
pub use event_loop::{event_loop, forever};
pub use util::{NodeTableError, PeerId, PeerInfo, InternetProtocol};
pub use util::{NodeTableError, PeerId, PeerInfo, InternetProtocol, Direction};
pub use protocol::{InboundSyncConnection, InboundSyncConnectionRef, OutboundSyncConnection, OutboundSyncConnectionRef, LocalSyncNode, LocalSyncNodeRef};

View File

@ -35,8 +35,13 @@ impl Connections {
self.channels().values().map(|channel| channel.peer_info().address).collect()
}
/// Returns info on every peer
pub fn info(&self) -> Vec<PeerInfo> {
self.channels().values().map(|channel| channel.peer_info()).collect()
}
/// Returns number of connections.
pub fn _count(&self) -> usize {
pub fn count(&self) -> usize {
self.channels.read().len()
}

View File

@ -386,6 +386,14 @@ impl Context {
pub fn create_sync_session(&self, start_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
self.local_sync_node.create_sync_session(start_height, outbound_connection)
}
pub fn connections(&self) -> &Connections {
&self.connections
}
pub fn nodes(&self) -> Vec<Node> {
self.node_table.read().nodes()
}
}
pub struct P2P {

View File

@ -296,6 +296,11 @@ impl<T> NodeTable<T> where T: Time {
.collect()
}
/// Returns all nodes
pub fn nodes(&self) -> Vec<Node> {
self.by_addr.iter().map(|(_, n)| n).cloned().collect()
}
/// Returns most recently active nodes.
///
/// The documentation says:

View File

@ -78,3 +78,6 @@ subcommands:
- PATH:
required: true
help: Path of the bitcoin core database
- skip-verification:
long: skip-verification
help: Skip blocks verification

View File

@ -8,9 +8,11 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
// TODO: this might be unnecessary here!
try!(init_db(&cfg, &db));
let mut writer = create_sync_blocks_writer(db, cfg.magic);
let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed");
let skip_verification = matches.is_present("skip-verification");
let mut writer = create_sync_blocks_writer(db, cfg.magic, !skip_verification);
let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned()));
let mut counter = 0;
for blk in blk_dir {
@ -20,7 +22,7 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
Ok(_) => {
counter += 1;
if counter % 1000 == 0 {
info!("Imported {} blocks", counter);
info!(target: "sync", "Imported {} blocks", counter);
}
}
Err(Error::TooManyOrphanBlocks) => return Err("Too many orphan (unordered) blocks".into()),

View File

@ -2,6 +2,7 @@
mod codes {
// NOTE [ToDr] Codes from [-32099, -32000]
pub const UNKNOWN: i64 = -32000;
pub const EXECUTION_ERROR: i64 = -32015;
pub const TRANSACTION_NOT_FOUND: i64 = -32096;
pub const TRANSACTION_OUTPUT_NOT_FOUND: i64 = -32097;
@ -97,3 +98,11 @@ pub fn node_not_added() -> Error {
data: None,
}
}
pub fn unknown() -> Error {
Error {
code: ErrorCode::ServerError(codes::UNKNOWN),
message: "Unknown error has occurred".into(),
data: None,
}
}

View File

@ -9,7 +9,7 @@ use v1::helpers::errors::{block_not_found, block_at_height_not_found, transactio
transaction_output_not_found, transaction_of_side_branch};
use jsonrpc_macros::Trailing;
use jsonrpc_core::Error;
use db;
use {db, chain};
use global_script::Script;
use chain::OutPoint;
use verification;
@ -69,7 +69,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
fn verbose_block(&self, hash: GlobalH256) -> Option<VerboseBlock> {
self.storage.block(hash.into())
.map(|block| {
let block: db::IndexedBlock = block.into();
let block: chain::IndexedBlock = block.into();
let height = self.storage.block_number(block.hash());
let confirmations = match height {
Some(block_number) => (self.storage.best_block().expect("genesis block is required").number - block_number + 1) as i64,

View File

@ -1,8 +1,9 @@
use std::sync::Arc;
use std::net::SocketAddr;
use std::net::{SocketAddr, IpAddr};
use v1::traits::Network as NetworkRpc;
use v1::types::AddNodeOperation;
use v1::types::{AddNodeOperation, NodeInfo};
use jsonrpc_core::Error;
use jsonrpc_macros::Trailing;
use v1::helpers::errors;
use p2p;
@ -10,6 +11,9 @@ pub trait NetworkApi : Send + Sync + 'static {
fn add_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError>;
fn remove_node(&self, socket_addr: SocketAddr) -> Result<(), p2p::NodeTableError>;
fn connect(&self, socket_addr: SocketAddr);
fn node_info(&self, node_addr: IpAddr) -> Result<NodeInfo, p2p::NodeTableError>;
fn nodes_info(&self) -> Vec<NodeInfo>;
fn connection_count(&self) -> usize;
}
impl<T> NetworkRpc for NetworkClient<T> where T: NetworkApi {
@ -29,6 +33,27 @@ impl<T> NetworkRpc for NetworkClient<T> where T: NetworkApi {
}
}
}
fn node_info(&self, _dns: bool, node_addr: Trailing<String>) -> Result<Vec<NodeInfo>, Error> {
Ok(
if node_addr.0.is_empty() {
self.api.nodes_info()
}
else {
let addr = try!(node_addr.0.parse().map_err(
|_| errors::invalid_params("node", "Invalid ip address format, should be ip address (127.0.0.1)")));
let node_info = try!(
self.api.node_info(addr)
.map_err(|_| errors::node_not_added())
);
vec![node_info]
}
)
}
fn connection_count(&self) -> Result<usize, Error> {
Ok(self.api.connection_count())
}
}
pub struct NetworkClient<T: NetworkApi> {
@ -65,4 +90,43 @@ impl NetworkApi for NetworkClientCore {
fn connect(&self, socket_addr: SocketAddr) {
p2p::Context::connect_normal(self.p2p.clone(), socket_addr);
}
fn node_info(&self, node_addr: IpAddr) -> Result<NodeInfo, p2p::NodeTableError> {
let exact_node = try!(
self.p2p.nodes()
.iter()
.find(|n| n.address().ip() == node_addr)
.cloned()
.ok_or(p2p::NodeTableError::NoAddressInTable)
);
let peers: Vec<p2p::PeerInfo> = self.p2p.connections().info()
.into_iter()
.filter(|p| p.address == exact_node.address()).collect();
Ok(
NodeInfo {
addednode: format!("{}", exact_node.address()),
connected: !peers.is_empty(),
addresses: peers.into_iter().map(|p| p.into()).collect(),
}
)
}
fn nodes_info(&self) -> Vec<NodeInfo> {
let peers: Vec<p2p::PeerInfo> = self.p2p.connections().info();
self.p2p.nodes().iter().map(|n| {
let node_peers: Vec<p2p::PeerInfo> = peers.iter().filter(|p| p.address == n.address()).cloned().collect();
NodeInfo {
addednode: format!("{}", n.address()),
connected: !node_peers.is_empty(),
addresses: node_peers.into_iter().map(|p| p.into()).collect(),
}
}).collect()
}
fn connection_count(&self) -> usize {
self.p2p.connections().count()
}
}

View File

@ -1,14 +1,24 @@
use jsonrpc_core::Error;
use v1::types::AddNodeOperation;
use jsonrpc_macros::Trailing;
use v1::types::{AddNodeOperation, NodeInfo};
build_rpc_trait! {
/// Parity-bitcoin network interface
pub trait Network {
/// Add/remove/connecto to the node
/// Add/remove/connect to the node
/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "add"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/
/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "remove"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/
/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "onetry"], "id":1 }' -H 'content-type: application/json;' http://127.0.0.1:8332/
#[rpc(name = "addnode")]
fn add_node(&self, String, AddNodeOperation) -> Result<(), Error>;
/// Query node(s) info
/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true] }' -H 'content-type: application/json;' http://127.0.0.1:8332/
/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json;' http://127.0.0.1:8332/
#[rpc(name = "getaddednodeinfo")]
fn node_info(&self, bool, Trailing<String>) -> Result<Vec<NodeInfo>, Error>;
/// Query node(s) info
/// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getconnectioncount", "params": [] }' -H 'content-type: application/json;' http://127.0.0.1:8332/
#[rpc(name = "getconnectioncount")]
fn connection_count(&self) -> Result<usize, Error>;
}
}

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use super::hash::H256;
use chain;
use super::transaction::RawTransaction;
use db;
use miner;
/// Block template as described in:
@ -96,8 +96,8 @@ impl From<miner::BlockTemplate> for BlockTemplate {
}
}
impl From<db::IndexedTransaction> for BlockTemplateTransaction {
fn from(transaction: db::IndexedTransaction) -> Self {
impl From<chain::IndexedTransaction> for BlockTemplateTransaction {
fn from(transaction: chain::IndexedTransaction) -> Self {
use ser::serialize;
let serialize = serialize(&transaction.raw);
BlockTemplateTransaction {

View File

@ -26,4 +26,4 @@ pub use self::transaction::{RawTransaction, Transaction, TransactionInput, Trans
TransactionInputScript, TransactionOutputScript, SignedTransactionInput, GetRawTransactionResponse,
SignedTransactionOutput, TransactionOutputs};
pub use self::uint::U256;
pub use self::nodes::AddNodeOperation;
pub use self::nodes::{AddNodeOperation, NodeInfo};

View File

@ -1,4 +1,5 @@
use serde::{Deserialize, Deserializer};
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use p2p::{Direction, PeerInfo};
#[derive(Debug, PartialEq)]
pub enum AddNodeOperation {
@ -29,3 +30,42 @@ impl Deserialize for AddNodeOperation {
deserializer.deserialize(DummyVisitor)
}
}
#[derive(Serialize)]
pub struct NodeInfoAddress {
address: String,
connected: NodeInfoAddressConnectionType,
}
impl From<PeerInfo> for NodeInfoAddress {
fn from(info: PeerInfo) -> Self {
NodeInfoAddress {
address: format!("{}", info.address),
connected: match info.direction {
Direction::Inbound => NodeInfoAddressConnectionType::Inbound,
Direction::Outbound => NodeInfoAddressConnectionType::Outbound,
},
}
}
}
#[derive(Serialize)]
pub struct NodeInfo {
pub addednode: String,
pub connected: bool,
pub addresses: Vec<NodeInfoAddress>,
}
pub enum NodeInfoAddressConnectionType {
Inbound,
Outbound,
}
impl Serialize for NodeInfoAddressConnectionType {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
match *self {
NodeInfoAddressConnectionType::Inbound => "inbound".serialize(serializer),
NodeInfoAddressConnectionType::Outbound => "outbound".serialize(serializer),
}
}
}

View File

@ -1,7 +1,7 @@
use std::{cmp, mem};
use bytes::Bytes;
use keys::{Signature, Public};
use chain::SEQUENCE_LOCKTIME_DISABLE_FLAG;
use chain::constants::SEQUENCE_LOCKTIME_DISABLE_FLAG;
use crypto::{sha1, sha256, dhash160, dhash256, ripemd160};
use {
script, Script, Num, VerificationFlags, Opcode, Error,

View File

@ -1,7 +1,7 @@
use keys::{Public, Signature};
use chain::{
self, SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG,
SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG
use chain::constants::{
SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG,
SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG, LOCKTIME_THRESHOLD
};
use {SignatureVersion, Script, TransactionInputSigner, Num};
@ -64,8 +64,8 @@ impl SignatureChecker for TransactionSignatureChecker {
// the nLockTime in the transaction.
let lock_time_u32: u32 = lock_time.into();
if !(
(self.signer.lock_time < chain::LOCKTIME_THRESHOLD && lock_time_u32 < chain::LOCKTIME_THRESHOLD) ||
(self.signer.lock_time >= chain::LOCKTIME_THRESHOLD && lock_time_u32 >= chain::LOCKTIME_THRESHOLD)
(self.signer.lock_time < LOCKTIME_THRESHOLD && lock_time_u32 < LOCKTIME_THRESHOLD) ||
(self.signer.lock_time >= LOCKTIME_THRESHOLD && lock_time_u32 >= LOCKTIME_THRESHOLD)
) {
return false;
}

View File

@ -84,6 +84,11 @@ impl<R> Reader<R> where R: io::Read {
T::deserialize(self)
}
pub fn read_with_proxy<T, F>(&mut self, proxy: F) -> Result<T, Error> where T: Deserializable, F: FnMut(&[u8]) {
let mut reader = Reader::from_read(Proxy::new(self, proxy));
T::deserialize(&mut reader)
}
pub fn read_slice(&mut self, bytes: &mut [u8]) -> Result<(), Error> {
io::Read::read_exact(self, bytes).map_err(|_| Error::UnexpectedEnd)
}
@ -148,3 +153,26 @@ impl<R, T> Iterator for ReadIterator<R, T> where R: io::Read, T: Deserializable
}
}
}
struct Proxy<F, T> {
from: F,
to: T,
}
impl<F, T> Proxy<F, T> {
fn new(from: F, to: T) -> Self {
Proxy {
from: from,
to: to,
}
}
}
impl<F, T> io::Read for Proxy<F, T> where F: io::Read, T: FnMut(&[u8]) {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let len = try!(io::Read::read(&mut self.from, buf));
let to = &mut self.to;
to(&buf[..len]);
Ok(len)
}
}
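A usage sketch of read_with_proxy (assuming a `reader: Reader<R>` and the chain crate's Transaction type in scope): the closure observes every raw byte the value consumes while deserializing.

// Sketch: measure the serialized size of a value as a side effect of reading it.
let mut consumed = 0usize;
let tx: Transaction = reader.read_with_proxy(|bytes| consumed += bytes.len())
    .expect("valid transaction bytes");
// `consumed` now equals tx.serialized_size()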

View File

@ -17,6 +17,7 @@ pub struct BlocksWriter {
orphaned_blocks_pool: OrphanBlocksPool,
verifier: SyncVerifier<BlocksWriterSink>,
sink: Arc<BlocksWriterSinkData>,
verification: bool,
}
struct BlocksWriterSink {
@ -29,7 +30,7 @@ struct BlocksWriterSinkData {
}
impl BlocksWriter {
pub fn new(storage: db::SharedStore, network: Magic) -> BlocksWriter {
pub fn new(storage: db::SharedStore, network: Magic, verification: bool) -> BlocksWriter {
let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone()));
let sink = Arc::new(BlocksWriterSink::new(sink_data.clone()));
let verifier = SyncVerifier::new(network, storage.clone(), sink);
@ -38,18 +39,18 @@ impl BlocksWriter {
orphaned_blocks_pool: OrphanBlocksPool::new(),
verifier: verifier,
sink: sink_data,
verification: verification,
}
}
pub fn append_block(&mut self, block: chain::Block) -> Result<(), Error> {
let indexed_block: db::IndexedBlock = block.into();
pub fn append_block(&mut self, block: chain::IndexedBlock) -> Result<(), Error> {
// do not append block if it is already there
if self.storage.contains_block(db::BlockRef::Hash(indexed_block.hash().clone())) {
if self.storage.contains_block(db::BlockRef::Hash(block.hash().clone())) {
return Ok(());
}
// verify && insert only if parent block is already in the storage
if !self.storage.contains_block(db::BlockRef::Hash(indexed_block.header.raw.previous_header_hash.clone())) {
self.orphaned_blocks_pool.insert_orphaned_block(indexed_block.hash().clone(), indexed_block);
if !self.storage.contains_block(db::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) {
self.orphaned_blocks_pool.insert_orphaned_block(block.hash().clone(), block);
// we can't hold many orphaned blocks in memory during import
if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS {
return Err(Error::TooManyOrphanBlocks);
@ -58,13 +59,17 @@ impl BlocksWriter {
}
// verify && insert block && all its orphan children
let mut verification_queue: VecDeque<db::IndexedBlock> = self.orphaned_blocks_pool.remove_blocks_for_parent(indexed_block.hash()).into_iter().map(|(_, b)| b).collect();
verification_queue.push_front(indexed_block);
let mut verification_queue: VecDeque<chain::IndexedBlock> = self.orphaned_blocks_pool.remove_blocks_for_parent(block.hash()).into_iter().map(|(_, b)| b).collect();
verification_queue.push_front(block);
while let Some(block) = verification_queue.pop_front() {
self.verifier.verify_block(block);
if self.verification {
self.verifier.verify_block(block);
if let Some(err) = self.sink.error() {
return Err(err);
if let Some(err) = self.sink.error() {
return Err(err);
}
} else {
try!(self.storage.insert_indexed_block(&block).map_err(Error::Database));
}
}
@ -97,7 +102,7 @@ impl VerificationSink for BlocksWriterSink {
}
impl BlockVerificationSink for BlocksWriterSink {
fn on_block_verification_success(&self, block: db::IndexedBlock) -> Option<Vec<VerificationTask>> {
fn on_block_verification_success(&self, block: chain::IndexedBlock) -> Option<Vec<VerificationTask>> {
if let Err(err) = self.data.storage.insert_indexed_block(&block) {
*self.data.err.lock() = Some(Error::Database(err));
}
@ -132,8 +137,8 @@ mod tests {
#[test]
fn blocks_writer_appends_blocks() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
blocks_target.append_block(test_data::block_h1()).expect("Expecting no error");
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error");
assert_eq!(db.best_block().expect("Block is inserted").number, 1);
}
@ -141,9 +146,9 @@ mod tests {
fn blocks_writer_verification_error() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1);
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
for (index, block) in blocks.into_iter().skip(1).enumerate() {
match blocks_target.append_block(block) {
match blocks_target.append_block(block.into()) {
Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (),
Ok(_) if index != MAX_ORPHANED_BLOCKS => (),
_ => panic!("unexpected"),
@ -155,12 +160,12 @@ mod tests {
#[test]
fn blocks_writer_out_of_order_block() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
let wrong_block = test_data::block_builder()
.header().parent(test_data::genesis().hash()).build()
.build();
match blocks_target.append_block(wrong_block).unwrap_err() {
match blocks_target.append_block(wrong_block.into()).unwrap_err() {
Error::Verification(_) => (),
_ => panic!("Unexpected error"),
};
@ -170,12 +175,12 @@ mod tests {
#[test]
fn blocks_writer_append_to_existing_db() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet, true);
assert!(blocks_target.append_block(test_data::genesis()).is_ok());
assert!(blocks_target.append_block(test_data::genesis().into()).is_ok());
assert_eq!(db.best_block().expect("Block is inserted").number, 0);
assert!(blocks_target.append_block(test_data::block_h1()).is_ok());
assert!(blocks_target.append_block(test_data::block_h1().into()).is_ok());
assert_eq!(db.best_block().expect("Block is inserted").number, 1);
}
}

View File

@ -2,8 +2,7 @@ use std::collections::HashSet;
use rand::{thread_rng, Rng};
use bitcrypto::{sha256, siphash24};
use byteorder::{LittleEndian, ByteOrder};
use chain::{BlockHeader, ShortTransactionID};
use db::IndexedBlock;
use chain::{BlockHeader, ShortTransactionID, IndexedBlock};
use message::common::{BlockHeaderAndIDs, PrefilledTransaction};
use primitives::hash::H256;
use ser::{Stream, Serializable};

View File

@ -63,8 +63,8 @@ pub enum Error {
}
/// Create blocks writer.
pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic) -> blocks_writer::BlocksWriter {
blocks_writer::BlocksWriter::new(db, network)
pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic, verification: bool) -> blocks_writer::BlocksWriter {
blocks_writer::BlocksWriter::new(db, network, verification)
}
/// Creates local sync node for given `db`

View File

@ -634,12 +634,12 @@ mod tests {
let transaction_hash = transaction.hash();
let result = local_node.accept_transaction(transaction);
assert_eq!(result, Ok(transaction_hash));
assert_eq!(result, Ok(transaction_hash.clone()));
assert_eq!(executor.lock().take_tasks(), vec![Task::SendInventory(peer_index1,
vec![InventoryVector {
inv_type: InventoryType::MessageTx,
hash: "0791efccd035c5fe501023ff888106eba5eff533965de4a6e06400f623bcac34".into(),
hash: transaction_hash,
}]
)]
);

View File

@ -3,7 +3,7 @@ use std::collections::hash_map::Entry;
use linked_hash_map::LinkedHashMap;
use time;
use primitives::hash::H256;
use db::IndexedBlock;
use chain::IndexedBlock;
#[derive(Debug)]
/// Storage for blocks, for which we have no parent yet.

View File

@ -3,8 +3,8 @@ use std::sync::Arc;
use std::collections::{VecDeque, HashSet};
use linked_hash_map::LinkedHashMap;
use parking_lot::RwLock;
use chain::{BlockHeader, Transaction};
use db::{self, IndexedBlock};
use chain::{BlockHeader, Transaction, IndexedBlock};
use db;
use best_headers_chain::{BestHeadersChain, Information as BestHeadersInformation};
use primitives::bytes::Bytes;
use primitives::hash::H256;
@ -677,6 +677,13 @@ impl Chain {
/// Insert transaction to memory pool
pub fn insert_verified_transaction(&mut self, transaction: Transaction) {
// we have a verified transaction, but it may replace an existing
// transaction in the memory pool
// => remove the previous transactions first
for input in &transaction.inputs {
self.memory_pool.remove_by_prevout(&input.previous_output);
}
// now insert transaction itself
self.memory_pool.insert_verified(transaction);
}
@ -1269,4 +1276,20 @@ mod tests {
headers[4].clone(),
]), HeadersIntersection::DeadEnd(0));
}
#[test]
fn update_memory_pool_transaction() {
use test_data::{ChainBuilder, TransactionBuilder};
let data_chain = &mut ChainBuilder::new();
TransactionBuilder::with_output(10).add_output(10).add_output(10).store(data_chain) // transaction0
.reset().set_input(&data_chain.at(0), 0).add_output(20).lock().store(data_chain) // transaction0 -> transaction1
.reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
chain.insert_verified_transaction(data_chain.at(1));
assert_eq!(chain.information().transactions.transactions_count, 1);
chain.insert_verified_transaction(data_chain.at(2));
assert_eq!(chain.information().transactions.transactions_count, 1); // tx was replaced
}
}

View File

@ -7,8 +7,8 @@ use futures::{BoxFuture, Future, finished};
use futures::stream::Stream;
use tokio_core::reactor::{Handle, Interval};
use futures_cpupool::CpuPool;
use db::{self, IndexedBlock, BlockHeaderProvider, BlockRef};
use chain::{BlockHeader, Transaction};
use db::{self, BlockHeaderProvider, BlockRef};
use chain::{BlockHeader, Transaction, IndexedBlock};
use message::types;
use message::common::{InventoryVector, InventoryType};
use primitives::hash::H256;
@ -1312,7 +1312,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
},
BlockAnnouncementType::SendCompactBlock => {
let indexed_blocks: Vec<db::IndexedBlock> = {
let indexed_blocks: Vec<IndexedBlock> = {
let chain = self.chain.read();
new_blocks_hashes.iter()
.filter_map(|h| chain.storage().block(db::BlockRef::Hash(h.clone())))
@ -2935,4 +2935,9 @@ pub mod tests {
// should not panic
sync.on_peer_transaction(1, test_data::TransactionBuilder::with_default_input(0).into());
}
#[test]
fn when_transaction_replaces_locked_transaction() {
// TODO
}
}

View File

@ -2,12 +2,13 @@ use std::thread;
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::mpsc::{channel, Sender, Receiver};
use chain::{Transaction, OutPoint, TransactionOutput};
use chain::{Transaction, OutPoint, TransactionOutput, IndexedBlock};
use network::Magic;
use miner::{DoubleSpendCheckResult, NonFinalDoubleSpendSet};
use primitives::hash::H256;
use synchronization_chain::ChainRef;
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain};
use db::{SharedStore, IndexedBlock, PreviousTransactionOutputProvider, TransactionOutputObserver};
use verification::{self, BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain};
use db::{SharedStore, PreviousTransactionOutputProvider, TransactionOutputObserver};
use time::get_time;
/// Block verification events sink
@ -57,12 +58,23 @@ pub struct AsyncVerifier {
verification_worker_thread: Option<thread::JoinHandle<()>>,
}
/// Transaction output observer, which looks into both the storage and the memory pool
struct ChainMemoryPoolTransactionOutputProvider {
/// Chain reference
chain: ChainRef,
/// Previous outputs for which we should return a 'not spent' value.
/// These are used when a new version of a transaction is received.
nonfinal_spends: Option<NonFinalDoubleSpendSet>,
}
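The `nonfinal_spends` field drives the two overrides implemented further down; a hypothetical mirror of the assumed shape of `NonFinalDoubleSpendSet` (field names taken from the code below, payload types simplified):

use std::collections::HashSet;

// Hypothetical mirror of the set consulted below; the real type comes from the
// miner crate and outpoints are simplified here to (tx_hash, output_index):
// - double_spends: prevouts spent only by nonfinal mempool transactions;
//   is_spent() reports them as Some(false), so the replacing tx may claim them
// - dependent_spends: outputs of those nonfinal transactions (and of anything
//   built on top of them); previous_transaction_output() hides them, so a
//   replacement that also depends on them fails with missing inputs
struct NonFinalDoubleSpendSetSketch {
    double_spends: HashSet<(u64, u32)>,
    dependent_spends: HashSet<(u64, u32)>,
}

fn main() {
    let mut set = NonFinalDoubleSpendSetSketch {
        double_spends: HashSet::new(),
        dependent_spends: HashSet::new(),
    };
    set.double_spends.insert((1, 0));    // e.g. t0[0], spent by nonfinal t1
    set.dependent_spends.insert((2, 0)); // e.g. t1[0], an output of removed t1
    assert!(set.double_spends.contains(&(1, 0)));
}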
#[derive(Default)]
struct EmptyTransactionOutputProvider {
impl VerificationTask {
/// Returns a reference to the transaction if this is a transaction verification task
pub fn transaction(&self) -> Option<&Transaction> {
match self {
&VerificationTask::VerifyTransaction(_, ref transaction) => Some(&transaction),
_ => None,
}
}
}
impl AsyncVerifier {
@ -86,8 +98,18 @@ impl AsyncVerifier {
match task {
VerificationTask::Stop => break,
_ => {
let prevout_provider = ChainMemoryPoolTransactionOutputProvider::with_chain(chain.clone());
execute_verification_task(&sink, &prevout_provider, &verifier, task)
let prevout_provider = if let Some(ref transaction) = task.transaction() {
match ChainMemoryPoolTransactionOutputProvider::for_transaction(chain.clone(), transaction) {
Err(e) => {
sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash());
return;
},
Ok(prevout_provider) => Some(prevout_provider),
}
} else {
None
};
execute_verification_task(&sink, prevout_provider.as_ref(), &verifier, task)
},
}
}
@ -137,23 +159,23 @@ impl<T> SyncVerifier<T> where T: VerificationSink {
verifier: verifier,
sink: sink,
}
}
}
}
impl<T> Verifier for SyncVerifier<T> where T: VerificationSink {
/// Verify block
fn verify_block(&self, block: IndexedBlock) {
execute_verification_task(&self.sink, &EmptyTransactionOutputProvider::default(), &self.verifier, VerificationTask::VerifyBlock(block))
execute_verification_task::<T, ChainMemoryPoolTransactionOutputProvider>(&self.sink, None, &self.verifier, VerificationTask::VerifyBlock(block))
}
/// Verify transaction
fn verify_transaction(&self, height: u32, transaction: Transaction) {
execute_verification_task(&self.sink, &EmptyTransactionOutputProvider::default(), &self.verifier, VerificationTask::VerifyTransaction(height, transaction))
fn verify_transaction(&self, _height: u32, _transaction: Transaction) {
unimplemented!() // sync verifier is currently only used for block verification
}
}
/// Execute single verification task
fn execute_verification_task<T: VerificationSink, U: PreviousTransactionOutputProvider + TransactionOutputObserver>(sink: &Arc<T>, tx_output_provider: &U, verifier: &ChainVerifier, task: VerificationTask) {
fn execute_verification_task<T: VerificationSink, U: TransactionOutputObserver + PreviousTransactionOutputProvider>(sink: &Arc<T>, tx_output_provider: Option<&U>, verifier: &ChainVerifier, task: VerificationTask) {
let mut tasks_queue: VecDeque<VerificationTask> = VecDeque::new();
tasks_queue.push_back(task);
@ -178,6 +200,7 @@ fn execute_verification_task<T: VerificationSink, U: PreviousTransactionOutputPr
},
VerificationTask::VerifyTransaction(height, transaction) => {
let time: u32 = get_time().sec as u32;
let tx_output_provider = tx_output_provider.expect("must be provided for transaction checks");
match verifier.verify_mempool_transaction(tx_output_provider, height, time, &transaction) {
Ok(_) => sink.on_transaction_verification_success(transaction),
Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash()),
@ -189,40 +212,59 @@ fn execute_verification_task<T: VerificationSink, U: PreviousTransactionOutputPr
}
impl ChainMemoryPoolTransactionOutputProvider {
pub fn with_chain(chain: ChainRef) -> Self {
ChainMemoryPoolTransactionOutputProvider {
chain: chain,
}
pub fn for_transaction(chain: ChainRef, transaction: &Transaction) -> Result<Self, verification::TransactionError> {
// we have to check if there are other in-mempool transactions which spend the same outputs
let check_result = chain.read().memory_pool().check_double_spend(transaction);
ChainMemoryPoolTransactionOutputProvider::for_double_spend_check_result(chain, check_result)
}
}
impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
let chain = self.chain.read();
chain.memory_pool().previous_transaction_output(prevout)
.or_else(|| chain.storage().as_previous_transaction_output_provider().previous_transaction_output(prevout))
pub fn for_double_spend_check_result(chain: ChainRef, check_result: DoubleSpendCheckResult) -> Result<Self, verification::TransactionError> {
match check_result {
DoubleSpendCheckResult::DoubleSpend(_, hash, index) => Err(verification::TransactionError::UsingSpentOutput(hash, index)),
DoubleSpendCheckResult::NoDoubleSpend => Ok(ChainMemoryPoolTransactionOutputProvider {
chain: chain.clone(),
nonfinal_spends: None,
}),
DoubleSpendCheckResult::NonFinalDoubleSpend(nonfinal_spends) => Ok(ChainMemoryPoolTransactionOutputProvider {
chain: chain.clone(),
nonfinal_spends: Some(nonfinal_spends),
}),
}
}
}
impl TransactionOutputObserver for ChainMemoryPoolTransactionOutputProvider {
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
let chain = self.chain.read();
if chain.memory_pool().is_spent(prevout) {
return Some(true);
// check if this output is 'locked' by a mempool transaction
if let Some(ref nonfinal_spends) = self.nonfinal_spends {
if nonfinal_spends.double_spends.contains(&prevout.clone().into()) {
return Some(false);
}
}
chain.storage().transaction_meta(&prevout.hash).and_then(|tm| tm.is_spent(prevout.index as usize))
// we can omit the memory_pool check here, because it has already been performed in the `for_transaction` method
// => just check spending in storage
self.chain.read().storage()
.transaction_meta(&prevout.hash)
.and_then(|tm| tm.is_spent(prevout.index as usize))
}
}
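The `Option<bool>` returned here is effectively a tri-state; a small hypothetical helper spelling out how a caller is assumed to read it:

// Tri-state contract assumed by the observer above:
//   Some(true)  -> output known to be spent: the spending tx must be rejected
//   Some(false) -> output known to be spendable (here: its only spender is a
//                  nonfinal transaction that is being replaced)
//   None        -> unknown to this observer: fall through to other checks
fn interpret(observation: Option<bool>) -> &'static str {
    match observation {
        Some(true) => "reject: double spend",
        Some(false) => "accept: output spendable",
        None => "defer: consult other providers",
    }
}

fn main() {
    assert_eq!(interpret(Some(false)), "accept: output spendable");
    assert_eq!(interpret(None), "defer: consult other providers");
}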
impl PreviousTransactionOutputProvider for EmptyTransactionOutputProvider {
fn previous_transaction_output(&self, _prevout: &OutPoint) -> Option<TransactionOutput> {
None
}
}
impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
// check if this is an output of some transaction which is virtually removed from the memory pool
if let Some(ref nonfinal_spends) = self.nonfinal_spends {
if nonfinal_spends.dependent_spends.contains(&prevout.clone().into()) {
// transaction is trying to replace some nonfinal transaction
// + it also depends on this transaction
// => this is definitely an error
return None;
}
}
impl TransactionOutputObserver for EmptyTransactionOutputProvider {
fn is_spent(&self, _prevout: &OutPoint) -> Option<bool> {
None
let chain = self.chain.read();
chain.memory_pool().previous_transaction_output(prevout)
.or_else(|| chain.storage().as_previous_transaction_output_provider().previous_transaction_output(prevout))
}
}
@ -230,15 +272,16 @@ impl TransactionOutputObserver for EmptyTransactionOutputProvider {
pub mod tests {
use std::sync::Arc;
use std::collections::HashMap;
use parking_lot::RwLock;
use chain::Transaction;
use chain::{Transaction, OutPoint};
use synchronization_chain::{Chain, ChainRef};
use synchronization_client::CoreVerificationSink;
use synchronization_executor::tests::DummyTaskExecutor;
use primitives::hash::H256;
use chain::IndexedBlock;
use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, ChainMemoryPoolTransactionOutputProvider};
use db::{self, IndexedBlock};
use db::{self, TransactionOutputObserver, PreviousTransactionOutputProvider};
use test_data;
use parking_lot::RwLock;
#[derive(Default)]
pub struct DummyVerifier {
@ -283,16 +326,46 @@ pub mod tests {
#[test]
fn when_transaction_spends_output_twice() {
use db::TransactionOutputObserver;
let tx1: Transaction = test_data::TransactionBuilder::with_default_input(0).into();
let tx2: Transaction = test_data::TransactionBuilder::with_default_input(1).into();
let out1 = tx1.inputs[0].previous_output.clone();
let out2 = tx2.inputs[0].previous_output.clone();
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
chain.memory_pool_mut().insert_verified(tx1);
assert!(chain.memory_pool().is_spent(&out1));
assert!(!chain.memory_pool().is_spent(&out2));
}
#[test]
fn when_transaction_depends_on_removed_nonfinal_transaction() {
let dchain = &mut test_data::ChainBuilder::new();
test_data::TransactionBuilder::with_output(10).store(dchain) // t0
.reset().set_input(&dchain.at(0), 0).add_output(20).lock().store(dchain) // nonfinal: t0[0] -> t1
.reset().set_input(&dchain.at(1), 0).add_output(30).store(dchain) // dependent: t0[0] -> t1[0] -> t2
.reset().set_input(&dchain.at(0), 0).add_output(40).store(dchain); // good replacement: t0[0] -> t3
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
chain.memory_pool_mut().insert_verified(dchain.at(0));
chain.memory_pool_mut().insert_verified(dchain.at(1));
chain.memory_pool_mut().insert_verified(dchain.at(2));
// when inserting t3:
// check that is_spent(t0[0]) == Some(false) (spent only by the nonfinal t1, which is being replaced)
// check that is_spent(t1[0]) == None (as t1 is virtually removed)
// check that is_spent(t2[0]) == None (as t2 is virtually removed)
// check that previous_transaction_output(t0[0]) = Some(_)
// check that previous_transaction_output(t1[0]) = None (as t1 is virtually removed)
// check that previous_transaction_output(t2[0]) = None (as t2 is virtually removed)
// =>
// if t3 also depended on t1[0] || t2[0], it would be rejected by verification for missing inputs
let chain = ChainRef::new(RwLock::new(chain));
let provider = ChainMemoryPoolTransactionOutputProvider::with_chain(chain);
assert!(provider.is_spent(&out1).unwrap_or_default());
assert!(!provider.is_spent(&out2).unwrap_or_default());
let provider = ChainMemoryPoolTransactionOutputProvider::for_transaction(chain, &dchain.at(3)).unwrap();
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(false));
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None);
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None);
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(dchain.at(0).outputs[0].clone()));
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None);
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None);
}
}

View File

@ -97,7 +97,7 @@ impl TransactionBuilder {
index: output_index,
},
script_sig: Bytes::new_with_len(0),
sequence: 0,
sequence: 0xffffffff,
});
self
}
@ -113,11 +113,17 @@ impl TransactionBuilder {
index: output_index,
},
script_sig: Bytes::new_with_len(0),
sequence: 0,
sequence: 0xffffffff,
}];
self
}
pub fn lock(mut self) -> Self {
self.transaction.inputs[0].sequence = 0;
self.transaction.lock_time = 500000;
self
}
pub fn store(self, chain: &mut ChainBuilder) -> Self {
chain.transactions.push(self.transaction.clone());
self

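`lock()` produces a deliberately nonfinal transaction: `lock_time = 500000` is below the 500000000 threshold and is therefore read as a block height, and a zero sequence keeps the lock active. A standalone approximation of the standard Bitcoin finality rule (illustration only, not the crate's `is_final_in_block`):

const SEQUENCE_FINAL: u32 = 0xffffffff;
const LOCKTIME_THRESHOLD: u32 = 500_000_000;

// rough approximation of Bitcoin's IsFinalTx, for illustration only
fn is_final_sketch(lock_time: u32, sequences: &[u32], height: u32, time: u32) -> bool {
    if lock_time == 0 {
        return true;
    }
    // below the threshold, lock_time is a block height; above it, a timestamp
    let cutoff = if lock_time < LOCKTIME_THRESHOLD { height } else { time };
    if lock_time < cutoff {
        return true;
    }
    // setting every input's sequence to SEQUENCE_FINAL disables the lock
    sequences.iter().all(|&s| s == SEQUENCE_FINAL)
}

fn main() {
    // lock() sets sequence = 0 and lock_time = 500_000 (a height), so the
    // transaction stays nonfinal until block 500_000 is reached
    assert!(!is_final_sketch(500_000, &[0], 100, 0));
    assert!(is_final_sketch(500_000, &[0], 600_000, 0));
}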
View File

@ -127,8 +127,16 @@ impl<'a> BlockRule for BlockCoinbaseClaim<'a> {
let claim = self.block.transactions[0].raw.total_spends();
let (fees, overflow) = available.overflowing_sub(spends);
let (reward, overflow2) = fees.overflowing_add(block_reward_satoshi(self.height));
if overflow || overflow2 || claim > reward {
if overflow {
return Err(Error::TransactionFeesOverflow);
}
let (reward, overflow) = fees.overflowing_add(block_reward_satoshi(self.height));
if overflow {
return Err(Error::TransactionFeeAndRewardOverflow);
}
if claim > reward {
Err(Error::CoinbaseOverspend { expected_max: reward, actual: claim })
} else {
Ok(())

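Splitting the old combined check gives each arithmetic failure its own error. A minimal standalone illustration of the two `overflowing_*` outcomes, with hypothetical numbers:

fn main() {
    // spends larger than the available inputs underflow the subtraction
    let available: u64 = 10;
    let spends: u64 = 25;
    let (_fees, fees_overflow) = available.overflowing_sub(spends);
    assert!(fees_overflow); // -> Error::TransactionFeesOverflow

    // fees near u64::MAX overflow once the block reward is added
    let huge_fees = u64::max_value();
    let reward: u64 = 50;
    let (_, reward_overflow) = huge_fees.overflowing_add(reward);
    assert!(reward_overflow); // -> Error::TransactionFeeAndRewardOverflow
}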
View File

@ -1,6 +1,6 @@
use std::ops;
use primitives::hash::H256;
use db::{IndexedBlock, IndexedTransaction, IndexedBlockHeader};
use chain::{IndexedBlock, IndexedTransaction, IndexedBlockHeader};
/// Blocks whose parents are known to be in the chain
#[derive(Clone, Copy)]

View File

@ -1,10 +1,10 @@
//! Bitcoin chain verifier
use hash::H256;
use db::{self, IndexedBlockHeader, BlockLocation, PreviousTransactionOutputProvider, BlockHeaderProvider, TransactionOutputObserver};
use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, Transaction};
use db::{BlockLocation, SharedStore, PreviousTransactionOutputProvider, BlockHeaderProvider, TransactionOutputObserver};
use network::Magic;
use error::{Error, TransactionError};
use {Verify, chain};
use canon::{CanonBlock, CanonTransaction};
use duplex_store::{DuplexTransactionOutputProvider, NoopStore};
use verify_chain::ChainVerifier;
@ -12,6 +12,7 @@ use verify_header::HeaderVerifier;
use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
use Verify;
#[derive(PartialEq, Debug)]
/// Block verification chain
@ -28,13 +29,13 @@ pub enum Chain {
pub type VerificationResult = Result<Chain, Error>;
pub struct BackwardsCompatibleChainVerifier {
store: db::SharedStore,
store: SharedStore,
skip_pow: bool,
network: Magic,
}
impl BackwardsCompatibleChainVerifier {
pub fn new(store: db::SharedStore, network: Magic) -> Self {
pub fn new(store: SharedStore, network: Magic) -> Self {
BackwardsCompatibleChainVerifier {
store: store,
skip_pow: false,
@ -48,7 +49,7 @@ impl BackwardsCompatibleChainVerifier {
self
}
fn verify_block(&self, block: &db::IndexedBlock) -> VerificationResult {
fn verify_block(&self, block: &IndexedBlock) -> VerificationResult {
let current_time = ::time::get_time().sec as u32;
// first run pre-verification
let chain_verifier = ChainVerifier::new(block, self.network, current_time);
@ -77,7 +78,7 @@ impl BackwardsCompatibleChainVerifier {
&self,
_block_header_provider: &BlockHeaderProvider,
hash: &H256,
header: &chain::BlockHeader
header: &BlockHeader
) -> Result<(), Error> {
// let's do only preverification
// TODO: full verification
@ -92,7 +93,7 @@ impl BackwardsCompatibleChainVerifier {
prevout_provider: &T,
height: u32,
time: u32,
transaction: &chain::Transaction,
transaction: &Transaction,
) -> Result<(), TransactionError> where T: PreviousTransactionOutputProvider + TransactionOutputObserver {
let indexed_tx = transaction.clone().into();
// let's do preverification first
@ -117,7 +118,7 @@ impl BackwardsCompatibleChainVerifier {
}
impl Verify for BackwardsCompatibleChainVerifier {
fn verify(&self, block: &db::IndexedBlock) -> VerificationResult {
fn verify(&self, block: &IndexedBlock) -> VerificationResult {
let result = self.verify_block(block);
trace!(
target: "verification", "Block {} (transactions: {}) verification finished. Result {:?}",
@ -132,7 +133,8 @@ impl Verify for BackwardsCompatibleChainVerifier {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use db::{TestStorage, Storage, Store, BlockStapler, IndexedBlock};
use chain::IndexedBlock;
use db::{TestStorage, Storage, Store, BlockStapler};
use network::Magic;
use devtools::RandomTempPath;
use {script, test_data};

View File

@ -36,6 +36,10 @@ pub enum Error {
NonFinalBlock,
/// Old version block.
OldVersionBlock,
/// Sum of the transaction fees in the block plus the coinbase reward overflows u64
TransactionFeeAndRewardOverflow,
/// Sum of the transaction fees in the block overflows u64
TransactionFeesOverflow,
}
#[derive(Debug, PartialEq)]

View File

@ -112,5 +112,5 @@ pub use work::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_has
/// Interface for block verification
pub trait Verify : Send + Sync {
fn verify(&self, block: &db::IndexedBlock) -> VerificationResult;
fn verify(&self, block: &chain::IndexedBlock) -> VerificationResult;
}

View File

@ -1,5 +1,5 @@
use std::collections::HashSet;
use db::IndexedBlock;
use chain::IndexedBlock;
use sigops::transaction_sigops;
use duplex_store::NoopStore;
use error::{Error, TransactionError};

View File

@ -1,5 +1,5 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use db::IndexedBlock;
use chain::IndexedBlock;
use network::Magic;
use error::Error;
use verify_block::BlockVerifier;

View File

@ -1,5 +1,5 @@
use primitives::compact::Compact;
use db::IndexedBlockHeader;
use chain::IndexedBlockHeader;
use network::Magic;
use work::is_valid_proof_of_work;
use error::Error;

View File

@ -1,6 +1,6 @@
use std::ops;
use serialization::Serializable;
use db::IndexedTransaction;
use chain::IndexedTransaction;
use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;