ChainVerifier refactor in progress

debris 2016-12-11 16:03:16 +01:00
parent f1475696c8
commit b17e466af8
13 changed files with 546 additions and 44 deletions

View File

@ -238,6 +238,14 @@ impl Transaction {
&self.outputs
}
pub fn is_empty(&self) -> bool {
self.inputs.is_empty() || self.outputs.is_empty()
}
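/// True when any input references the null previous output, which only a coinbase transaction may legitimately do.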
pub fn is_null(&self) -> bool {
self.inputs.iter().any(|input| input.previous_output.is_null())
}
pub fn is_coinbase(&self) -> bool {
self.inputs.len() == 1 && self.inputs[0].previous_output.is_null()
}

View File

@ -3,27 +3,27 @@ use network::{Magic, ConsensusParams};
use db::{SharedStore, IndexedBlock, PreviousTransactionOutputProvider, BlockHeaderProvider};
use sigops::{StoreWithUnretainedOutputs, transaction_sigops};
use utils::{work_required, block_reward_satoshi};
use accept_header::CanonHeader;
use accept_transaction::CanonTransaction;
use constants::MAX_BLOCK_SIGOPS;
use error::Error;
// imports to rethink
use chain_verifier::MAX_BLOCK_SIGOPS;
const EXPECT_ORDERED: &'static str = "Block ancestors expected to be found in database";
/// Flexible verification of ordered block
pub struct OrderedBlockVerifier<'a> {
pub struct BlockAcceptor<'a> {
pub finality: BlockFinality<'a>,
pub sigops: BlockSigops<'a>,
pub work: BlockWork<'a>,
pub coinbase_claim: BlockCoinbaseClaim<'a>,
}
impl<'a> OrderedBlockVerifier<'a> {
pub fn new(store: &'a SharedStore, network: Magic, block: OrderedBlock<'a>, height: u32) -> Self {
impl<'a> BlockAcceptor<'a> {
pub fn new(store: &'a SharedStore, network: Magic, block: CanonBlock<'a>, height: u32) -> Self {
let params = network.consensus_params();
OrderedBlockVerifier {
BlockAcceptor {
finality: BlockFinality::new(block, height),
sigops: BlockSigops::new(block, store.as_previous_transaction_output_provider(), params),
sigops: BlockSigops::new(block, store.as_previous_transaction_output_provider(), params, MAX_BLOCK_SIGOPS),
work: BlockWork::new(block, store.as_block_header_provider(), height, network),
coinbase_claim: BlockCoinbaseClaim::new(block, store.as_previous_transaction_output_provider(), height),
}
@ -40,19 +40,27 @@ impl<'a> OrderedBlockVerifier<'a> {
/// Blocks whose parents are known to be in the chain
#[derive(Clone, Copy)]
pub struct OrderedBlock<'a> {
pub struct CanonBlock<'a> {
block: &'a IndexedBlock,
}
impl<'a> OrderedBlock<'a> {
impl<'a> CanonBlock<'a> {
pub fn new(block: &'a IndexedBlock) -> Self {
OrderedBlock {
CanonBlock {
block: block,
}
}
pub fn header<'b>(&'b self) -> CanonHeader<'a> where 'a: 'b {
CanonHeader::new(&self.block.header)
}
pub fn transactions<'b>(&'b self) -> Vec<CanonTransaction<'a>> where 'a: 'b {
self.block.transactions.iter().map(CanonTransaction::new).collect()
}
}
impl<'a> ops::Deref for OrderedBlock<'a> {
impl<'a> ops::Deref for CanonBlock<'a> {
type Target = IndexedBlock;
fn deref(&self) -> &Self::Target {
@ -60,18 +68,18 @@ impl<'a> ops::Deref for OrderedBlock<'a> {
}
}
trait OrderedBlockRule {
trait BlockRule {
/// If verification fails returns an error
fn check(&self) -> Result<(), Error>;
}
pub struct BlockFinality<'a> {
block: OrderedBlock<'a>,
block: CanonBlock<'a>,
height: u32,
}
impl<'a> BlockFinality<'a> {
fn new(block: OrderedBlock<'a>, height: u32) -> Self {
fn new(block: CanonBlock<'a>, height: u32) -> Self {
BlockFinality {
block: block,
height: height,
@ -79,7 +87,7 @@ impl<'a> BlockFinality<'a> {
}
}
impl<'a> OrderedBlockRule for BlockFinality<'a> {
impl<'a> BlockRule for BlockFinality<'a> {
fn check(&self) -> Result<(), Error> {
if self.block.is_final(self.height) {
Ok(())
@ -90,22 +98,24 @@ impl<'a> OrderedBlockRule for BlockFinality<'a> {
}
pub struct BlockSigops<'a> {
block: OrderedBlock<'a>,
block: CanonBlock<'a>,
store: &'a PreviousTransactionOutputProvider,
consensus_params: ConsensusParams,
max_sigops: usize,
}
impl<'a> BlockSigops<'a> {
fn new(block: OrderedBlock<'a>, store: &'a PreviousTransactionOutputProvider, consensus_params: ConsensusParams) -> Self {
fn new(block: CanonBlock<'a>, store: &'a PreviousTransactionOutputProvider, consensus_params: ConsensusParams, max_sigops: usize) -> Self {
BlockSigops {
block: block,
store: store,
consensus_params: consensus_params,
max_sigops: max_sigops,
}
}
}
impl<'a> OrderedBlockRule for BlockSigops<'a> {
impl<'a> BlockRule for BlockSigops<'a> {
fn check(&self) -> Result<(), Error> {
let store = StoreWithUnretainedOutputs::new(self.store, &*self.block);
let bip16_active = self.block.header.raw.time >= self.consensus_params.bip16_time;
@ -113,7 +123,7 @@ impl<'a> OrderedBlockRule for BlockSigops<'a> {
.map(|tx| transaction_sigops(&tx.raw, &store, bip16_active).expect(EXPECT_ORDERED))
.sum::<usize>();
if sigops > MAX_BLOCK_SIGOPS {
if sigops > self.max_sigops {
Err(Error::MaximumSigops)
} else {
Ok(())
@ -122,14 +132,14 @@ impl<'a> OrderedBlockRule for BlockSigops<'a> {
}
pub struct BlockWork<'a> {
block: OrderedBlock<'a>,
block: CanonBlock<'a>,
store: &'a BlockHeaderProvider,
height: u32,
network: Magic,
}
impl<'a> BlockWork<'a> {
fn new(block: OrderedBlock<'a>, store: &'a BlockHeaderProvider, height: u32, network: Magic) -> Self {
fn new(block: CanonBlock<'a>, store: &'a BlockHeaderProvider, height: u32, network: Magic) -> Self {
BlockWork {
block: block,
store: store,
@ -139,7 +149,7 @@ impl<'a> BlockWork<'a> {
}
}
impl<'a> OrderedBlockRule for BlockWork<'a> {
impl<'a> BlockRule for BlockWork<'a> {
fn check(&self) -> Result<(), Error> {
let previous_header_hash = self.block.header.raw.previous_header_hash.clone();
let time = self.block.header.raw.time;
@ -153,13 +163,13 @@ impl<'a> OrderedBlockRule for BlockWork<'a> {
}
pub struct BlockCoinbaseClaim<'a> {
block: OrderedBlock<'a>,
block: CanonBlock<'a>,
store: &'a PreviousTransactionOutputProvider,
height: u32,
}
impl<'a> BlockCoinbaseClaim<'a> {
fn new(block: OrderedBlock<'a>, store: &'a PreviousTransactionOutputProvider, height: u32) -> Self {
fn new(block: CanonBlock<'a>, store: &'a PreviousTransactionOutputProvider, height: u32) -> Self {
BlockCoinbaseClaim {
block: block,
store: store,
@ -168,7 +178,7 @@ impl<'a> BlockCoinbaseClaim<'a> {
}
}
impl<'a> OrderedBlockRule for BlockCoinbaseClaim<'a> {
impl<'a> BlockRule for BlockCoinbaseClaim<'a> {
fn check(&self) -> Result<(), Error> {
let store = StoreWithUnretainedOutputs::new(self.store, &*self.block);
let total_outputs = self.block.transactions.iter()
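
BlockRule above is the seam for contextual block checks: each rule captures the references it needs in new() and reports an Error from check(), and BlockAcceptor simply aggregates the rules. As a hedged illustration (not part of this commit), a hypothetical extra rule could follow the same pattern; the name BlockTransactionCount and the reuse of Error::Size are assumptions made only for this sketch.

// Hypothetical rule, shown only to illustrate the BlockRule pattern.
pub struct BlockTransactionCount<'a> {
	block: CanonBlock<'a>,
	max_transactions: usize,
}

impl<'a> BlockTransactionCount<'a> {
	fn new(block: CanonBlock<'a>, max_transactions: usize) -> Self {
		BlockTransactionCount {
			block: block,
			max_transactions: max_transactions,
		}
	}
}

impl<'a> BlockRule for BlockTransactionCount<'a> {
	fn check(&self) -> Result<(), Error> {
		// CanonBlock derefs to IndexedBlock, so its transactions are reachable directly.
		let count = self.block.transactions.len();
		if count > self.max_transactions {
			// Error::Size is reused here purely to keep the sketch self-contained.
			Err(Error::Size(count))
		} else {
			Ok(())
		}
	}
}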

View File

@ -0,0 +1,37 @@
use scoped_pool::Pool;
use db::SharedStore;
use network::Magic;
use error::Error;
use accept_block::{CanonBlock, BlockAcceptor};
use accept_header::HeaderAcceptor;
use accept_transaction::TransactionAcceptor;
pub struct ChainAcceptor<'a> {
pub block: BlockAcceptor<'a>,
pub header: HeaderAcceptor<'a>,
pub transactions: Vec<TransactionAcceptor<'a>>,
}
impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a SharedStore, network: Magic, block: CanonBlock<'a>, height: u32) -> Self {
ChainAcceptor {
block: BlockAcceptor::new(store, network, block, height),
header: HeaderAcceptor::new(block.header()),
transactions: block.transactions().into_iter().map(TransactionAcceptor::new).collect(),
}
}
pub fn check(&self) -> Result<(), Error> {
try!(self.block.check());
try!(self.header.check());
self.transactions.iter()
.enumerate()
.map(|(index, tx)| tx.check().map_err(|err| Error::Transaction(index, err)))
.collect::<Result<Vec<_>, _>>()?;
Ok(())
}
pub fn parallel_check(&self, _pool: &Pool) -> Result<(), Error> {
unimplemented!();
}
}
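
For orientation, a minimal call-site sketch for the new ChainAcceptor, assuming a block that has already passed stateless verification and is known to extend the canon chain; the helper name accept_canon_block is an assumption, not something introduced by this commit.

use db::{SharedStore, IndexedBlock};
use network::Magic;
use error::Error;
use accept_block::CanonBlock;
use accept_chain::ChainAcceptor;

// Hypothetical call site: wrap the block as canon, build the acceptor, run all checks.
fn accept_canon_block(store: &SharedStore, network: Magic, block: &IndexedBlock, height: u32) -> Result<(), Error> {
	let canon = CanonBlock::new(block);
	ChainAcceptor::new(store, network, canon, height).check()
}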

View File

@ -0,0 +1,31 @@
use db::IndexedBlockHeader;
use error::Error;
pub struct HeaderAcceptor<'a> {
_tmp: CanonHeader<'a>,
}
impl<'a> HeaderAcceptor<'a> {
pub fn new(header: CanonHeader<'a>) -> Self {
HeaderAcceptor {
_tmp: header,
}
}
pub fn check(&self) -> Result<(), Error> {
Ok(())
}
}
#[derive(Clone, Copy)]
pub struct CanonHeader<'a> {
header: &'a IndexedBlockHeader,
}
impl<'a> CanonHeader<'a> {
pub fn new(header: &'a IndexedBlockHeader) -> Self {
CanonHeader {
header: header,
}
}
}

View File

@ -0,0 +1,31 @@
use db::IndexedTransaction;
use error::TransactionError;
pub struct TransactionAcceptor<'a> {
_tmp: CanonTransaction<'a>,
}
impl<'a> TransactionAcceptor<'a> {
pub fn new(transaction: CanonTransaction<'a>) -> Self {
TransactionAcceptor {
_tmp: transaction,
}
}
pub fn check(&self) -> Result<(), TransactionError> {
Ok(())
}
}
#[derive(Clone, Copy)]
pub struct CanonTransaction<'a> {
transaction: &'a IndexedTransaction,
}
impl<'a> CanonTransaction<'a> {
pub fn new(transaction: &'a IndexedTransaction) -> Self {
CanonTransaction {
transaction: transaction,
}
}
}

View File

@ -8,11 +8,7 @@ use network::{Magic, ConsensusParams};
use error::{Error, TransactionError};
use sigops::{StoreWithUnretainedOutputs, transaction_sigops};
use {Verify, chain, utils};
const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
const COINBASE_MATURITY: u32 = 100; // 2 hours
pub const MAX_BLOCK_SIZE: usize = 1_000_000;
pub const MAX_BLOCK_SIGOPS: usize = 20_000;
use constants::{BLOCK_MAX_FUTURE, COINBASE_MATURITY, MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
const TRANSACTIONS_VERIFY_THREADS: usize = 8;
const TRANSACTIONS_VERIFY_PARALLEL_THRESHOLD: usize = 32;

View File

@ -0,0 +1,8 @@
//! Consensus constants
pub const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
pub const COINBASE_MATURITY: u32 = 100; // in blocks
pub const MAX_BLOCK_SIZE: usize = 1_000_000;
pub const MAX_BLOCK_SIGOPS: usize = 20_000;
pub const MIN_COINBASE_SIZE: usize = 2;
pub const MAX_COINBASE_SIZE: usize = 100;

View File

@ -39,6 +39,18 @@ pub enum Error {
#[derive(Debug, PartialEq)]
/// Possible transaction verification errors
pub enum TransactionError {
/// Transaction has no inputs or no outputs
Empty,
/// Transaction is not a coinbase transaction but has null inputs
NullNonCoinbase,
/// Coinbase signature script length is not in the range of 2-100 bytes
CoinbaseSignatureLength(usize),
/// Transaction size exceeds block size limit
MaxSize,
/// Transaction has more sigops than allowed
MaxSigops,
/// Transaction is part of the memory pool, but is a coinbase
MemoryPoolCoinbase,
/// No corresponding output found for transaction input
Input(usize),
/// Referenced coinbase output for the transaction input is not mature enough

View File

@ -1,4 +1,38 @@
//! Bitcoin blocks verification
//! Bitcoin consensus verification
//!
//! --> A. on_new_block:
//!
//! A.1 VerifyHeader
//! A.2 VerifyBlock,
//! A.3 VerifyTransaction for each tx
//!
//! A.4.a if it is block from canon chain
//! A.4.a.1 AcceptHeader
//! A.4.a.2 AcceptBlock
//! A.4.a.3 AcceptTransaction for each tx
//!
//! A.4.b if it is block from side chain becoming canon
//! decanonize old canon chain blocks
//! canonize new canon chain blocks (without currently processed block)
//! A.4.b.1 AcceptHeader for each header
//! A.4.b.2 AcceptBlock for each block
//! A.4.b.3 AcceptTransaction for each tx in each block
//! A.4.b.4 AcceptHeader
//! A.4.b.5 AcceptBlock
//! A.4.b.6 AcceptTransaction for each tx
//! if any step failed, revert chain back to old canon
//!
//! A.4.c if it is block from side chain do nothing
//!
//! --> B. on_memory_pool_transaction
//!
//! B.1 VerifyMemoryPoolTransaction
//! B.2 AcceptMemoryPoolTransaction
//!
//! --> C. on_block_header
//!
//! C.1 VerifyHeader
//! C.2 AcceptHeader (?)
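
A rough sketch of scenario A above for the canon-chain case, chaining the stateless pass with the contextual pass; the glue function below is assumed for illustration and is not part of this commit.

// Hypothetical wiring for A.1-A.3 followed by A.4.a.1-A.4.a.3.
fn on_new_canon_block(
	store: &db::SharedStore,
	network: network::Magic,
	block: &db::IndexedBlock,
	height: u32,
	current_time: u32,
) -> Result<(), error::Error> {
	// A.1-A.3: context-free header, block and per-transaction checks.
	verify_chain::ChainVerifier::new(block, network, current_time).check()?;
	// A.4.a.1-A.4.a.3: contextual checks against the database.
	let canon = accept_block::CanonBlock::new(block);
	accept_chain::ChainAcceptor::new(store, network, canon, height).check()
}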
extern crate byteorder;
extern crate parking_lot;
@ -25,15 +59,28 @@ mod sigops;
mod task;
mod utils;
pub mod constants;
mod accept_block;
mod accept_chain;
mod accept_header;
mod accept_transaction;
mod verify_block;
mod verify_ordered_block;
mod verify_chain;
mod verify_header;
mod verify_transaction;
pub use primitives::{uint, hash, compact};
pub use accept_block::{BlockAcceptor, CanonBlock};
pub use accept_chain::ChainAcceptor;
pub use accept_header::{HeaderAcceptor, CanonHeader};
pub use accept_transaction::{TransactionAcceptor, CanonTransaction};
pub use verify_block::BlockVerifier;
pub use verify_ordered_block::{OrderedBlockVerifier, OrderedBlock};
pub use verify_chain::ChainVerifier as XXXChainVerifier;
pub use verify_header::HeaderVerifier;
pub use verify_transaction::{TransactionVerifier, MemoryPoolTransactionVerifier};
pub use chain_verifier::{Chain, ChainVerifier, VerificationResult, MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
pub use chain_verifier::{Chain, ChainVerifier, VerificationResult};
pub use error::{Error, TransactionError};
pub use sigops::{transaction_sigops, StoreWithUnretainedOutputs};
pub use utils::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_hash, block_reward_satoshi};

View File

@ -2,9 +2,7 @@ use std::collections::HashSet;
use db::IndexedBlock;
use sigops::transaction_sigops_raw;
use error::{Error, TransactionError};
// imports to rethink
use chain_verifier::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
use constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS};
pub struct BlockVerifier<'a> {
pub empty: BlockEmpty<'a>,
@ -21,10 +19,10 @@ impl<'a> BlockVerifier<'a> {
BlockVerifier {
empty: BlockEmpty::new(block),
coinbase: BlockCoinbase::new(block),
serialized_size: BlockSerializedSize::new(block),
serialized_size: BlockSerializedSize::new(block, MAX_BLOCK_SIZE),
extra_coinbases: BlockExtraCoinbases::new(block),
transactions_uniqueness: BlockTransactionsUniqueness::new(block),
sigops: BlockSigops::new(block),
sigops: BlockSigops::new(block, MAX_BLOCK_SIGOPS),
merkle_root: BlockMerkleRoot::new(block),
}
}
@ -69,12 +67,14 @@ impl<'a> BlockRule for BlockEmpty<'a> {
pub struct BlockSerializedSize<'a> {
block: &'a IndexedBlock,
max_size: usize,
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: &'a IndexedBlock) -> Self {
fn new(block: &'a IndexedBlock, max_size: usize) -> Self {
BlockSerializedSize {
block: block,
max_size: max_size,
}
}
}
@ -82,7 +82,7 @@ impl<'a> BlockSerializedSize<'a> {
impl<'a> BlockRule for BlockSerializedSize<'a> {
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
if size > MAX_BLOCK_SIZE {
if size > self.max_size {
Err(Error::Size(size))
} else {
Ok(())
@ -162,12 +162,14 @@ impl<'a> BlockRule for BlockTransactionsUniqueness<'a> {
pub struct BlockSigops<'a> {
block: &'a IndexedBlock,
max_sigops: usize,
}
impl<'a> BlockSigops<'a> {
fn new(block: &'a IndexedBlock) -> Self {
fn new(block: &'a IndexedBlock, max_sigops: usize) -> Self {
BlockSigops {
block: block,
max_sigops: max_sigops,
}
}
}
@ -179,7 +181,7 @@ impl<'a> BlockRule for BlockSigops<'a> {
.map(|tx| transaction_sigops_raw(&tx.raw, None).expect("bip16 is disabled"))
.sum::<usize>();
if sigops > MAX_BLOCK_SIGOPS {
if sigops > self.max_sigops {
Err(Error::MaximumSigops)
} else {
Ok(())
@ -208,3 +210,4 @@ impl<'a> BlockRule for BlockMerkleRoot<'a> {
}
}
}

View File

@ -0,0 +1,37 @@
use scoped_pool::Pool;
use db::IndexedBlock;
use network::Magic;
use error::Error;
use verify_block::BlockVerifier;
use verify_header::HeaderVerifier;
use verify_transaction::TransactionVerifier;
pub struct ChainVerifier<'a> {
pub block: BlockVerifier<'a>,
pub header: HeaderVerifier<'a>,
pub transactions: Vec<TransactionVerifier<'a>>,
}
impl<'a> ChainVerifier<'a> {
pub fn new(block: &'a IndexedBlock, network: Magic, current_time: u32) -> Self {
ChainVerifier {
block: BlockVerifier::new(block),
header: HeaderVerifier::new(&block.header, network, current_time),
transactions: block.transactions.iter().map(TransactionVerifier::new).collect(),
}
}
pub fn check(&self) -> Result<(), Error> {
try!(self.block.check());
try!(self.header.check());
self.transactions.iter()
.enumerate()
.map(|(index, tx)| tx.check().map_err(|err| Error::Transaction(index, err)))
.collect::<Result<Vec<_>, _>>()?;
Ok(())
}
pub fn parallel_check(&self, _pool: &Pool) -> Result<(), Error> {
unimplemented!();
}
}

View File

@ -0,0 +1,79 @@
use primitives::compact::Compact;
use db::IndexedBlockHeader;
use network::Magic;
use utils::is_valid_proof_of_work;
use error::Error;
use constants::BLOCK_MAX_FUTURE;
pub struct HeaderVerifier<'a> {
pub proof_of_work: HeaderProofOfWork<'a>,
pub timestamp: HeaderTimestamp<'a>,
}
impl<'a> HeaderVerifier<'a> {
pub fn new(header: &'a IndexedBlockHeader, network: Magic, current_time: u32) -> Self {
HeaderVerifier {
proof_of_work: HeaderProofOfWork::new(header, network),
timestamp: HeaderTimestamp::new(header, current_time, BLOCK_MAX_FUTURE as u32),
}
}
pub fn check(&self) -> Result<(), Error> {
try!(self.proof_of_work.check());
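// NOTE: self.timestamp is built in new() but not yet checked here (refactor in progress).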
Ok(())
}
}
pub trait HeaderRule {
fn check(&self) -> Result<(), Error>;
}
pub struct HeaderProofOfWork<'a> {
header: &'a IndexedBlockHeader,
max_work_bits: Compact,
}
impl<'a> HeaderProofOfWork<'a> {
fn new(header: &'a IndexedBlockHeader, network: Magic) -> Self {
HeaderProofOfWork {
header: header,
max_work_bits: network.max_bits(),
}
}
}
impl<'a> HeaderRule for HeaderProofOfWork<'a> {
fn check(&self) -> Result<(), Error> {
if is_valid_proof_of_work(self.max_work_bits, self.header.raw.bits, &self.header.hash) {
Ok(())
} else {
Err(Error::Pow)
}
}
}
pub struct HeaderTimestamp<'a> {
header: &'a IndexedBlockHeader,
current_time: u32,
max_future: u32,
}
impl<'a> HeaderTimestamp<'a> {
fn new(header: &'a IndexedBlockHeader, current_time: u32, max_future: u32) -> Self {
HeaderTimestamp {
header: header,
current_time: current_time,
max_future: max_future,
}
}
}
impl<'a> HeaderRule for HeaderTimestamp<'a> {
fn check(&self) -> Result<(), Error> {
if self.header.raw.time > self.current_time + self.max_future {
Err(Error::FuturisticTimestamp)
} else {
Ok(())
}
}
}

View File

@ -0,0 +1,203 @@
use std::ops;
use serialization::Serializable;
use db::IndexedTransaction;
use sigops::transaction_sigops_raw;
use error::TransactionError;
use constants::{MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS, MIN_COINBASE_SIZE, MAX_COINBASE_SIZE};
pub struct TransactionVerifier<'a> {
pub empty: TransactionEmpty<'a>,
pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
pub oversized_coinbase: TransactionOversizedCoinbase<'a>,
}
impl<'a> TransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction) -> Self {
TransactionVerifier {
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
oversized_coinbase: TransactionOversizedCoinbase::new(transaction, MIN_COINBASE_SIZE..MAX_COINBASE_SIZE),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.empty.check());
try!(self.null_non_coinbase.check());
try!(self.oversized_coinbase.check());
Ok(())
}
}
pub struct MemoryPoolTransactionVerifier<'a> {
pub empty: TransactionEmpty<'a>,
pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
pub is_coinbase: TransactionMemoryPoolCoinbase<'a>,
pub size: TransactionSize<'a>,
pub sigops: TransactionSigops<'a>,
}
impl<'a> MemoryPoolTransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction) -> Self {
MemoryPoolTransactionVerifier {
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
size: TransactionSize::new(transaction, MAX_BLOCK_SIZE),
sigops: TransactionSigops::new(transaction, MAX_BLOCK_SIGOPS),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.empty.check());
try!(self.null_non_coinbase.check());
try!(self.is_coinbase.check());
try!(self.size.check());
try!(self.sigops.check());
Ok(())
}
}
trait TransactionRule {
fn check(&self) -> Result<(), TransactionError>;
}
pub struct TransactionEmpty<'a> {
transaction: &'a IndexedTransaction,
}
impl<'a> TransactionEmpty<'a> {
fn new(transaction: &'a IndexedTransaction) -> Self {
TransactionEmpty {
transaction: transaction,
}
}
}
impl<'a> TransactionRule for TransactionEmpty<'a> {
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.is_empty() {
Err(TransactionError::Empty)
} else {
Ok(())
}
}
}
pub struct TransactionNullNonCoinbase<'a> {
transaction: &'a IndexedTransaction,
}
impl<'a> TransactionNullNonCoinbase<'a> {
fn new(transaction: &'a IndexedTransaction) -> Self {
TransactionNullNonCoinbase {
transaction: transaction,
}
}
}
impl<'a> TransactionRule for TransactionNullNonCoinbase<'a> {
fn check(&self) -> Result<(), TransactionError> {
if !self.transaction.raw.is_coinbase() && self.transaction.raw.is_null() {
Err(TransactionError::NullNonCoinbase)
} else {
Ok(())
}
}
}
pub struct TransactionOversizedCoinbase<'a> {
transaction: &'a IndexedTransaction,
size_range: ops::Range<usize>,
}
impl<'a> TransactionOversizedCoinbase<'a> {
fn new(transaction: &'a IndexedTransaction, size_range: ops::Range<usize>) -> Self {
TransactionOversizedCoinbase {
transaction: transaction,
size_range: size_range,
}
}
}
impl<'a> TransactionRule for TransactionOversizedCoinbase<'a> {
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.is_coinbase() {
let script_len = self.transaction.raw.inputs[0].script_sig.len();
if script_len < self.size_range.start || script_len > self.size_range.end {
return Err(TransactionError::CoinbaseSignatureLength(script_len));
}
}
Ok(())
}
}
pub struct TransactionMemoryPoolCoinbase<'a> {
transaction: &'a IndexedTransaction,
}
impl<'a> TransactionMemoryPoolCoinbase<'a> {
fn new(transaction: &'a IndexedTransaction) -> Self {
TransactionMemoryPoolCoinbase {
transaction: transaction,
}
}
}
impl<'a> TransactionRule for TransactionMemoryPoolCoinbase<'a> {
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.is_coinbase() {
Err(TransactionError::MemoryPoolCoinbase)
} else {
Ok(())
}
}
}
pub struct TransactionSize<'a> {
transaction: &'a IndexedTransaction,
max_size: usize,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: &'a IndexedTransaction, max_size: usize) -> Self {
TransactionSize {
transaction: transaction,
max_size: max_size,
}
}
}
impl<'a> TransactionRule for TransactionSize<'a> {
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.serialized_size() > self.max_size {
Err(TransactionError::MaxSize)
} else {
Ok(())
}
}
}
pub struct TransactionSigops<'a> {
transaction: &'a IndexedTransaction,
max_sigops: usize,
}
impl<'a> TransactionSigops<'a> {
fn new(transaction: &'a IndexedTransaction, max_sigops: usize) -> Self {
TransactionSigops {
transaction: transaction,
max_sigops: max_sigops,
}
}
}
impl<'a> TransactionRule for TransactionSigops<'a> {
fn check(&self) -> Result<(), TransactionError> {
let sigops = transaction_sigops_raw(&self.transaction.raw, None).expect("bip16 is disabled");
if sigops > self.max_sigops {
Err(TransactionError::MaxSigops)
} else {
Ok(())
}
}
}
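
Lastly, a hedged sketch for scenario B from the module docs, showing how the memory-pool rule set could be invoked; the entry-point name is an assumption, not part of this commit.

use db::IndexedTransaction;
use error::TransactionError;
use verify_transaction::MemoryPoolTransactionVerifier;

// Hypothetical scenario-B entry point: mempool transactions get the stricter rule set
// (coinbase rejection plus size and sigops caps) before being accepted into the pool.
fn verify_memory_pool_transaction(tx: &IndexedTransaction) -> Result<(), TransactionError> {
	MemoryPoolTransactionVerifier::new(tx).check()
}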