segwit: cleaned up duplicate structs

Svyatoslav Nikolsky 2017-08-21 11:57:28 +03:00
parent 062a68204c
commit 4c432858cf
15 changed files with 147 additions and 241 deletions

View File

@ -11,12 +11,6 @@ pub struct Block {
pub transactions: Vec<Transaction>,
}
#[derive(Debug, PartialEq, Clone)]
pub struct WitnessBlock<'a> {
pub block_header: BlockHeader,
pub transactions: Vec<&'a Transaction>,
}
impl From<&'static str> for Block {
fn from(s: &'static str) -> Self {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
@ -60,10 +54,6 @@ impl Block {
pub fn hash(&self) -> H256 {
self.block_header.hash()
}
pub fn cost(&self) -> u64 {
unimplemented!()
}
}
#[cfg(test)]

View File

@ -11,7 +11,9 @@ use hash::H256;
use constants::{SEQUENCE_FINAL, LOCKTIME_THRESHOLD};
use ser::{Error, Serializable, Deserializable, Stream, Reader};
/// Must be zero.
const WITNESS_MARKER: u8 = 0;
/// Must be nonzero.
const WITNESS_FLAG: u8 = 1;
#[derive(Debug, PartialEq, Eq, Clone, Default, Serializable, Deserializable)]
@ -179,10 +181,6 @@ impl Transaction {
}
result
}
pub fn cost(&self) -> u64 {
unimplemented!()
}
}
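Note on the two constants above: under BIP 144, a transaction carrying witness data is serialized as version, a marker byte that must be zero, a flag byte that must be non-zero (currently 1), the usual inputs and outputs, the per-input witness stacks, and the locktime. A minimal sketch of how that marker/flag pair distinguishes the extended serialization (illustrative helper only, not part of this diff; after_version is assumed to start right after the 4-byte version field):

fn looks_like_witness_serialization(after_version: &[u8]) -> bool {
    // 0x00 can never be a legacy input count (a legacy tx has at least one input),
    // so marker + non-zero flag unambiguously signals the witness serialization.
    after_version.len() >= 2
        && after_version[0] == WITNESS_MARKER
        && after_version[1] == WITNESS_FLAG
}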
impl Serializable for TransactionInput {

View File

View File

@ -1,6 +1,6 @@
use std::cmp::max;
use hash::H256;
use {Magic, Deployment, Deployments};
use {Magic, Deployment};
/// First block of SegWit2x fork.
pub const SEGWIT2X_FORK_BLOCK: u32 = 0xFFFFFFFF; // not known (yet?)
@ -9,9 +9,9 @@ pub const BITCOIN_CASH_FORK_BLOCK: u32 = 478559; // https://blockchair.com/bitco
/// Segwit-related constants.
pub mod segwit {
/// The maximum allowed weight for a block, see BIP 141 (network rule)
/// The maximum allowed weight for a block, see BIP 141 (network rule).
pub const MAX_BLOCK_WEIGHT: usize = 4_000_000;
/// The maximum allowed number of signature check operations in a block (network rule)
/// The maximum allowed number of signature check operations in a block (network rule).
pub const MAX_BLOCK_SIGOPS_COST: usize = 80_000;
/// Witness scale factor.
pub const WITNESS_SCALE_FACTOR: usize = 4;
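For orientation, these constants encode the BIP 141 weight rule: block weight is the base (witness-stripped) size multiplied by WITNESS_SCALE_FACTOR - 1 plus the full serialized size including witness data, and must not exceed MAX_BLOCK_WEIGHT. A free-standing sketch of that arithmetic (illustrative helpers, not part of this diff; the actual check is in BlockSerializedSize::check later in this commit):

fn block_weight(base_size: usize, size_with_witness: usize) -> usize {
    // Non-witness bytes count WITNESS_SCALE_FACTOR times, witness bytes count once.
    base_size * (WITNESS_SCALE_FACTOR - 1) + size_with_witness
}

fn block_weight_ok(base_size: usize, size_with_witness: usize) -> bool {
    // A 1_000_000-byte block without witness data weighs exactly 4_000_000.
    block_weight(base_size, size_with_witness) <= MAX_BLOCK_WEIGHT
}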
@ -166,50 +166,33 @@ impl ConsensusFork {
8_000_000
}
// Absolute (across all forks) maximum number of sigops in single block. Currently is max(sigops) for 8MB post-HF BitcoinCash block
/// Absolute (across all forks) maximum number of sigops in single block. Currently is max(sigops) for 8MB post-HF BitcoinCash block
pub fn absolute_maximum_block_sigops() -> usize {
160_000
}
pub fn max_transaction_size(&self) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// SegWit: size * 4 <= 4_000_000 ===> max size of tx is still 1_000_000
1_000_000
}
pub fn min_block_size(&self, height: u32) -> usize {
match *self {
// size of first fork block must be larger than 1MB
ConsensusFork::BitcoinCash(fork_height) if height == fork_height => 1_000_001,
ConsensusFork::NoFork | ConsensusFork::BitcoinCash(_) | ConsensusFork::SegWit2x(_) => 0,
}
}
pub fn max_block_size(&self, height: u32) -> usize {
match *self {
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height => 8_000_000,
ConsensusFork::SegWit2x(fork_height) if height >= fork_height => 2_000_000,
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height => 8_000_000,
ConsensusFork::NoFork | ConsensusFork::BitcoinCash(_) | ConsensusFork::SegWit2x(_) => 1_000_000,
}
}
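The removed check_block_size predicate below is replaced by the min_block_size/max_block_size pair above; callers now compare the serialized size against both bounds, as the verifier change later in this commit does. A small sketch of that pattern (illustrative helper, not code from this diff):

fn block_size_ok(fork: &ConsensusFork, size: usize, height: u32) -> bool {
    // For BitcoinCash at exactly its fork height this requires
    // 1_000_001 <= size <= 8_000_000; elsewhere the lower bound is 0.
    size >= fork.min_block_size(height) && size <= fork.max_block_size(height)
}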
pub fn check_block_size(&self, size: usize, height: u32, deployments: &Deployments) -> bool {
match *self {
// bitcoin cash fork block must be > 1_000_000 and <= 8_000_000
ConsensusFork::BitcoinCash(fork_height) if height == fork_height =>
size > 1_000_000 && size <= 8_000_000,
// bitcoin cash support blocks up to 8_000_000
ConsensusFork::BitcoinCash(fork_height) if height > fork_height =>
size <= 8_000_000,
// max size of SegWit2x block is 2MB
ConsensusFork::SegWit2x(fork_height) if height >= fork_height =>
size <= 2_000_000,
// when segwit is deployed, this expression is used. which, in turn, also allows block size <= 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) if deployments.is_active("segwit") =>
size.saturating_mul(segwit::WITNESS_SCALE_FACTOR) <= segwit::MAX_BLOCK_WEIGHT,
// without segwit and before fork, max size is 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) =>
size <= 1_000_000,
}
}
pub fn check_transaction_size(&self, size: usize, deployments: &Deployments) -> bool {
match *self {
// when segwit is deployed, this expression is used. which, in turn, is the same max tx size 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) if deployments.is_active("segwit") =>
size.saturating_mul(segwit::WITNESS_SCALE_FACTOR) <= segwit::MAX_BLOCK_WEIGHT,
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// ConsensusFork::NoFork | ConsensusFork::SegWit2x: max size of tx is 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) => size <= 1_000_000,
}
}
pub fn max_block_sigops(&self, height: u32, block_size: usize) -> usize {
match *self {
// according to REQ-5: max_block_sigops = 20000 * ceil((max(blocksize_bytes, 1000000) / 1000000))
@ -219,24 +202,12 @@ impl ConsensusFork {
}
}
pub fn check_block_sigops(&self, sigops: usize, height: u32, block_size: usize, deployments: &Deployments) -> bool {
pub fn max_block_sigops_cost(&self, height: u32, block_size: usize) -> usize {
match *self {
// according to REQ-5: max_block_sigops = 20000 * ceil((max(blocksize_bytes, 1000000) / 1000000))
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height && block_size > 1_000_000 =>
sigops <= 20_000 * (max(block_size, 1_000_000) / 1_000_000),
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) if deployments.is_active("segwit") =>
sigops * segwit::WITNESS_SCALE_FACTOR <= segwit::MAX_BLOCK_SIGOPS_COST,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) =>
sigops <= 20_000,
}
}
pub fn check_block_sigops_cost(&self, sigops_cost: usize, deployments: &Deployments) -> bool {
match *self {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) if deployments.is_active("segwit") =>
sigops_cost <= segwit::MAX_BLOCK_SIGOPS_COST,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) =>
true,
ConsensusFork::BitcoinCash(_) =>
self.max_block_sigops(height, block_size) * segwit::WITNESS_SCALE_FACTOR,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) =>
segwit::MAX_BLOCK_SIGOPS_COST,
}
}
}
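To make the sigops limits concrete: the legacy rule allows 20_000 sigops, scaled up with block size for post-fork Bitcoin Cash blocks (REQ-5), while the new max_block_sigops_cost multiplies that limit by the witness scale factor for Bitcoin Cash and caps NoFork/SegWit2x at segwit::MAX_BLOCK_SIGOPS_COST. A small illustration of the values these formulas produce (not a test from the repository):

fn sigops_limit_examples() {
    // Legacy sigops: 20_000 per (at least) 1_000_000 bytes of block size.
    assert_eq!(ConsensusFork::NoFork.max_block_sigops(0, 1_000_000), 20_000);
    assert_eq!(ConsensusFork::BitcoinCash(0).max_block_sigops(1, 8_000_000), 160_000);

    // Sigops cost: BitcoinCash scales the legacy limit by WITNESS_SCALE_FACTOR (4),
    // NoFork/SegWit2x use the BIP 141 constant (80_000).
    assert_eq!(ConsensusFork::BitcoinCash(0).max_block_sigops_cost(1, 8_000_000), 640_000);
    assert_eq!(ConsensusFork::NoFork.max_block_sigops_cost(0, 1_000_000), 80_000);
}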
@ -245,7 +216,7 @@ impl ConsensusFork {
mod tests {
use super::super::Magic;
use super::{ConsensusParams, ConsensusFork};
use deployments::tests::DummyDeployments;
//use deployments::tests::DummyDeployments;
#[test]
fn test_consensus_params_bip34_height() {
@ -282,33 +253,6 @@ mod tests {
assert_eq!(ConsensusParams::new(Magic::Regtest, ConsensusFork::NoFork).miner_confirmation_window, 144);
}
#[test]
fn test_consensus_fork_check_transaction_size() {
assert_eq!(ConsensusFork::NoFork.check_transaction_size(800_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(1_000_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(4_000_000, &DummyDeployments::default()), false);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(800_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(1_000_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(4_000_000, &DummyDeployments::deployed()), false);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(800_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(1_000_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(4_000_000, &DummyDeployments::default()), false);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(800_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(1_000_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(4_000_000, &DummyDeployments::deployed()), false);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(800_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(1_000_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(4_000_000, &DummyDeployments::default()), false);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(800_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(1_000_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(4_000_000, &DummyDeployments::deployed()), false);
}
#[test]
fn test_consensus_fork_max_block_sigops() {
assert_eq!(ConsensusFork::NoFork.max_block_sigops(0, 1_000_000), 20_000);

View File

@ -15,41 +15,8 @@ pub struct Deployment {
pub activation: Option<u32>,
}
/// Deployments state.
pub trait Deployments {
/// Is deployment currently active?
fn is_active(&self, name: &str) -> bool;
}
impl Deployment {
pub fn matches(&self, version: u32) -> bool {
(version & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS && (version & (1 << self.bit)) != 0
}
}
#[cfg(test)]
pub mod tests {
use super::Deployments;
#[derive(Default, Debug)]
pub struct DummyDeployments {
pub segwit_active: bool,
}
impl DummyDeployments {
pub fn deployed() -> Self {
DummyDeployments {
segwit_active: true,
}
}
}
impl Deployments for DummyDeployments {
fn is_active(&self, name: &str) -> bool {
match name {
"segwit" => self.segwit_active,
_ => false,
}
}
}
}

View File

@ -9,6 +9,6 @@ mod magic;
pub use primitives::{hash, compact};
pub use consensus::{ConsensusParams, ConsensusFork, SEGWIT2X_FORK_BLOCK, BITCOIN_CASH_FORK_BLOCK, segwit};
pub use deployments::{Deployment, Deployments};
pub use deployments::Deployment;
pub use magic::Magic;

View File

@ -1,4 +1,4 @@
use network::{ConsensusParams, Deployments as NetworkDeployments, segwit};
use network::{ConsensusParams, segwit};
use crypto::dhash256;
use db::{TransactionOutputProvider, BlockHeaderProvider};
use script;
@ -6,7 +6,7 @@ use ser::Stream;
use sigops::{transaction_sigops, transaction_sigops_cost};
use work::block_reward_satoshi;
use duplex_store::DuplexTransactionOutputProvider;
use deployments::{Deployments, ActiveDeployments};
use deployments::BlockDeployments;
use canon::CanonBlock;
use error::{Error, TransactionError};
use timestamp::median_timestamp;
@ -27,15 +27,15 @@ impl<'a> BlockAcceptor<'a> {
consensus: &'a ConsensusParams,
block: CanonBlock<'a>,
height: u32,
deployments: ActiveDeployments<'a>,
deployments: &'a BlockDeployments<'a>,
headers: &'a BlockHeaderProvider,
) -> Self {
BlockAcceptor {
finality: BlockFinality::new(block, height, deployments.deployments, headers, consensus),
finality: BlockFinality::new(block, height, deployments, headers),
serialized_size: BlockSerializedSize::new(block, consensus, deployments, height),
coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, consensus, deployments, height),
sigops: BlockSigops::new(block, store, consensus, height),
witness: BlockWitness::new(block, deployments),
}
}
@ -59,8 +59,8 @@ pub struct BlockFinality<'a> {
}
impl<'a> BlockFinality<'a> {
fn new(block: CanonBlock<'a>, height: u32, deployments: &'a Deployments, headers: &'a BlockHeaderProvider, params: &ConsensusParams) -> Self {
let csv_active = deployments.csv(height, headers, params);
fn new(block: CanonBlock<'a>, height: u32, deployments: &'a BlockDeployments<'a>, headers: &'a BlockHeaderProvider) -> Self {
let csv_active = deployments.csv();
BlockFinality {
block: block,
@ -88,32 +88,30 @@ impl<'a> BlockFinality<'a> {
pub struct BlockSerializedSize<'a> {
block: CanonBlock<'a>,
consensus: &'a ConsensusParams,
deployments: ActiveDeployments<'a>,
height: u32,
segwit_active: bool,
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, deployments: ActiveDeployments<'a>, height: u32) -> Self {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, deployments: &'a BlockDeployments<'a>, height: u32) -> Self {
let segwit_active = deployments.segwit();
BlockSerializedSize {
block: block,
consensus: consensus,
deployments: deployments,
height: height,
segwit_active: segwit_active,
}
}
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
if !self.consensus.fork.check_block_size(size, self.height, &self.deployments) {
return Err(Error::Size(size))
if size < self.consensus.fork.min_block_size(self.height) ||
size > self.consensus.fork.max_block_size(self.height) {
return Err(Error::Size(size));
}
let is_segwit_active = self.deployments.is_active("segwit");
if is_segwit_active {
if self.block.transactions.len() * segwit::WITNESS_SCALE_FACTOR > segwit::MAX_BLOCK_WEIGHT {
return Err(Error::Weight);
}
if self.segwit_active {
let size_with_witness = self.block.size_with_witness();
let weight = size * (segwit::WITNESS_SCALE_FACTOR - 1) + size_with_witness;
if weight > segwit::MAX_BLOCK_WEIGHT {
@ -128,39 +126,42 @@ pub struct BlockSigops<'a> {
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
consensus: &'a ConsensusParams,
deployments: ActiveDeployments<'a>,
height: u32,
bip16_active: bool,
}
impl<'a> BlockSigops<'a> {
fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, consensus: &'a ConsensusParams, deployments: ActiveDeployments<'a>, height: u32) -> Self {
fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, consensus: &'a ConsensusParams, height: u32) -> Self {
let bip16_active = block.header.raw.time >= consensus.bip16_time;
BlockSigops {
block: block,
store: store,
consensus: consensus,
deployments: deployments,
height: height,
bip16_active: bip16_active,
}
}
fn check(&self) -> Result<(), Error> {
let store = DuplexTransactionOutputProvider::new(self.store, &*self.block);
let bip16_active = self.block.header.raw.time >= self.consensus.bip16_time;
let sigops = self.block.transactions.iter()
.map(|tx| transaction_sigops(&tx.raw, &store, bip16_active))
.sum::<usize>();
let (sigops, sigops_cost) = self.block.transactions.iter()
.map(|tx| {
let tx_sigops = transaction_sigops(&tx.raw, &store, self.bip16_active);
let tx_sigops_cost = transaction_sigops_cost(&tx.raw, &store, tx_sigops);
(tx_sigops, tx_sigops_cost)
})
.fold((0, 0), |acc, (tx_sigops, tx_sigops_cost)| (acc.0 + tx_sigops, acc.1 + tx_sigops_cost));
// check sigops
let size = self.block.size();
if sigops > self.consensus.fork.max_block_sigops(self.height, size) {
return Err(Error::MaximumSigops)
return Err(Error::MaximumSigops);
}
// TODO: when segwit is enabled, only sigop_cost must be checked!!!
let sigops_cost = self.block.transactions.iter()
.map(|tx| transaction_sigops_cost(&tx.raw, &store, bip16_active))
.sum::<usize>();
if !self.consensus.fork.check_block_sigops_cost(sigops_cost, &self.deployments) {
Err(Error::MaximumSigops)
// check sigops cost
if sigops_cost > self.consensus.fork.max_block_sigops_cost(self.height, size) {
Err(Error::MaximumSigopsCost)
} else {
Ok(())
}
@ -275,10 +276,12 @@ pub struct BlockWitness<'a> {
}
impl<'a> BlockWitness<'a> {
fn new(block: CanonBlock<'a>, deployments: ActiveDeployments<'a>) -> Self {
fn new(block: CanonBlock<'a>, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
BlockWitness {
block: block,
segwit_active: deployments.is_active("segwit"),
segwit_active: segwit_active,
}
}

View File

@ -6,7 +6,7 @@ use canon::CanonBlock;
use accept_block::BlockAcceptor;
use accept_header::HeaderAcceptor;
use accept_transaction::TransactionAcceptor;
use deployments::{Deployments, ActiveDeployments};
use deployments::BlockDeployments;
use duplex_store::DuplexTransactionOutputProvider;
use VerificationLevel;
@ -17,14 +17,13 @@ pub struct ChainAcceptor<'a> {
}
impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, deployments: &'a Deployments) -> Self {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, deployments: &'a BlockDeployments) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider();
let active_deployments = ActiveDeployments::new(deployments, height, headers, consensus);
ChainAcceptor {
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, active_deployments, headers),
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, deployments, headers),
header: HeaderAcceptor::new(headers, consensus, block.header(), height, deployments),
transactions: block.transactions()
.into_iter()
@ -39,8 +38,7 @@ impl<'a> ChainAcceptor<'a> {
height,
block.header.raw.time,
tx_index,
active_deployments,
headers,
deployments,
))
.collect(),
}

View File

@ -3,7 +3,7 @@ use db::BlockHeaderProvider;
use canon::CanonHeader;
use error::Error;
use work::work_required;
use deployments::Deployments;
use deployments::BlockDeployments;
use timestamp::median_timestamp;
pub struct HeaderAcceptor<'a> {
@ -18,11 +18,11 @@ impl<'a> HeaderAcceptor<'a> {
consensus: &'a ConsensusParams,
header: CanonHeader<'a>,
height: u32,
deployments: &'a Deployments,
deployments: &'a BlockDeployments<'a>,
) -> Self {
HeaderAcceptor {
work: HeaderWork::new(header, store, height, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, deployments),
version: HeaderVersion::new(header, height, consensus),
}
}
@ -99,8 +99,8 @@ pub struct HeaderMedianTimestamp<'a> {
}
impl<'a> HeaderMedianTimestamp<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, deployments: &'a Deployments, params: &ConsensusParams) -> Self {
let active = deployments.csv(height, store, params);
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, deployments: &'a BlockDeployments<'a>) -> Self {
let active = deployments.csv();
HeaderMedianTimestamp {
header: header,
store: store,

View File

@ -1,10 +1,10 @@
use primitives::hash::H256;
use primitives::bytes::Bytes;
use db::{TransactionMetaProvider, TransactionOutputProvider, BlockHeaderProvider};
use network::{ConsensusParams, ConsensusFork, Deployments as NetworkDeployments};
use db::{TransactionMetaProvider, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork};
use script::{Script, ScriptWitness, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::{Deployments, ActiveDeployments};
use deployments::BlockDeployments;
use script::Builder;
use sigops::transaction_sigops;
use canon::CanonTransaction;
@ -37,19 +37,18 @@ impl<'a> TransactionAcceptor<'a> {
height: u32,
time: u32,
transaction_index: usize,
deployments: ActiveDeployments<'a>,
headers: &'a BlockHeaderProvider,
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
TransactionAcceptor {
premature_witness: TransactionPrematureWitness::new(transaction, deployments.is_active("segwit")),
premature_witness: TransactionPrematureWitness::new(transaction, deployments),
bip30: TransactionBip30::new_for_sync(transaction, meta_store, consensus, block_hash, height),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, deployments.deployments, headers),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, deployments),
}
}
@ -86,8 +85,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
transaction: CanonTransaction<'a>,
height: u32,
time: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
let transaction_index = 0;
@ -99,7 +97,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
sigops: TransactionSigops::new(transaction, output_store, consensus, max_block_sigops, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, deployments, headers),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, deployments),
}
}
@ -304,8 +302,7 @@ impl<'a> TransactionEval<'a> {
verification_level: VerificationLevel,
height: u32,
time: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
deployments: &'a BlockDeployments,
) -> Self {
let verify_p2sh = time >= params.bip16_time;
let verify_strictenc = match params.fork {
@ -319,8 +316,8 @@ impl<'a> TransactionEval<'a> {
_ => SignatureVersion::Base,
};
let verify_checksequence = deployments.csv(height, headers, params);
let verify_witness = deployments.segwit(height, headers, params);
let verify_checksequence = deployments.csv();
let verify_witness = deployments.segwit();
let verify_nulldummy = verify_witness;
TransactionEval {
@ -450,19 +447,21 @@ impl<'a> TransactionReturnReplayProtection<'a> {
pub struct TransactionPrematureWitness<'a> {
transaction: CanonTransaction<'a>,
is_segwit_active: bool,
segwit_active: bool,
}
impl<'a> TransactionPrematureWitness<'a> {
fn new(transaction: CanonTransaction<'a>, is_segwit_active: bool) -> Self {
fn new(transaction: CanonTransaction<'a>, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
TransactionPrematureWitness {
transaction: transaction,
is_segwit_active: is_segwit_active,
segwit_active: segwit_active,
}
}
fn check(&self) -> Result<(), TransactionError> {
if !self.is_segwit_active && (*self.transaction).raw.has_witness() {
if !self.segwit_active && (*self.transaction).raw.has_witness() {
Err(TransactionError::PrematureWitness)
} else {
Ok(())

View File

@ -12,7 +12,7 @@ use verify_header::HeaderVerifier;
use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
use deployments::{Deployments, ActiveDeployments};
use deployments::{Deployments, BlockDeployments};
use {Verify, VerificationLevel};
pub struct BackwardsCompatibleChainVerifier {
@ -49,22 +49,28 @@ impl BackwardsCompatibleChainVerifier {
unreachable!();
},
BlockOrigin::CanonChain { block_number } => {
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level, canon_block, block_number, &self.deployments);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level, canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChain(origin) => {
let block_number = origin.block_number;
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &self.deployments);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
let block_number = origin.block_number;
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &self.deployments);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
}
@ -97,8 +103,8 @@ impl BackwardsCompatibleChainVerifier {
) -> Result<(), TransactionError> where T: TransactionOutputProvider {
let indexed_tx = transaction.clone().into();
// let's do preverification first
let deployments = ActiveDeployments::new(&self.deployments, height, block_header_provider, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, deployments);
let deployments = BlockDeployments::new(&self.deployments, height, block_header_provider, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, &deployments);
try!(tx_verifier.check());
let canon_tx = CanonTransaction::new(&indexed_tx);
@ -112,8 +118,7 @@ impl BackwardsCompatibleChainVerifier {
canon_tx,
height,
time,
&self.deployments,
self.store.as_block_header_provider()
&deployments,
);
tx_acceptor.check()
}

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use parking_lot::Mutex;
use network::{ConsensusParams, Deployment, Deployments as NetworkDeployments};
use network::{ConsensusParams, Deployment};
use hash::H256;
use db::{BlockHeaderProvider, BlockRef, BlockAncestors, BlockIterator};
use timestamp::median_timestamp;
@ -56,9 +56,8 @@ pub struct Deployments {
cache: Mutex<DeploymentStateCache>,
}
#[derive(Clone, Copy)]
pub struct ActiveDeployments<'a> {
pub deployments: &'a Deployments,
pub struct BlockDeployments<'a> {
deployments: &'a Deployments,
number: u32,
headers: &'a BlockHeaderProvider,
consensus: &'a ConsensusParams,
@ -92,6 +91,25 @@ impl Deployments {
}
}
impl<'a> BlockDeployments<'a> {
pub fn new(deployments: &'a Deployments, number: u32, headers: &'a BlockHeaderProvider, consensus: &'a ConsensusParams) -> Self {
BlockDeployments {
deployments: deployments,
number: number,
headers: headers,
consensus: consensus,
}
}
pub fn csv(&self) -> bool {
self.deployments.csv(self.number, self.headers, self.consensus)
}
pub fn segwit(&self) -> bool {
self.deployments.segwit(self.number, self.headers, self.consensus)
}
}
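BlockDeployments is the replacement for the removed ActiveDeployments wrapper and the network-level Deployments trait: it binds the shared deployment cache to one block's height, header provider, and consensus params, so downstream checks just call csv() or segwit(). A minimal usage sketch, assuming the crate types as they appear in this diff (the helper itself is illustrative):

fn segwit_active_at(
    deployments: &Deployments,
    height: u32,
    headers: &BlockHeaderProvider,
    consensus: &ConsensusParams,
) -> bool {
    // Bind the cache to a concrete block context once...
    let block_deployments = BlockDeployments::new(deployments, height, headers, consensus);
    // ...then individual verifiers no longer need to thread height/headers/params through.
    block_deployments.segwit()
}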
/// Calculates threshold state of given deployment
fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> ThresholdState {
if let Some(activation) = deployment.activation {
@ -140,27 +158,6 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
}
impl<'a> ActiveDeployments<'a> {
pub fn new(deployments: &'a Deployments, number: u32, headers: &'a BlockHeaderProvider, consensus: &'a ConsensusParams) -> Self {
ActiveDeployments {
deployments: deployments,
number: number,
headers: headers,
consensus: consensus,
}
}
}
impl<'a> NetworkDeployments for ActiveDeployments<'a> {
fn is_active(&self, name: &str) -> bool {
match name {
"csv" => self.deployments.segwit(self.number, self.headers, self.consensus),
"segwit" => self.deployments.segwit(self.number, self.headers, self.consensus),
_ => false,
}
}
}
fn first_of_the_period(block: u32, miner_confirmation_window: u32) -> u32 {
if block < miner_confirmation_window - 1 {
0

View File

@ -32,6 +32,8 @@ pub enum Error {
/// Maximum sigops operations exceeded - will not provide how much it was in total
/// since it stops counting once `MAX_BLOCK_SIGOPS` is reached
MaximumSigops,
/// Maximum sigops operations cost exceeded
MaximumSigopsCost,
/// Coinbase signature is not in the range 2-100
CoinbaseSignatureLength(usize),
/// Block size is invalid

View File

@ -45,12 +45,14 @@ pub fn transaction_sigops(
pub fn transaction_sigops_cost(
transaction: &Transaction,
store: &TransactionOutputProvider,
bip16_active: bool,
sigops: usize,
) -> usize {
let sigops_cost = transaction_sigops(transaction, store, bip16_active) * segwit::WITNESS_SCALE_FACTOR;
let sigops_cost = sigops * segwit::WITNESS_SCALE_FACTOR;
let witness_sigops_cost: usize = transaction.inputs.iter()
.map(|input| store.transaction_output(&input.previous_output, usize::max_value())
.map(|output| witness_sigops(&Script::new(input.script_sig.clone()), &Script::new(output.script_pubkey.clone()), &ScriptWitness { stack: input.script_witness.clone().into() }))
.map(|output| witness_sigops(&Script::new(input.script_sig.clone()), &Script::new(output.script_pubkey.clone()), &ScriptWitness {
stack: input.script_witness.clone().into(), // TODO: excess clone
}))
.unwrap_or(0))
.sum();
sigops_cost + witness_sigops_cost
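The refactored transaction_sigops_cost now takes the already-counted legacy sigops as an argument instead of recounting them internally: the cost is the legacy count scaled by the witness factor, plus witness-program sigops, which are counted once. Illustrative arithmetic only (the helper below is not part of this diff):

fn example_sigops_cost(legacy_sigops: usize, witness_sigops: usize) -> usize {
    // e.g. 2 legacy sigops and 1 witness sigop => 2 * 4 + 1 = 9 cost units.
    legacy_sigops * segwit::WITNESS_SCALE_FACTOR + witness_sigops
}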

View File

@ -1,12 +1,12 @@
use std::ops;
use ser::Serializable;
use chain::IndexedTransaction;
use network::{ConsensusParams, ConsensusFork, Deployments};
use network::{ConsensusParams, ConsensusFork};
use deployments::BlockDeployments;
use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;
use constants::{MIN_COINBASE_SIZE, MAX_COINBASE_SIZE};
use deployments::ActiveDeployments;
pub struct TransactionVerifier<'a> {
pub empty: TransactionEmpty<'a>,
@ -42,14 +42,14 @@ pub struct MemoryPoolTransactionVerifier<'a> {
}
impl<'a> MemoryPoolTransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, deployments: ActiveDeployments<'a>) -> Self {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, deployments: &'a BlockDeployments<'a>) -> Self {
trace!(target: "verification", "Mempool-Tx pre-verification {}", transaction.hash.to_reversed_str());
MemoryPoolTransactionVerifier {
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
size: TransactionSize::new(transaction, deployments, consensus),
premature_witness: TransactionPrematureWitness::new(transaction, deployments),
size: TransactionSize::new(transaction, consensus),
premature_witness: TransactionPrematureWitness::new(transaction, &deployments),
sigops: TransactionSigops::new(transaction, ConsensusFork::absolute_maximum_block_sigops()),
}
}
@ -151,21 +151,20 @@ impl<'a> TransactionMemoryPoolCoinbase<'a> {
pub struct TransactionSize<'a> {
transaction: &'a IndexedTransaction,
deployments: ActiveDeployments<'a>,
consensus: &'a ConsensusParams,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: &'a IndexedTransaction, deployments: ActiveDeployments<'a>, consensus: &'a ConsensusParams) -> Self {
fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams) -> Self {
TransactionSize {
transaction: transaction,
deployments: deployments,
consensus: consensus,
}
}
fn check(&self) -> Result<(), TransactionError> {
if !self.consensus.fork.check_transaction_size(self.transaction.raw.serialized_size(), &self.deployments) {
let size = self.transaction.raw.serialized_size();
if size > self.consensus.fork.max_transaction_size() {
Err(TransactionError::MaxSize)
} else {
Ok(())
@ -198,19 +197,21 @@ impl<'a> TransactionSigops<'a> {
pub struct TransactionPrematureWitness<'a> {
transaction: &'a IndexedTransaction,
deployments: ActiveDeployments<'a>,
segwit_active: bool,
}
impl<'a> TransactionPrematureWitness<'a> {
pub fn new(transaction: &'a IndexedTransaction, deployments: ActiveDeployments<'a>) -> Self {
pub fn new(transaction: &'a IndexedTransaction, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
TransactionPrematureWitness {
transaction: transaction,
deployments: deployments,
segwit_active: segwit_active,
}
}
pub fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.has_witness() && !self.deployments.is_active("segwit") {
if !self.segwit_active && self.transaction.raw.has_witness() {
Err(TransactionError::PrematureWitness)
} else {
Ok(())