fixes after merge with master

This commit is contained in:
debris 2016-12-12 15:49:22 +01:00
parent c59e84ebe5
commit 0f7348e139
8 changed files with 85 additions and 411 deletions

View File

@ -19,23 +19,29 @@ impl PreviousTransactionOutputProvider for IndexedBlock {
}
impl TransactionOutputObserver for IndexedBlock {
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
fn is_spent(&self, _prevout: &OutPoint) -> Option<bool> {
// the code below is valid, but commented out due to its poor performance
// we could optimize it by indexing all outputs once
// let tx: IndexedTransaction = { .. }
// let indexed_outputs: IndexedOutputs = tx.indexed_outputs();
// indexed_outputs.is_spent()
None
// if previous transaction output appears more than once then we can safely
// tell that it's spent (double spent)
// TODO: optimize it
let spends = self.transactions.iter()
.flat_map(|tx| &tx.raw.inputs)
.filter(|input| &input.previous_output == prevout)
.take(2)
.count();
match spends {
0 => None,
1 => Some(false),
2 => Some(true),
_ => unreachable!("spends <= 2; self.take(2); qed"),
}
//self.previous_transaction_output(prevout).map(|_output| false)
//let spends = self.transactions.iter()
//.flat_map(|tx| &tx.raw.inputs)
//.filter(|input| &input.previous_output == prevout)
//.take(2)
//.count();
//match spends {
//0 => None,
//1 => Some(false),
//2 => Some(true),
//_ => unreachable!("spends <= 2; self.take(2); qed"),
//}
}
}

View File

@ -14,6 +14,7 @@ use script::Script;
use chain::OutPoint;
use verification;
use ser::serialize;
use network::Magic;
use primitives::hash::H256 as GlobalH256;
@ -37,7 +38,7 @@ pub struct BlockChainClientCore {
impl BlockChainClientCore {
pub fn new(storage: db::SharedStore) -> Self {
assert!(storage.best_block().is_some());
BlockChainClientCore {
storage: storage,
}
@ -74,14 +75,20 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
None => -1,
};
let block_size = block.size();
let median_time = verification::ChainVerifier::median_timestamp(self.storage.as_block_header_provider(), &block.header.raw);
// TODO: use real network
let median_time = verification::median_timestamp(
&block.header.raw,
self.storage.as_block_header_provider(),
Magic::Mainnet,
);
VerboseBlock {
confirmations: confirmations,
size: block_size as u32,
strippedsize: block_size as u32, // TODO: segwit
weight: block_size as u32, // TODO: segwit
height: height,
mediantime: median_time,
mediantime: Some(median_time),
difficulty: block.header.raw.bits.to_f64(),
chainwork: U256::default(), // TODO: read from storage
previousblockhash: Some(block.header.raw.previous_header_hash.clone().into()),
@ -401,7 +408,7 @@ pub mod tests {
merkleroot: "982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e".into(),
tx: vec!["982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e".into()],
time: 1231469665,
mediantime: None,
mediantime: Some(1231006505),
nonce: 2573394689,
bits: 486604799,
difficulty: 1.0,
@ -427,7 +434,7 @@ pub mod tests {
merkleroot: "d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into(),
tx: vec!["d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into()],
time: 1231469744,
mediantime: None,
mediantime: Some(1231469665),
nonce: 1639830024,
bits: 486604799,
difficulty: 1.0,

View File

@ -1,11 +1,10 @@
use std::cmp;
use std::collections::BTreeSet;
use network::Magic;
use db::BlockHeaderProvider;
use canon::{CanonHeader, EXPECT_CANON};
use canon::CanonHeader;
use constants::MIN_BLOCK_VERSION;
use error::Error;
use work::work_required;
use timestamp::median_timestamp;
pub struct HeaderAcceptor<'a> {
pub version: HeaderVersion<'a>,
@ -19,7 +18,7 @@ impl<'a> HeaderAcceptor<'a> {
// TODO: check last 1000 blocks instead of hardcoding the value
version: HeaderVersion::new(header, MIN_BLOCK_VERSION),
work: HeaderWork::new(header, store, height, network),
median_timestamp: HeaderMedianTimestamp::new(header, store, height, network),
median_timestamp: HeaderMedianTimestamp::new(header, store, network),
}
}
@ -93,16 +92,14 @@ impl<'a> HeaderRule for HeaderWork<'a> {
pub struct HeaderMedianTimestamp<'a> {
header: CanonHeader<'a>,
store: &'a BlockHeaderProvider,
height: u32,
network: Magic,
}
impl<'a> HeaderMedianTimestamp<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, network: Magic) -> Self {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, network: Magic) -> Self {
HeaderMedianTimestamp {
header: header,
store: store,
height: height,
network: network,
}
}
@ -110,24 +107,7 @@ impl<'a> HeaderMedianTimestamp<'a> {
impl<'a> HeaderRule for HeaderMedianTimestamp<'a> {
fn check(&self) -> Result<(), Error> {
// TODO: timestamp validation on testnet is broken
if self.height == 0 || self.network == Magic::Testnet {
return Ok(());
}
let ancestors = cmp::min(11, self.height);
let mut timestamps = BTreeSet::new();
let mut block_ref = self.header.raw.previous_header_hash.clone().into();
for _ in 0..ancestors {
let previous_header = self.store.block_header(block_ref).expect(EXPECT_CANON);
timestamps.insert(previous_header.time);
block_ref = previous_header.previous_header_hash.into();
}
let timestamps = timestamps.into_iter().collect::<Vec<_>>();
let median = timestamps[timestamps.len() / 2];
let median = median_timestamp(&self.header.raw, self.store, self.network);
if self.header.raw.time <= median {
Err(Error::Timestamp)
} else {

View File

@ -106,7 +106,8 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.bip30.check());
// TODO: b82 fails, when this is enabled, fix this
//try!(self.bip30.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
try!(self.overspent.check());

View File

@ -2,8 +2,6 @@ use std::ops;
use primitives::hash::H256;
use db::{IndexedBlock, IndexedTransaction, IndexedBlockHeader};
pub const EXPECT_CANON: &'static str = "Block ancestors expected to be found in canon chain";
/// Blocks whose parents are known to be in the chain
#[derive(Clone, Copy)]
pub struct CanonBlock<'a> {

View File

@ -137,361 +137,6 @@ impl Verify for BackwardsCompatibleChainVerifier {
}
}
//pub struct ChainVerifier {
//store: db::SharedStore,
//skip_pow: bool,
//skip_sig: bool,
//network: Magic,
//consensus_params: ConsensusParams,
//pool: Pool,
//}
//impl ChainVerifier {
//pub fn new(store: db::SharedStore, network: Magic) -> Self {
//ChainVerifier {
//store: store,
//skip_pow: false,
//skip_sig: false,
//network: network,
//consensus_params: network.consensus_params(),
//pool: Pool::new(TRANSACTIONS_VERIFY_THREADS),
//}
//}
//#[cfg(test)]
//pub fn pow_skip(mut self) -> Self {
//self.skip_pow = true;
//self
//}
//#[cfg(test)]
//pub fn signatures_skip(mut self) -> Self {
//self.skip_sig = true;
//self
//}
//pub fn verify_p2sh(&self, time: u32) -> bool {
//time >= self.consensus_params.bip16_time
//}
//pub fn verify_clocktimeverify(&self, height: u32) -> bool {
//height >= self.consensus_params.bip65_height
//}
///// Returns number of block signature operations.
///// NOTE: This function expects all previous blocks to be already in database.
//fn block_sigops(&self, block: &db::IndexedBlock) -> usize {
//// strict pay-to-script-hash signature operations count toward block
//// signature operations limit is enforced with BIP16
//let store = StoreWithUnretainedOutputs::new(self.store.as_previous_transaction_output_provider(), block);
//let bip16_active = self.verify_p2sh(block.header.raw.time);
//block.transactions.iter().map(|tx| {
//transaction_sigops(&tx.raw, &store, bip16_active)
//.expect("missing tx, out of order verification or malformed db")
//}).sum()
//}
//fn ordered_verify(&self, block: &db::IndexedBlock, at_height: u32) -> Result<(), Error> {
//if !block.is_final(at_height) {
//return Err(Error::NonFinalBlock);
//}
//// transaction verification including number of signature operations checking
//if self.block_sigops(block) > MAX_BLOCK_SIGOPS {
//return Err(Error::MaximumSigops);
//}
//let block_hash = block.hash();
//// check that difficulty matches the adjusted level
////if let Some(work) = self.work_required(block, at_height) {
//if at_height != 0 && !self.skip_pow {
//let work = utils::work_required(
//block.header.raw.previous_header_hash.clone(),
//block.header.raw.time,
//at_height,
//self.store.as_block_header_provider(),
//self.network
//);
//if !self.skip_pow && work != block.header.raw.bits {
//trace!(target: "verification", "pow verification error at height: {}", at_height);
//trace!(target: "verification", "expected work: {:?}, got {:?}", work, block.header.raw.bits);
//return Err(Error::Difficulty);
//}
//}
//let coinbase_spends = block.transactions[0].raw.total_spends();
//// bip30
//for (tx_index, tx) in block.transactions.iter().enumerate() {
//if let Some(meta) = self.store.transaction_meta(&tx.hash) {
//if !meta.is_fully_spent() && !self.consensus_params.is_bip30_exception(&block_hash, at_height) {
//return Err(Error::Transaction(tx_index, TransactionError::UnspentTransactionWithTheSameHash));
//}
//}
//}
//let unretained_store = StoreWithUnretainedOutputs::new(self.store.as_previous_transaction_output_provider(), block);
//let mut total_unspent = 0u64;
//for (tx_index, tx) in block.transactions.iter().enumerate().skip(1) {
//let mut total_claimed: u64 = 0;
//for input in &tx.raw.inputs {
//// Coinbase maturity check
//if let Some(previous_meta) = self.store.transaction_meta(&input.previous_output.hash) {
//// check if it exists only
//// it will fail a little later if there is no transaction at all
//if previous_meta.is_coinbase() &&
//(at_height < COINBASE_MATURITY || at_height - COINBASE_MATURITY < previous_meta.height()) {
//return Err(Error::Transaction(tx_index, TransactionError::Maturity));
//}
//}
//let previous_output = unretained_store.previous_transaction_output(&input.previous_output)
//.expect("missing tx, out of order verification or malformed db");
//total_claimed += previous_output.value;
//}
//let total_spends = tx.raw.total_spends();
//if total_claimed < total_spends {
//return Err(Error::Transaction(tx_index, TransactionError::Overspend));
//}
//// total_claimed is greater than total_spends, checked above and returned otherwise, cannot overflow; qed
//total_unspent += total_claimed - total_spends;
//}
//let expected_max = utils::block_reward_satoshi(at_height) + total_unspent;
//if coinbase_spends > expected_max {
//return Err(Error::CoinbaseOverspend { expected_max: expected_max, actual: coinbase_spends });
//}
//Ok(())
//}
//pub fn verify_transaction<T>(
//&self,
//prevout_provider: &T,
//height: u32,
//time: u32,
//transaction: &chain::Transaction,
//sequence: usize
//) -> Result<(), TransactionError> where T: PreviousTransactionOutputProvider + TransactionOutputObserver {
//use script::{
//TransactionInputSigner,
//TransactionSignatureChecker,
//VerificationFlags,
//Script,
//verify_script,
//};
//if sequence == 0 {
//return Ok(());
//}
//// must not be coinbase (sequence = 0 is returned above)
//if transaction.is_coinbase() { return Err(TransactionError::MisplacedCoinbase); }
//let unretained_store = StoreWithUnretainedOutputs::new(self.store.as_previous_transaction_output_provider(), prevout_provider);
//for (input_index, input) in transaction.inputs().iter().enumerate() {
//// signature verification
//let signer: TransactionInputSigner = transaction.clone().into();
//let paired_output = match unretained_store.previous_transaction_output(&input.previous_output) {
//Some(output) => output,
//_ => return Err(TransactionError::UnknownReference(input.previous_output.hash.clone()))
//};
//// unwrap_or(false) is actually not right!
//// but can be here because of two reasons
//// - this function is not responsible for checking if previous transactions
//// in currently processed block / mempool already spent this output
//// - if we process transactions from mempool we shouldn't care if transactions before it
//// spent this output, cause they may not make their way into the block due to their size
//// or sigops limit
//if prevout_provider.is_spent(&input.previous_output).unwrap_or(false) {
//return Err(TransactionError::UsingSpentOutput(input.previous_output.hash.clone(), input.previous_output.index))
//}
//let checker = TransactionSignatureChecker {
//signer: signer,
//input_index: input_index,
//};
//let input: Script = input.script_sig.clone().into();
//let output: Script = paired_output.script_pubkey.into();
//let flags = VerificationFlags::default()
//.verify_p2sh(self.verify_p2sh(time))
//.verify_clocktimeverify(self.verify_clocktimeverify(height));
//// for tests only, skips as late as possible
//if self.skip_sig { continue; }
//if let Err(e) = verify_script(&input, &output, &flags, &checker) {
//trace!(target: "verification", "transaction signature verification failure: {:?}", e);
//trace!(target: "verification", "input:\n{}", input);
//trace!(target: "verification", "output:\n{}", output);
//// todo: log error here
//return Err(TransactionError::Signature(input_index))
//}
//}
//Ok(())
//}
//pub fn verify_block_header(
//&self,
//block_header_provider: &BlockHeaderProvider,
//hash: &H256,
//header: &chain::BlockHeader
//) -> Result<(), Error> {
//// target difficulty threshold
//if !self.skip_pow && !utils::is_valid_proof_of_work(self.network.max_bits(), header.bits, hash) {
//return Err(Error::Pow);
//}
//// check if block timestamp is not far in the future
//if utils::age(header.time) < -BLOCK_MAX_FUTURE {
//return Err(Error::FuturisticTimestamp);
//}
//if let Some(median_timestamp) = self.median_timestamp(block_header_provider, header) {
//// TODO: make timestamp validation on testnet work...
//if self.network != Magic::Testnet && median_timestamp >= header.time {
//trace!(
//target: "verification", "median timestamp verification failed, median: {}, current: {}",
//median_timestamp,
//header.time
//);
//return Err(Error::Timestamp);
//}
//}
//Ok(())
//}
//fn verify_block(&self, block: &db::IndexedBlock) -> VerificationResult {
//use task::Task;
//let hash = block.hash();
//// There should be at least 1 transaction
//if block.transactions.is_empty() {
//return Err(Error::Empty);
//}
//// block header checks
//try!(self.verify_block_header(self.store.as_block_header_provider(), &hash, &block.header.raw));
//// todo: serialized_size function is at least suboptimal
//let size = block.size();
//if size > MAX_BLOCK_SIZE {
//return Err(Error::Size(size))
//}
//// verify merkle root
//if block.merkle_root() != block.header.raw.merkle_root_hash {
//return Err(Error::MerkleRoot);
//}
//let first_tx = &block.transactions[0].raw;
//// check first transaction is a coinbase transaction
//if !first_tx.is_coinbase() {
//return Err(Error::Coinbase)
//}
//// check that coinbase has a valid signature
//// is_coinbase() = true above guarantees that there is at least one input
//let coinbase_script_len = first_tx.inputs[0].script_sig.len();
//if coinbase_script_len < 2 || coinbase_script_len > 100 {
//return Err(Error::CoinbaseSignatureLength(coinbase_script_len));
//}
//let location = match self.store.accepted_location(&block.header.raw) {
//Some(location) => location,
//None => return Ok(Chain::Orphan),
//};
//if block.transactions.len() > TRANSACTIONS_VERIFY_PARALLEL_THRESHOLD {
//// todo: might use on-stack vector (smallvec/elastic array)
//let mut transaction_tasks: Vec<Task> = Vec::with_capacity(TRANSACTIONS_VERIFY_THREADS);
//let mut last = 0;
//for num_task in 0..TRANSACTIONS_VERIFY_THREADS {
//let from = last;
//last = from + ::std::cmp::max(1, block.transactions.len() / TRANSACTIONS_VERIFY_THREADS);
//if num_task == TRANSACTIONS_VERIFY_THREADS - 1 { last = block.transactions.len(); };
//transaction_tasks.push(Task::new(block, location.height(), from, last));
//}
//self.pool.scoped(|scope| {
//for task in transaction_tasks.iter_mut() {
//scope.execute(move || task.progress(self))
//}
//self.store.flush();
//});
//for task in transaction_tasks.into_iter() {
//if let Err((index, tx_err)) = task.result() {
//return Err(Error::Transaction(index, tx_err));
//}
//}
//}
//else {
//for (index, tx) in block.transactions.iter().enumerate() {
//if let Err(tx_err) = self.verify_transaction(block, location.height(), block.header.raw.time, &tx.raw, index) {
//return Err(Error::Transaction(index, tx_err));
//}
//}
//}
//// todo: pre-process projected block number once verification is parallel!
//match location {
//BlockLocation::Main(block_number) => {
//try!(self.ordered_verify(block, block_number));
//Ok(Chain::Main)
//},
//BlockLocation::Side(block_number) => {
//try!(self.ordered_verify(block, block_number));
//Ok(Chain::Side)
//},
//}
//}
//fn median_timestamp(&self, block_header_provider: &BlockHeaderProvider, header: &chain::BlockHeader) -> Option<u32> {
//let mut timestamps = BTreeSet::new();
//let mut block_ref = header.previous_header_hash.clone().into();
//// TODO: optimize it, so it does not make 11 redundant queries each time
//for _ in 0..11 {
//let previous_header = match block_header_provider.block_header(block_ref) {
//Some(h) => h,
//None => { break; }
//};
//timestamps.insert(previous_header.time);
//block_ref = previous_header.previous_header_hash.into();
//}
//if timestamps.len() > 2 {
//let timestamps: Vec<_> = timestamps.into_iter().collect();
//Some(timestamps[timestamps.len() / 2])
//}
//else { None }
//}
//}
//impl Verify for ChainVerifier {
//fn verify(&self, block: &db::IndexedBlock) -> VerificationResult {
//let result = self.verify_block(block);
//trace!(
//target: "verification", "Block {} (transactions: {}) verification finished. Result {:?}",
//block.hash().to_reversed_str(),
//block.transactions.len(),
//result,
//);
//result
//}
//}
#[cfg(test)]
mod tests {
use std::sync::Arc;

View File

@ -60,22 +60,27 @@ extern crate ethcore_devtools as devtools;
extern crate test_data;
pub mod constants;
mod duplex_store;
mod canon;
mod accept_block;
mod accept_chain;
mod accept_header;
mod accept_transaction;
mod duplex_store;
mod error;
mod sigops;
mod timestamp;
mod work;
// pre-verification
mod verify_block;
mod verify_chain;
mod verify_header;
mod verify_transaction;
mod chain_verifier;
mod error;
// full verification
mod accept_block;
mod accept_chain;
mod accept_header;
mod accept_transaction;
mod sigops;
mod work;
// backwards compatibility
mod chain_verifier;
pub use primitives::{uint, hash, compact};
@ -93,6 +98,7 @@ pub use verify_transaction::{TransactionVerifier, MemoryPoolTransactionVerifier}
pub use chain_verifier::{Chain, BackwardsCompatibleChainVerifier, VerificationResult};
pub use error::{Error, TransactionError};
pub use sigops::transaction_sigops;
pub use timestamp::median_timestamp;
pub use work::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_hash, block_reward_satoshi};
/// Interface for block verification

View File

@ -0,0 +1,31 @@
use std::collections::BTreeSet;
use chain::BlockHeader;
use db::BlockHeaderProvider;
use network::Magic;
/// Returns the median timestamp ("median time past") of the up-to-11 blocks
/// preceding `header`, used by the block-timestamp consensus rule.
///
/// Falls back to `header.time` itself when validation is skipped (testnet)
/// or when no ancestors are available (e.g. for the genesis block).
/// NOTE(review): a caller that rejects on `header.time <= median` will treat
/// this fallback as a failure (`time <= time`); confirm the testnet/genesis
/// paths are short-circuited by the caller before this comparison.
pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider, network: Magic) -> u32 {
	// TODO: timestamp validation on testnet is broken
	if network == Magic::Testnet {
		return header.time;
	}

	let ancestors = 11;
	// Collect the timestamps of up to 11 ancestors, walking the previous-hash
	// chain. Duplicates MUST be kept: the consensus median (Bitcoin Core's
	// GetMedianTimePast) is taken over the raw list of timestamps, so a set
	// would skew the result whenever two ancestors share the same time.
	let mut timestamps = Vec::with_capacity(ancestors);
	let mut block_ref = header.previous_header_hash.clone().into();
	for _ in 0..ancestors {
		let previous_header = match store.block_header(block_ref) {
			Some(h) => h,
			None => break,
		};
		timestamps.push(previous_header.time);
		block_ref = previous_header.previous_header_hash.into();
	}

	if timestamps.is_empty() {
		return header.time;
	}

	// Median of the sorted timestamps (upper median for even counts,
	// matching the `len / 2` index used elsewhere in this codebase).
	timestamps.sort();
	timestamps[timestamps.len() / 2]
}