introduced ConsensusParams + BIP65 support

Svyatoslav Nikolsky 2016-11-15 14:00:46 +03:00
parent 711b32c6b8
commit b152c8178d
9 changed files with 156 additions and 35 deletions

View File

@ -0,0 +1,37 @@
use super::Magic;

#[derive(Debug, Clone)]
/// Parameters that influence chain consensus.
pub struct ConsensusParams {
	/// Block height at which BIP65 becomes active.
	/// See https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki
	pub bip65_height: u32,
}

impl ConsensusParams {
	pub fn with_magic(magic: Magic) -> Self {
		match magic {
			Magic::Mainnet => ConsensusParams {
				bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
			},
			Magic::Testnet => ConsensusParams {
				bip65_height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
			},
			Magic::Regtest => ConsensusParams {
				bip65_height: 1351,
			},
		}
	}
}

#[cfg(test)]
mod tests {
	use super::{Magic, ConsensusParams};

	#[test]
	fn test_consensus_params_bip65_height() {
		assert_eq!(ConsensusParams::with_magic(Magic::Mainnet).bip65_height, 388381);
		assert_eq!(ConsensusParams::with_magic(Magic::Testnet).bip65_height, 581885);
		assert_eq!(ConsensusParams::with_magic(Magic::Regtest).bip65_height, 1351);
	}
}
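
For illustration (not part of the diff), a minimal sketch of how these parameters are meant to be consumed; the is_bip65_active helper is hypothetical, but the comparison mirrors the activation check added to the sync client later in this commit:

use message::common::{ConsensusParams, Magic};

// Hypothetical helper: BIP65 counts as active once the chain has reached
// the configured activation height.
fn is_bip65_active(params: &ConsensusParams, best_block_height: u32) -> bool {
	best_block_height >= params.bip65_height
}

fn example() {
	let params = ConsensusParams::with_magic(Magic::Mainnet);
	assert!(!is_bip65_active(&params, 388_380));
	assert!(is_bip65_active(&params, 388_381));
}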

View File

@ -4,6 +4,7 @@
use ser::{Stream, Serializable};
use chain::Block;
use Error;
use super::ConsensusParams;
const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B;
@ -62,6 +63,10 @@ impl Magic {
Magic::Regtest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
}
}
pub fn consensus_params(&self) -> ConsensusParams {
ConsensusParams::with_magic(*self)
}
}
impl Serializable for Magic {

View File

@ -3,6 +3,7 @@ mod block_header_and_ids;
mod block_transactions;
mod block_transactions_request;
mod command;
mod consensus;
mod inventory;
mod ip;
mod magic;
@ -15,6 +16,7 @@ pub use self::block_header_and_ids::BlockHeaderAndIDs;
pub use self::block_transactions::BlockTransactions;
pub use self::block_transactions_request::BlockTransactionsRequest;
pub use self::command::Command;
pub use self::consensus::ConsensusParams;
pub use self::inventory::{InventoryVector, InventoryType};
pub use self::ip::IpAddress;
pub use self::magic::Magic;

View File

@ -30,7 +30,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
};
let sync_handle = el.handle();
let sync_connection_factory = create_sync_connection_factory(&sync_handle, db);
let sync_connection_factory = create_sync_connection_factory(&sync_handle, cfg.magic.consensus_params(), db);
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
try!(p2p.run().map_err(|_| "Failed to start p2p module"));

View File

@ -71,5 +71,10 @@ impl VerificationFlags {
self.verify_p2sh = value;
self
}
pub fn verify_clocktimeverify(mut self, value: bool) -> Self {
self.verify_clocktimeverify = value;
self
}
}
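
The new setter follows the same builder pattern as verify_p2sh above; the flag combination exercised by the BIP65 test and the chain verifier later in this commit is:

let flags = VerificationFlags::default()
	.verify_p2sh(true)
	.verify_clocktimeverify(true);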

View File

@ -447,35 +447,37 @@ pub fn eval_script(
},
Opcode::OP_NOP => break,
Opcode::OP_CHECKLOCKTIMEVERIFY => {
if !flags.verify_clocktimeverify && flags.verify_discourage_upgradable_nops {
if flags.verify_discourage_upgradable_nops {
return Err(Error::DiscourageUpgradableNops);
}
// Note that elsewhere numeric opcodes are limited to
// operands in the range -2**31+1 to 2**31-1, however it is
// legal for opcodes to produce results exceeding that
// range. This limitation is implemented by CScriptNum's
// default 4-byte limit.
//
// If we kept to that limit we'd have a year 2038 problem,
// even though the nLockTime field in transactions
// themselves is uint32 which only becomes meaningless
// after the year 2106.
//
// Thus as a special case we tell CScriptNum to accept up
// to 5-byte bignums, which are good until 2**39-1, well
// beyond the 2**32-1 limit of the nLockTime field itself.
let lock_time = try!(Num::from_slice(try!(stack.last()), flags.verify_minimaldata, 5));
if flags.verify_clocktimeverify {
// Note that elsewhere numeric opcodes are limited to
// operands in the range -2**31+1 to 2**31-1, however it is
// legal for opcodes to produce results exceeding that
// range. This limitation is implemented by CScriptNum's
// default 4-byte limit.
//
// If we kept to that limit we'd have a year 2038 problem,
// even though the nLockTime field in transactions
// themselves is uint32 which only becomes meaningless
// after the year 2106.
//
// Thus as a special case we tell CScriptNum to accept up
// to 5-byte bignums, which are good until 2**39-1, well
// beyond the 2**32-1 limit of the nLockTime field itself.
let lock_time = try!(Num::from_slice(try!(stack.last()), flags.verify_minimaldata, 5));
// In the rare event that the argument may be < 0 due to
// some arithmetic being done first, you can always use
// 0 MAX CHECKLOCKTIMEVERIFY.
if lock_time.is_negative() {
return Err(Error::NegativeLocktime);
}
// In the rare event that the argument may be < 0 due to
// some arithmetic being done first, you can always use
// 0 MAX CHECKLOCKTIMEVERIFY.
if lock_time.is_negative() {
return Err(Error::NegativeLocktime);
}
if !checker.check_lock_time(lock_time) {
return Err(Error::UnsatisfiedLocktime);
if !checker.check_lock_time(lock_time) {
return Err(Error::UnsatisfiedLocktime);
}
}
},
Opcode::OP_CHECKSEQUENCEVERIFY => {
@ -1870,4 +1872,26 @@ mod tests {
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker), Ok(()));
}
// https://blockchain.info/rawtx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
#[test]
fn test_transaction_bip65() {
let tx: Transaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let signer: TransactionInputSigner = tx.into();
let checker = TransactionSignatureChecker {
signer: signer,
input_index: 1,
};
let input: Script = "004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51ae".into();
let output: Script = "142a9bc5447d664c1d0141392a842d23dba45c4f13b175".into();
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker), Ok(()));
let flags = VerificationFlags::default()
.verify_p2sh(true)
.verify_clocktimeverify(true);
assert_eq!(verify_script(&input, &output, &flags, &checker), Err(Error::NumberOverflow));
}
}
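
A side note on the 5-byte limit referenced in the OP_CHECKLOCKTIMEVERIFY comment above: a 5-byte script number keeps 39 magnitude bits plus a sign bit, so it can hold lock times up to 2^39 - 1, comfortably above the 2^32 - 1 ceiling of the transaction nLockTime field, whereas a 4-byte signed limit would already overflow in 2038. A standalone sketch of that arithmetic (plain Rust, independent of the script crate):

fn main() {
	// 5-byte script number: 39 magnitude bits (the top bit of the 5th byte is the sign).
	let max_5_byte_num: u64 = (1u64 << 39) - 1;
	// nLockTime is a u32, so its ceiling is 2^32 - 1 (meaningful until ~2106).
	let max_lock_time: u64 = u32::max_value() as u64;
	// A 4-byte signed limit (2^31 - 1 seconds) runs out in 2038.
	let max_4_byte_num: u64 = (1u64 << 31) - 1;
	assert!(max_4_byte_num < max_lock_time);
	assert!(max_lock_time < max_5_byte_num);
}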

View File

@ -33,6 +33,7 @@ mod synchronization_server;
use std::sync::Arc;
use parking_lot::RwLock;
use tokio_core::reactor::Handle;
use message::common::ConsensusParams;
/// Sync errors.
#[derive(Debug)]
@ -51,7 +52,7 @@ pub fn create_sync_blocks_writer(db: Arc<db::Store>) -> blocks_writer::BlocksWri
}
/// Create inbound synchronization connections factory for given `db`.
pub fn create_sync_connection_factory(handle: &Handle, db: Arc<db::Store>) -> p2p::LocalSyncNodeRef {
pub fn create_sync_connection_factory(handle: &Handle, consensus_params: ConsensusParams, db: Arc<db::Store>) -> p2p::LocalSyncNodeRef {
use synchronization_chain::Chain as SyncChain;
use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;
use local_node::LocalNode as SyncNode;
@ -62,7 +63,7 @@ pub fn create_sync_connection_factory(handle: &Handle, db: Arc<db::Store>) -> p2
let sync_chain = Arc::new(RwLock::new(SyncChain::new(db)));
let sync_executor = SyncExecutor::new(sync_chain.clone());
let sync_server = Arc::new(SynchronizationServer::new(sync_chain.clone(), sync_executor.clone()));
let sync_client = SynchronizationClient::new(SynchronizationConfig::default(), handle, sync_executor.clone(), sync_chain);
let sync_client = SynchronizationClient::new(SynchronizationConfig::with_consensus_params(consensus_params), handle, sync_executor.clone(), sync_chain);
let sync_node = Arc::new(SyncNode::new(sync_server, sync_client, sync_executor));
SyncConnectionFactory::with_local_node(sync_node)
}

View File

@ -12,6 +12,7 @@ use futures_cpupool::CpuPool;
use linked_hash_map::LinkedHashMap;
use db;
use chain::{Block, BlockHeader, RepresentH256};
use message::common::ConsensusParams;
use primitives::hash::H256;
use synchronization_peers::Peers;
#[cfg(test)] use synchronization_peers::{Information as PeersInformation};
@ -206,6 +207,8 @@ pub struct PeersBlocksWaiter {
/// Synchronization client configuration options.
pub struct Config {
/// Consensus-related parameters.
pub consensus_params: ConsensusParams,
/// Number of threads to allocate in synchronization CpuPool.
pub threads_num: usize,
/// Do not verify incoming blocks before inserting to db.
@ -214,6 +217,8 @@ pub struct Config {
/// Synchronization client.
pub struct SynchronizationClient<T: TaskExecutor> {
/// Synchronization configuration.
config: Config,
/// Synchronization state.
state: State,
/// Cpu pool.
@ -240,9 +245,10 @@ pub struct SynchronizationClient<T: TaskExecutor> {
verifying_blocks_waiters: HashMap<usize, (HashSet<H256>, Option<Arc<PeersBlocksWaiter>>)>,
}
impl Default for Config {
fn default() -> Self {
impl Config {
pub fn with_consensus_params(consensus_params: ConsensusParams) -> Self {
Config {
consensus_params: consensus_params,
threads_num: 4,
skip_verification: false,
}
@ -447,6 +453,7 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
impl<T> SynchronizationClient<T> where T: TaskExecutor {
/// Create new synchronization window
pub fn new(config: Config, handle: &Handle, executor: Arc<Mutex<T>>, chain: ChainRef) -> Arc<Mutex<Self>> {
let skip_verification = config.skip_verification;
let sync = Arc::new(Mutex::new(
SynchronizationClient {
state: State::Saturated,
@ -461,19 +468,21 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
verification_worker_thread: None,
verifying_blocks_by_peer: HashMap::new(),
verifying_blocks_waiters: HashMap::new(),
config: config,
}
));
if !config.skip_verification {
if !skip_verification {
let (verification_work_sender, verification_work_receiver) = channel();
let csync = sync.clone();
let mut lsync = sync.lock();
let storage = chain.read().storage();
let verifier = ChainVerifier::new(storage);
lsync.verification_work_sender = Some(verification_work_sender);
lsync.verification_worker_thread = Some(thread::Builder::new()
.name("Sync verification thread".to_string())
.spawn(move || {
SynchronizationClient::verification_worker_proc(csync, storage, verification_work_receiver)
SynchronizationClient::verification_worker_proc(csync, verifier, verification_work_receiver)
})
.expect("Error creating verification thread"));
}
@ -525,6 +534,11 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
}
}
/// Get configuration parameters.
pub fn config<'a>(&'a self) -> &'a Config {
&self.config
}
/// Process new blocks inventory
fn process_new_blocks_headers(&mut self, peer_index: usize, mut hashes: Vec<H256>, mut headers: Vec<BlockHeader>) {
assert_eq!(hashes.len(), headers.len());
@ -884,11 +898,32 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
}
/// Thread procedure for handling verification tasks
fn verification_worker_proc(sync: Arc<Mutex<Self>>, storage: Arc<db::Store>, work_receiver: Receiver<VerificationTask>) {
let verifier = ChainVerifier::new(storage);
fn verification_worker_proc(sync: Arc<Mutex<Self>>, mut verifier: ChainVerifier, work_receiver: Receiver<VerificationTask>) {
let mut parameters_change_steps = Some(0);
while let Ok(task) = work_receiver.recv() {
match task {
VerificationTask::VerifyBlock(block) => {
// change verifier parameters, if needed
if let Some(steps_left) = parameters_change_steps {
if steps_left == 0 {
let sync = sync.lock();
let config = sync.config();
let best_storage_block = sync.chain.read().best_storage_block();
let is_bip65_active = best_storage_block.number >= config.consensus_params.bip65_height;
verifier.verify_clocktimeverify(is_bip65_active);
if is_bip65_active {
parameters_change_steps = None;
} else {
parameters_change_steps = Some(config.consensus_params.bip65_height - best_storage_block.number);
}
} else {
parameters_change_steps = Some(steps_left - 1);
}
}
// verify block
match verifier.verify(&block) {
Ok(_chain) => {
sync.lock().on_block_verification_success(block)
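
For clarity, the parameters_change_steps countdown above avoids locking the client and re-reading the best stored block for every verified block: it only re-checks BIP65 activation once the previously computed number of blocks has been processed, and stops checking entirely once activation is reached. A simplified, self-contained sketch of that pattern (type and method names are illustrative, not part of the commit):

// None once BIP65 is active; Some(n) means "re-check after n more verified blocks".
struct Bip65Gate {
	steps_left: Option<u32>,
	active: bool,
}

impl Bip65Gate {
	fn new() -> Self {
		// Some(0) forces a check on the very first verified block.
		Bip65Gate { steps_left: Some(0), active: false }
	}

	fn on_block_verified(&mut self, best_height: u32, bip65_height: u32) {
		match self.steps_left {
			// already active: nothing left to re-check
			None => {},
			// countdown elapsed: consult the chain state again
			Some(0) => {
				self.active = best_height >= bip65_height;
				self.steps_left = if self.active {
					None
				} else {
					// activation cannot happen sooner than this many blocks from now
					Some(bip65_height - best_height)
				};
			},
			// still counting down: just decrement
			Some(n) => self.steps_left = Some(n - 1),
		}
	}
}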

View File

@ -13,13 +13,19 @@ const MAX_BLOCK_SIGOPS: usize = 20000;
pub struct ChainVerifier {
store: Arc<db::Store>,
verify_clocktimeverify: bool,
skip_pow: bool,
skip_sig: bool,
}
impl ChainVerifier {
pub fn new(store: Arc<db::Store>) -> Self {
ChainVerifier { store: store, skip_pow: false, skip_sig: false }
ChainVerifier {
store: store,
verify_clocktimeverify: false,
skip_pow: false,
skip_sig: false
}
}
#[cfg(test)]
@ -34,6 +40,10 @@ impl ChainVerifier {
self
}
pub fn verify_clocktimeverify(&mut self, verify: bool) {
self.verify_clocktimeverify = verify;
}
fn ordered_verify(&self, block: &chain::Block, at_height: u32) -> Result<(), Error> {
let coinbase_spends = block.transactions()[0].total_spends();
@ -138,7 +148,9 @@ impl ChainVerifier {
return Err(TransactionError::SigopsAmount);
}
let flags = VerificationFlags::default().verify_p2sh(true);
let flags = VerificationFlags::default()
.verify_p2sh(true)
.verify_clocktimeverify(self.verify_clocktimeverify);
// for tests only, skips as late as possible
if self.skip_sig { continue; }