Merge pull request #444 from paritytech/support_segwit

Support SegWit && SegWit2x
This commit is contained in:
Nikolay Volf 2017-08-28 15:37:33 +03:00 committed by GitHub
commit d7035e235e
48 changed files with 2095 additions and 239 deletions

1
Cargo.lock generated
View File

@ -2,6 +2,7 @@
name = "verification"
version = "0.1.0"
dependencies = [
"bitcrypto 0.1.0",
"chain 0.1.0",
"db 0.1.0",
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -32,6 +32,19 @@ impl Block {
merkle_root(&hashes)
}
/// Returns block's witness merkle root.
pub fn witness_merkle_root(&self) -> H256 {
let hashes = match self.transactions.split_first() {
None => vec![],
Some((_, rest)) => {
let mut hashes = vec![H256::from(0)];
hashes.extend(rest.iter().map(Transaction::witness_hash));
hashes
},
};
merkle_root(&hashes)
}
pub fn transactions(&self) -> &[Transaction] {
&self.transactions
}

View File

@ -1,7 +1,7 @@
use std::cmp;
use hash::H256;
use hex::FromHex;
use ser::{Serializable, serialized_list_size, deserialize};
use ser::{Serializable, serialized_list_size, serialized_list_size_with_flags, deserialize, SERIALIZE_TRANSACTION_WITNESS};
use block::Block;
use transaction::Transaction;
use merkle_root::merkle_root;
@ -54,10 +54,29 @@ impl IndexedBlock {
header_size + txs_size
}
pub fn size_with_witness(&self) -> usize {
let header_size = self.header.raw.serialized_size();
let transactions = self.transactions.iter().map(|tx| &tx.raw).collect::<Vec<_>>();
let txs_size = serialized_list_size_with_flags::<Transaction, &Transaction>(&transactions, SERIALIZE_TRANSACTION_WITNESS);
header_size + txs_size
}
pub fn merkle_root(&self) -> H256 {
merkle_root(&self.transactions.iter().map(|tx| &tx.hash).collect::<Vec<&H256>>())
}
pub fn witness_merkle_root(&self) -> H256 {
let hashes = match self.transactions.split_first() {
None => vec![],
Some((_, rest)) => {
let mut hashes = vec![H256::from(0)];
hashes.extend(rest.iter().map(|tx| tx.raw.witness_hash()));
hashes
},
};
merkle_root(&hashes)
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|tx| tx.raw.is_final_in_block(height, self.header.raw.time))
}
@ -68,3 +87,18 @@ impl From<&'static str> for IndexedBlock {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::IndexedBlock;
#[test]
fn size_with_witness_not_equal_to_size() {
let block_without_witness: IndexedBlock = "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();
assert_eq!(block_without_witness.size(), block_without_witness.size_with_witness());
// bip143 block
let block_with_witness: IndexedBlock = "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000010100000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000000000".into();
assert!(block_with_witness.size() != block_with_witness.size_with_witness());
}
}

View File

@ -1,13 +1,20 @@
//! Bitcoin transaction.
//! https://en.bitcoin.it/wiki/Protocol_documentation#tx
use std::io;
use heapsize::HeapSizeOf;
use hex::FromHex;
use bytes::Bytes;
use ser::{deserialize, serialize};
use ser::{deserialize, serialize, serialize_with_flags, SERIALIZE_TRANSACTION_WITNESS};
use crypto::dhash256;
use hash::H256;
use constants::{SEQUENCE_FINAL, LOCKTIME_THRESHOLD};
use ser::{Error, Serializable, Deserializable, Stream, Reader};
/// Must be zero.
const WITNESS_MARKER: u8 = 0;
/// Must be nonzero.
const WITNESS_FLAG: u8 = 1;
#[derive(Debug, PartialEq, Eq, Clone, Default, Serializable, Deserializable)]
pub struct OutPoint {
@ -28,11 +35,12 @@ impl OutPoint {
}
}
#[derive(Debug, PartialEq, Default, Clone, Serializable, Deserializable)]
#[derive(Debug, PartialEq, Default, Clone)]
pub struct TransactionInput {
pub previous_output: OutPoint,
pub script_sig: Bytes,
pub sequence: u32,
pub script_witness: Vec<Bytes>,
}
impl TransactionInput {
@ -41,17 +49,23 @@ impl TransactionInput {
previous_output: OutPoint::null(),
script_sig: script_sig,
sequence: SEQUENCE_FINAL,
script_witness: vec![],
}
}
pub fn is_final(&self) -> bool {
self.sequence == SEQUENCE_FINAL
}
pub fn has_witness(&self) -> bool {
!self.script_witness.is_empty()
}
}
impl HeapSizeOf for TransactionInput {
fn heap_size_of_children(&self) -> usize {
self.script_sig.heap_size_of_children()
self.script_sig.heap_size_of_children() +
self.script_witness.heap_size_of_children()
}
}
@ -76,7 +90,7 @@ impl HeapSizeOf for TransactionOutput {
}
}
#[derive(Debug, PartialEq, Default, Clone, Serializable, Deserializable)]
#[derive(Debug, PartialEq, Default, Clone)]
pub struct Transaction {
pub version: i32,
pub inputs: Vec<TransactionInput>,
@ -101,6 +115,10 @@ impl Transaction {
dhash256(&serialize(self))
}
pub fn witness_hash(&self) -> H256 {
dhash256(&serialize_with_flags(self, SERIALIZE_TRANSACTION_WITNESS))
}
pub fn inputs(&self) -> &[TransactionInput] {
&self.inputs
}
@ -149,6 +167,10 @@ impl Transaction {
self.inputs.iter().all(TransactionInput::is_final)
}
pub fn has_witness(&self) -> bool {
self.inputs.iter().any(TransactionInput::has_witness)
}
pub fn total_spends(&self) -> u64 {
let mut result = 0u64;
for output in self.outputs.iter() {
@ -161,11 +183,87 @@ impl Transaction {
}
}
impl Serializable for TransactionInput {
fn serialize(&self, stream: &mut Stream) {
stream
.append(&self.previous_output)
.append(&self.script_sig)
.append(&self.sequence);
}
}
impl Deserializable for TransactionInput {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where Self: Sized, T: io::Read {
Ok(TransactionInput {
previous_output: reader.read()?,
script_sig: reader.read()?,
sequence: reader.read()?,
script_witness: vec![],
})
}
}
impl Serializable for Transaction {
fn serialize(&self, stream: &mut Stream) {
let include_transaction_witness = stream.include_transaction_witness() && self.has_witness();
match include_transaction_witness {
false => stream
.append(&self.version)
.append_list(&self.inputs)
.append_list(&self.outputs)
.append(&self.lock_time),
true => {
stream
.append(&self.version)
.append(&WITNESS_MARKER)
.append(&WITNESS_FLAG)
.append_list(&self.inputs)
.append_list(&self.outputs);
for input in &self.inputs {
stream.append_list(&input.script_witness);
}
stream.append(&self.lock_time)
}
};
}
}
impl Deserializable for Transaction {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where Self: Sized, T: io::Read {
let version = reader.read()?;
let mut inputs: Vec<TransactionInput> = reader.read_list()?;
let read_witness = if inputs.is_empty() {
let witness_flag: u8 = reader.read()?;
if witness_flag != WITNESS_FLAG {
return Err(Error::MalformedData);
}
inputs = reader.read_list()?;
true
} else {
false
};
let outputs = reader.read_list()?;
if read_witness {
for input in inputs.iter_mut() {
input.script_witness = reader.read_list()?;
}
}
Ok(Transaction {
version: version,
inputs: inputs,
outputs: outputs,
lock_time: reader.read()?,
})
}
}
#[cfg(test)]
mod tests {
use hash::H256;
use ser::Serializable;
use super::Transaction;
use ser::{Serializable, serialize_with_flags, SERIALIZE_TRANSACTION_WITNESS};
use super::{Transaction, TransactionInput, OutPoint, TransactionOutput};
// real transaction from block 80000
// https://blockchain.info/rawtx/5a4ebf66822b0b2d56bd9dc64ece0bc38ee7844a23ff1d7320a88c5fdb2ad3e2
@ -183,6 +281,7 @@ mod tests {
let tx_output = &t.outputs[0];
assert_eq!(tx_output.value, 5000000000);
assert_eq!(tx_output.script_pubkey, "76a914404371705fa9bd789a2fcd52d2c580b65d35549d88ac".into());
assert!(!t.has_witness());
}
#[test]
@ -198,4 +297,60 @@ mod tests {
let tx: Transaction = raw_tx.into();
assert_eq!(tx.serialized_size(), raw_tx.len() / 2);
}
#[test]
fn test_transaction_reader_with_witness() {
// test case from https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
let actual: Transaction = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000".into();
let expected = Transaction {
version: 1,
inputs: vec![TransactionInput {
previous_output: OutPoint {
hash: "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f".into(),
index: 0,
},
script_sig: "4830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01".into(),
sequence: 0xffffffee,
script_witness: vec![],
}, TransactionInput {
previous_output: OutPoint {
hash: "ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a".into(),
index: 1,
},
script_sig: "".into(),
sequence: 0xffffffff,
script_witness: vec![
"304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01".into(),
"025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357".into(),
],
}],
outputs: vec![TransactionOutput {
value: 0x0000000006b22c20,
script_pubkey: "76a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac".into(),
}, TransactionOutput {
value: 0x000000000d519390,
script_pubkey: "76a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac".into(),
}],
lock_time: 0x00000011,
};
assert_eq!(actual, expected);
}
#[test]
fn test_serialization_with_flags() {
let transaction_without_witness: Transaction = "000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();
assert_eq!(serialize_with_flags(&transaction_without_witness, 0), serialize_with_flags(&transaction_without_witness, SERIALIZE_TRANSACTION_WITNESS));
let transaction_with_witness: Transaction = "0000000000010100000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000000000".into();
assert!(serialize_with_flags(&transaction_with_witness, 0) != serialize_with_flags(&transaction_with_witness, SERIALIZE_TRANSACTION_WITNESS));
}
#[test]
fn test_witness_hash_differs() {
let transaction_without_witness: Transaction = "000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();
assert_eq!(transaction_without_witness.hash(), transaction_without_witness.witness_hash());
let transaction_with_witness: Transaction = "0000000000010100000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000000000".into();
assert!(transaction_with_witness.hash() != transaction_with_witness.witness_hash());
}
}

View File

@ -3,13 +3,16 @@ use hash::H256;
use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError};
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u8)]
#[repr(u32)]
pub enum InventoryType {
Error = 0,
MessageTx = 1,
MessageBlock = 2,
MessageFilteredBlock = 3,
MessageCompactBlock = 4,
MessageWitnessTx = 0x40000001,
MessageWitnessBlock = 0x40000002,
MessageWitnessFilteredBlock = 0x40000003,
}
impl InventoryType {
@ -20,6 +23,9 @@ impl InventoryType {
2 => Some(InventoryType::MessageBlock),
3 => Some(InventoryType::MessageFilteredBlock),
4 => Some(InventoryType::MessageCompactBlock),
0x40000001 => Some(InventoryType::MessageWitnessTx),
0x40000002 => Some(InventoryType::MessageWitnessBlock),
0x40000003 => Some(InventoryType::MessageWitnessFilteredBlock),
_ => None
}
}
@ -122,11 +128,17 @@ mod tests {
assert_eq!(2u32, InventoryType::MessageBlock.into());
assert_eq!(3u32, InventoryType::MessageFilteredBlock.into());
assert_eq!(4u32, InventoryType::MessageCompactBlock.into());
assert_eq!(0x40000001u32, InventoryType::MessageWitnessTx.into());
assert_eq!(0x40000002u32, InventoryType::MessageWitnessBlock.into());
assert_eq!(0x40000003u32, InventoryType::MessageWitnessFilteredBlock.into());
assert_eq!(InventoryType::from_u32(0).unwrap(), InventoryType::Error);
assert_eq!(InventoryType::from_u32(1).unwrap(), InventoryType::MessageTx);
assert_eq!(InventoryType::from_u32(2).unwrap(), InventoryType::MessageBlock);
assert_eq!(InventoryType::from_u32(3).unwrap(), InventoryType::MessageFilteredBlock);
assert_eq!(InventoryType::from_u32(4).unwrap(), InventoryType::MessageCompactBlock);
assert_eq!(InventoryType::from_u32(0x40000001).unwrap(), InventoryType::MessageWitnessTx);
assert_eq!(InventoryType::from_u32(0x40000002).unwrap(), InventoryType::MessageWitnessBlock);
assert_eq!(InventoryType::from_u32(0x40000003).unwrap(), InventoryType::MessageWitnessFilteredBlock);
}
}

View File

@ -2,7 +2,7 @@ use ser::Stream;
use bytes::{TaggedBytes, Bytes};
use network::Magic;
use common::Command;
use serialization::serialize_payload;
use serialization::serialize_payload_with_flags;
use {Payload, MessageResult, MessageHeader};
pub fn to_raw_message(magic: Magic, command: Command, payload: &Bytes) -> Bytes {
@ -19,7 +19,11 @@ pub struct Message<T> {
impl<T> Message<T> where T: Payload {
pub fn new(magic: Magic, version: u32, payload: &T) -> MessageResult<Self> {
let serialized = try!(serialize_payload(payload, version));
Self::with_flags(magic, version, payload, 0)
}
pub fn with_flags(magic: Magic, version: u32, payload: &T, serialization_flags: u32) -> MessageResult<Self> {
let serialized = try!(serialize_payload_with_flags(payload, version, serialization_flags));
let message = Message {
bytes: TaggedBytes::new(to_raw_message(magic, T::command().into(), &serialized)),

View File

@ -1,5 +1,5 @@
mod stream;
mod reader;
pub use self::stream::serialize_payload;
pub use self::stream::{serialize_payload, serialize_payload_with_flags};
pub use self::reader::deserialize_payload;

View File

@ -3,7 +3,11 @@ use ser::Stream;
use {Payload, Error, MessageResult};
pub fn serialize_payload<T>(t: &T, version: u32) -> MessageResult<Bytes> where T: Payload {
let mut stream = PayloadStream::new(version);
serialize_payload_with_flags(t, version, 0)
}
pub fn serialize_payload_with_flags<T>(t: &T, version: u32, serialization_flags: u32) -> MessageResult<Bytes> where T: Payload {
let mut stream = PayloadStream::new(version, serialization_flags);
try!(stream.append(t));
Ok(stream.out())
}
@ -14,9 +18,9 @@ pub struct PayloadStream {
}
impl PayloadStream {
pub fn new(version: u32) -> Self {
pub fn new(version: u32, serialization_flags: u32) -> Self {
PayloadStream {
stream: Stream::default(),
stream: Stream::with_flags(serialization_flags),
version: version,
}
}

View File

@ -1,9 +1,8 @@
use std::cmp::max;
use hash::H256;
use {Magic, Deployment};
/// First block of SegWit2x fork.
pub const SEGWIT2X_FORK_BLOCK: u32 = 0xFFFFFFFF; // not known (yet?)
pub const SEGWIT2X_FORK_BLOCK: u32 = 494784; // https://segwit2x.github.io/segwit2x-announce.html
/// First block of BitcoinCash fork.
pub const BITCOIN_CASH_FORK_BLOCK: u32 = 478559; // https://blockchair.com/bitcoin-cash/block/478559
@ -47,6 +46,7 @@ pub enum ConsensusFork {
/// Technical specification:
/// Segregated Witness (Consensus layer) - https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki
/// Block size increase to 2MB - https://github.com/bitcoin/bips/blob/master/bip-0102.mediawiki
/// Readiness checklist - https://segwit2x.github.io/segwit2x-announce.html
SegWit2x(u32),
/// Bitcoin Cash (aka UAHF).
/// `u32` is height of the first block, for which new consensus rules are applied.
@ -74,9 +74,18 @@ impl ConsensusParams {
bit: 0,
start_time: 1462060800,
timeout: 1493596800,
activation: Some(770112),
activation: Some(419328),
}),
segwit_deployment: None,
segwit_deployment: match fork {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1479168000,
timeout: 1510704000,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
},
Magic::Testnet => ConsensusParams {
network: magic,
@ -92,9 +101,18 @@ impl ConsensusParams {
bit: 0,
start_time: 1456790400,
timeout: 1493596800,
activation: Some(419328),
activation: Some(770112),
}),
segwit_deployment: None,
segwit_deployment: match fork {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1462060800,
timeout: 1493596800,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
},
Magic::Regtest | Magic::Unitest => ConsensusParams {
network: magic,
@ -112,7 +130,16 @@ impl ConsensusParams {
timeout: 0,
activation: Some(0),
}),
segwit_deployment: None,
segwit_deployment: match fork {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 0,
timeout: ::std::u32::MAX,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
},
}
}
@ -129,14 +156,24 @@ impl ConsensusFork {
8_000_000
}
// Absolute (across all forks) maximum number of sigops in single block. Currently is max(sigops) for 8MB post-HF BitcoinCash block
/// Absolute (across all forks) maximum number of sigops in single block. Currently is max(sigops) for 8MB post-HF BitcoinCash block
pub fn absolute_maximum_block_sigops() -> usize {
160_000
}
/// Witness scale factor (equal among all forks)
pub fn witness_scale_factor() -> usize {
4
}
pub fn max_transaction_size(&self) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// SegWit: size * 4 <= 4_000_000 ===> max size of tx is still 1_000_000
1_000_000
}
pub fn min_block_size(&self, height: u32) -> usize {
match *self {
ConsensusFork::SegWit2x(fork_height) if height == fork_height => 0,
// size of first fork block must be larger than 1MB
ConsensusFork::BitcoinCash(fork_height) if height == fork_height => 1_000_001,
ConsensusFork::NoFork | ConsensusFork::BitcoinCash(_) | ConsensusFork::SegWit2x(_) => 0,
@ -151,19 +188,38 @@ impl ConsensusFork {
}
}
pub fn max_transaction_size(&self, _height: u32) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
1_000_000
}
pub fn max_block_sigops(&self, height: u32, block_size: usize) -> usize {
match *self {
// according to REQ-5: max_block_sigops = 20000 * ceil((max(blocksize_bytes, 1000000) / 1000000))
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height && block_size > 1_000_000 =>
20_000 * (max(block_size, 1_000_000) / 1_000_000),
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height =>
20_000 * (1 + (block_size - 1) / 1_000_000),
ConsensusFork::SegWit2x(fork_height) if height >= fork_height =>
40_000,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) => 20_000,
}
}
pub fn max_block_sigops_cost(&self, height: u32, block_size: usize) -> usize {
match *self {
ConsensusFork::BitcoinCash(_) =>
self.max_block_sigops(height, block_size) * Self::witness_scale_factor(),
ConsensusFork::SegWit2x(fork_height) if height >= fork_height =>
160_000,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) =>
80_000,
}
}
pub fn max_block_weight(&self, height: u32) -> usize {
match *self {
ConsensusFork::SegWit2x(fork_height) if height >= fork_height =>
8_000_000,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) =>
4_000_000,
ConsensusFork::BitcoinCash(_) =>
unreachable!("BitcoinCash has no SegWit; weight is only checked with SegWit activated; qed"),
}
}
}
#[cfg(test)]
@ -217,17 +273,17 @@ mod tests {
#[test]
fn test_consensus_fork_max_transaction_size() {
assert_eq!(ConsensusFork::NoFork.max_transaction_size(0), 1_000_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_transaction_size(0), 1_000_000);
assert_eq!(ConsensusFork::BitcoinCash(100).max_transaction_size(0), 1_000_000);
assert_eq!(ConsensusFork::NoFork.max_transaction_size(), 1_000_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_transaction_size(), 1_000_000);
assert_eq!(ConsensusFork::BitcoinCash(100).max_transaction_size(), 1_000_000);
}
#[test]
fn test_consensus_fork_max_block_sigops() {
assert_eq!(ConsensusFork::NoFork.max_block_sigops(0, 1_000_000), 20_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_block_sigops(0, 1_000_000), 20_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_block_sigops(100, 2_000_000), 20_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_block_sigops(200, 3_000_000), 20_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_block_sigops(100, 2_000_000), 40_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_block_sigops(200, 3_000_000), 40_000);
assert_eq!(ConsensusFork::BitcoinCash(100).max_block_sigops(0, 1_000_000), 20_000);
assert_eq!(ConsensusFork::BitcoinCash(100).max_block_sigops(100, 2_000_000), 40_000);
assert_eq!(ConsensusFork::BitcoinCash(100).max_block_sigops(200, 3_000_000), 60_000);

View File

@ -61,7 +61,12 @@ impl PeerContext {
/// Request is always automatically sent.
pub fn send_request<T>(&self, payload: &T) where T: Payload {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.send_request_with_flags(payload, 0)
}
/// Request is always automatically sent.
pub fn send_request_with_flags<T>(&self, payload: &T, serialization_flags: u32) where T: Payload {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, serialization_flags);
self.context.spawn(send);
}
@ -94,14 +99,14 @@ impl PeerContext {
let mut queue = self.response_queue.lock();
if is_final {
if sync.permission_for_response(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, 0);
self.context.spawn(send);
self.send_awaiting(&mut sync, &mut queue, id);
} else {
queue.push_finished_response(id, self.to_message(payload).into());
}
} else if sync.is_permitted(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, 0);
self.context.spawn(send);
} else {
queue.push_unfinished_response(id, self.to_message(payload).into());

View File

@ -320,11 +320,11 @@ impl Context {
}
/// Send message to a channel with given peer id.
pub fn send_to_peer<T>(context: Arc<Context>, peer: PeerId, payload: &T) -> IoFuture<()> where T: Payload {
pub fn send_to_peer<T>(context: Arc<Context>, peer: PeerId, payload: &T, serialization_flags: u32) -> IoFuture<()> where T: Payload {
match context.connections.channel(peer) {
Some(channel) => {
let info = channel.peer_info();
let message = Message::new(info.magic, info.version, payload).expect("failed to create outgoing message");
let message = Message::with_flags(info.magic, info.version, payload, serialization_flags).expect("failed to create outgoing message");
channel.session().stats().lock().report_send(T::command().into(), message.len());
Context::send(context, channel, message)
},
@ -395,8 +395,8 @@ impl Context {
}
}
pub fn create_sync_session(&self, start_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
self.local_sync_node.create_sync_session(start_height, outbound_connection)
pub fn create_sync_session(&self, start_height: i32, services: Services, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
self.local_sync_node.create_sync_session(start_height, services, outbound_connection)
}
pub fn connections(&self) -> &Connections {

View File

@ -1,15 +1,16 @@
use std::sync::Arc;
use bytes::Bytes;
use message::{Command, Error, Payload, types, deserialize_payload};
use message::{Command, Error, Payload, Services, types, deserialize_payload};
use protocol::Protocol;
use net::PeerContext;
use ser::SERIALIZE_TRANSACTION_WITNESS;
pub type InboundSyncConnectionRef = Box<InboundSyncConnection>;
pub type OutboundSyncConnectionRef = Arc<OutboundSyncConnection>;
pub type LocalSyncNodeRef = Box<LocalSyncNode>;
pub trait LocalSyncNode : Send + Sync {
fn create_sync_session(&self, height: i32, outbound: OutboundSyncConnectionRef) -> InboundSyncConnectionRef;
fn create_sync_session(&self, height: i32, services: Services, outbound: OutboundSyncConnectionRef) -> InboundSyncConnectionRef;
}
pub trait InboundSyncConnection : Send + Sync {
@ -43,6 +44,8 @@ pub trait OutboundSyncConnection : Send + Sync {
fn send_getheaders(&self, message: &types::GetHeaders);
fn send_transaction(&self, message: &types::Tx);
fn send_block(&self, message: &types::Block);
fn send_witness_transaction(&self, message: &types::Tx);
fn send_witness_block(&self, message: &types::Block);
fn send_headers(&self, message: &types::Headers);
fn respond_headers(&self, message: &types::Headers, id: u32);
fn send_mempool(&self, message: &types::MemPool);
@ -98,6 +101,14 @@ impl OutboundSyncConnection for OutboundSync {
self.context.send_request(message);
}
fn send_witness_transaction(&self, message: &types::Tx) {
self.context.send_request_with_flags(message, SERIALIZE_TRANSACTION_WITNESS);
}
fn send_witness_block(&self, message: &types::Block) {
self.context.send_request_with_flags(message, SERIALIZE_TRANSACTION_WITNESS);
}
fn send_headers(&self, message: &types::Headers) {
self.context.send_request(message);
}
@ -172,7 +183,7 @@ pub struct SyncProtocol {
impl SyncProtocol {
pub fn new(context: Arc<PeerContext>) -> Self {
let outbound_connection = Arc::new(OutboundSync::new(context.clone()));
let inbound_connection = context.global().create_sync_session(0, outbound_connection);
let inbound_connection = context.global().create_sync_session(0, context.info().version_message.services(), outbound_connection);
SyncProtocol {
inbound_connection: inbound_connection,
context: context,

View File

@ -3,7 +3,7 @@ use clap;
use message::Services;
use network::{Magic, ConsensusParams, ConsensusFork, SEGWIT2X_FORK_BLOCK, BITCOIN_CASH_FORK_BLOCK};
use p2p::InternetProtocol;
use seednodes::{mainnet_seednodes, testnet_seednodes};
use seednodes::{mainnet_seednodes, testnet_seednodes, segwit2x_seednodes};
use rpc_apis::ApiSet;
use {USER_AGENT, REGTEST_USER_AGENT};
use primitives::hash::H256;
@ -81,7 +81,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
None => None,
};
let seednodes = match matches.value_of("seednode") {
let mut seednodes: Vec<String> = match matches.value_of("seednode") {
Some(s) => vec![s.parse().map_err(|_| "Invalid seednode".to_owned())?],
None => match magic {
Magic::Mainnet => mainnet_seednodes().into_iter().map(Into::into).collect(),
@ -89,6 +89,10 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
Magic::Other(_) | Magic::Regtest | Magic::Unitest => Vec::new(),
},
};
match consensus_fork {
ConsensusFork::SegWit2x(_) => seednodes.extend(segwit2x_seednodes().into_iter().map(Into::into)),
_ => (),
}
let db_cache = match matches.value_of("db-cache") {
Some(s) => s.parse().map_err(|_| "Invalid cache size - should be number in MB".to_owned())?,
@ -115,7 +119,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let services = Services::default().with_network(true);
let services = match consensus.fork {
ConsensusFork::BitcoinCash(_) => services.with_bitcoin_cash(true),
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => services,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => services.with_witness(true),
};
let verification_level = match matches.value_of("verification-level") {

View File

@ -27,3 +27,12 @@ pub fn testnet_seednodes() -> Vec<&'static str> {
"testnet-seed.voskuil.org:18333",
]
}
pub fn segwit2x_seednodes() -> Vec<&'static str> {
vec![
"seed.mainnet.b-pay.net:8333",
"seed.ob1.io:8333",
"seed.blockchain.info:8333",
"bitcoin.bloqseeds.net:8333",
]
}

View File

@ -2,6 +2,7 @@
use std::{ops, str, fmt, io, marker};
use hex::{ToHex, FromHex, FromHexError};
use heapsize::HeapSizeOf;
/// Wrapper around `Vec<u8>`
#[derive(Default, PartialEq, Clone, Eq, Hash)]
@ -25,6 +26,12 @@ impl Bytes {
}
}
impl HeapSizeOf for Bytes {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children()
}
}
impl<'a> From<&'a [u8]> for Bytes {
fn from(v: &[u8]) -> Self {
Bytes(v.into())

View File

@ -48,6 +48,7 @@ impl RawClientCore {
},
script_sig: GlobalBytes::new(), // default script
sequence: input.sequence.unwrap_or(default_sequence),
script_witness: vec![],
}).collect();
// prepare outputs

View File

@ -51,6 +51,16 @@ pub enum Error {
// Softfork safeness
DiscourageUpgradableNops,
DiscourageUpgradableWitnessProgram,
// SegWit-related errors
WitnessProgramWrongLength,
WitnessProgramWitnessEmpty,
WitnessProgramMismatch,
WitnessMalleated,
WitnessMalleatedP2SH,
WitnessUnexpected,
WitnessPubKeyType,
}
impl fmt::Display for Error {
@ -101,6 +111,16 @@ impl fmt::Display for Error {
// Softfork safeness
Error::DiscourageUpgradableNops => "Discourage Upgradable Nops".fmt(f),
Error::DiscourageUpgradableWitnessProgram => "Discourage Upgradable Witness Program".fmt(f),
// SegWit-related errors
Error::WitnessProgramWrongLength => "Witness program has incorrect length".fmt(f),
Error::WitnessProgramWitnessEmpty => "Witness program was passed an empty witness".fmt(f),
Error::WitnessProgramMismatch => "Witness program hash mismatch".fmt(f),
Error::WitnessMalleated => "Witness requires empty scriptSig".fmt(f),
Error::WitnessMalleatedP2SH => "Witness requires only-redeemscript scriptSig".fmt(f),
Error::WitnessUnexpected => "Witness provided for non-witness script".fmt(f),
Error::WitnessPubKeyType => "Using non-compressed keys in segwit".fmt(f),
}
}
}

View File

@ -92,5 +92,20 @@ impl VerificationFlags {
self.verify_dersig = value;
self
}
pub fn verify_witness(mut self, value: bool) -> Self {
self.verify_witness = value;
self
}
pub fn verify_nulldummy(mut self, value: bool) -> Self {
self.verify_nulldummy = value;
self
}
pub fn verify_discourage_upgradable_witness_program(mut self, value: bool) -> Self {
self.verify_discourage_upgradable_witness_program = value;
self
}
}

File diff suppressed because it is too large Load Diff

View File

@ -24,7 +24,7 @@ pub use self::flags::VerificationFlags;
pub use self::interpreter::{eval_script, verify_script};
pub use self::opcode::Opcode;
pub use self::num::Num;
pub use self::script::{Script, ScriptType, ScriptAddress};
pub use self::script::{Script, ScriptType, ScriptAddress, ScriptWitness, is_witness_commitment_script};
pub use self::sign::{TransactionInputSigner, UnsignedTransactionInput, SignatureVersion};
pub use self::stack::Stack;
pub use self::verify::{SignatureChecker, NoopSignatureChecker, TransactionSignatureChecker};

View File

@ -99,6 +99,11 @@ impl Script {
self.data.clone()
}
/// Is empty script
pub fn is_empty(&self) -> bool {
self.data.len() == 0
}
/// Extra-fast test for pay-to-public-key-hash (P2PKH) scripts.
pub fn is_pay_to_public_key_hash(&self) -> bool {
self.data.len() == 25 &&
@ -139,6 +144,20 @@ impl Script {
self.data[1] == Opcode::OP_PUSHBYTES_20 as u8
}
/// Parse witness program. Returns Some(witness program version, code) or None if not a witness program.
pub fn parse_witness_program(&self) -> Option<(u8, &[u8])> {
if self.data.len() < 4 || self.data.len() > 42 || self.data.len() != self.data[1] as usize + 2 {
return None;
}
let witness_version = match Opcode::from_u8(self.data[0]) {
Some(Opcode::OP_0) => 0,
Some(x) if x >= Opcode::OP_1 && x <= Opcode::OP_16 => (x as u8) - (Opcode::OP_1 as u8) + 1,
_ => return None,
};
let witness_program = &self.data[2..];
Some((witness_version, witness_program))
}
/// Extra-fast test for pay-to-witness-script-hash scripts.
pub fn is_pay_to_witness_script_hash(&self) -> bool {
self.data.len() == 34 &&
@ -331,6 +350,10 @@ impl Script {
ScriptType::Multisig
} else if self.is_null_data_script() {
ScriptType::NullData
} else if self.is_pay_to_witness_key_hash() {
ScriptType::WitnessKey
} else if self.is_pay_to_witness_script_hash() {
ScriptType::WitnessScript
} else {
ScriptType::NonStandard
}
@ -547,6 +570,20 @@ impl fmt::Display for Script {
}
}
pub type ScriptWitness = Vec<Bytes>;
/// Passed bytes array is a commitment script?
/// https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#Commitment_structure
///
/// The commitment output script is `OP_RETURN OP_PUSHBYTES_36 0xAA21A9ED <32-byte root>`,
/// i.e. at least 38 bytes long (1 + 1 + 4 + 32); trailing bytes beyond 38 are permitted.
pub fn is_witness_commitment_script(script: &[u8]) -> bool {
	// Minimum length is 38 (as in Bitcoin Core), not 36: script[1] == 0x24 claims a
	// 36-byte push (4-byte header + 32-byte witness root), so those bytes must
	// actually be present — otherwise readers of script[6..38] would go out of bounds.
	script.len() >= 38 &&
		script[0] == Opcode::OP_RETURN as u8 &&
		script[1] == 0x24 &&
		script[2] == 0xAA &&
		script[3] == 0x21 &&
		script[4] == 0xA9 &&
		script[5] == 0xED
}
#[cfg(test)]
mod tests {
use {Builder, Opcode};

View File

@ -132,18 +132,10 @@ impl From<Transaction> for TransactionInputSigner {
impl TransactionInputSigner {
pub fn signature_hash(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sigversion: SignatureVersion, sighashtype: u32) -> H256 {
let sighash = Sighash::from_u32(sigversion, sighashtype);
if input_index >= self.inputs.len() {
return 1u8.into();
}
if sighash.base == SighashBase::Single && input_index >= self.outputs.len() {
return 1u8.into();
}
match sigversion {
SignatureVersion::ForkId if sighash.fork_id => self.signature_hash_fork_id(input_index, input_amount, script_pubkey, sighashtype, sighash),
SignatureVersion::Base | SignatureVersion::ForkId => self.signature_hash_original(input_index, script_pubkey, sighashtype, sighash),
_ => 1u8.into(),
SignatureVersion::WitnessV0 => self.signature_hash_witness0(input_index, input_amount, script_pubkey, sighashtype, sighash),
}
}
@ -172,10 +164,19 @@ impl TransactionInputSigner {
previous_output: unsigned_input.previous_output.clone(),
sequence: unsigned_input.sequence,
script_sig: script_sig.to_bytes(),
script_witness: vec![],
}
}
pub fn signature_hash_original(&self, input_index: usize, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
if input_index >= self.inputs.len() {
return 1u8.into();
}
if sighash.base == SighashBase::Single && input_index >= self.outputs.len() {
return 1u8.into();
}
let script_pubkey = script_pubkey.without_separators();
let inputs = if sighash.anyone_can_pay {
@ -184,6 +185,7 @@ impl TransactionInputSigner {
previous_output: input.previous_output.clone(),
script_sig: script_pubkey.to_bytes(),
sequence: input.sequence,
script_witness: vec![],
}]
} else {
self.inputs.iter()
@ -199,6 +201,7 @@ impl TransactionInputSigner {
SighashBase::Single | SighashBase::None if n != input_index => 0,
_ => input.sequence,
},
script_witness: vec![],
})
.collect()
};
@ -231,44 +234,10 @@ impl TransactionInputSigner {
dhash256(&out)
}
fn signature_hash_fork_id(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
let hash_prevouts = match sighash.anyone_can_pay {
false => {
let mut stream = Stream::default();
for input in &self.inputs {
stream.append(&input.previous_output);
}
dhash256(&stream.out())
},
true => 0u8.into(),
};
let hash_sequence = match sighash.base {
SighashBase::All if !sighash.anyone_can_pay => {
let mut stream = Stream::default();
for input in &self.inputs {
stream.append(&input.sequence);
}
dhash256(&stream.out())
},
_ => 0u8.into(),
};
let hash_outputs = match sighash.base {
SighashBase::All => {
let mut stream = Stream::default();
for output in &self.outputs {
stream.append(output);
}
dhash256(&stream.out())
},
SighashBase::Single if input_index < self.outputs.len() => {
let mut stream = Stream::default();
stream.append(&self.outputs[input_index]);
dhash256(&stream.out())
},
_ => 0u8.into(),
};
fn signature_hash_witness0(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
let hash_prevouts = compute_hash_prevouts(sighash, &self.inputs);
let hash_sequence = compute_hash_sequence(sighash, &self.inputs);
let hash_outputs = compute_hash_outputs(sighash, input_index, &self.outputs);
let mut stream = Stream::default();
stream.append(&self.version);
@ -284,6 +253,62 @@ impl TransactionInputSigner {
let out = stream.out();
dhash256(&out)
}
/// Computes the fork-id (BIP143-style) signature hash for the given input.
/// Out-of-range indices yield the sentinel hash `1`, mirroring the original
/// sighash algorithm's behaviour for invalid inputs.
fn signature_hash_fork_id(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
// invalid input index: return the canonical "one" hash instead of panicking
if input_index >= self.inputs.len() {
return 1u8.into();
}
// SIGHASH_SINGLE with no matching output is likewise invalid
if sighash.base == SighashBase::Single && input_index >= self.outputs.len() {
return 1u8.into();
}
// otherwise the fork-id digest is computed exactly like the witness-v0 digest
self.signature_hash_witness0(input_index, input_amount, script_pubkey, sighashtype, sighash)
}
}
/// `hashPrevouts` component of the witness/fork-id signature hash:
/// double-SHA256 of all serialized input outpoints, or the zero hash when
/// ANYONECANPAY is set (each input then commits only to itself).
fn compute_hash_prevouts(sighash: Sighash, inputs: &[UnsignedTransactionInput]) -> H256 {
	// `match` on a bool is non-idiomatic (clippy::match_bool); use if/else.
	if sighash.anyone_can_pay {
		0u8.into()
	} else {
		let mut stream = Stream::default();
		for input in inputs {
			stream.append(&input.previous_output);
		}
		dhash256(&stream.out())
	}
}
/// `hashSequence` component of the witness/fork-id signature hash:
/// double-SHA256 of every input's sequence number, committed only for
/// SIGHASH_ALL without ANYONECANPAY; otherwise the zero hash.
fn compute_hash_sequence(sighash: Sighash, inputs: &[UnsignedTransactionInput]) -> H256 {
	match sighash.base {
		SighashBase::All if !sighash.anyone_can_pay => {
			let mut s = Stream::default();
			inputs.iter().for_each(|input| { s.append(&input.sequence); });
			dhash256(&s.out())
		},
		// NONE, SINGLE, or ANYONECANPAY: sequences are not committed to.
		_ => 0u8.into(),
	}
}
/// `hashOutputs` component of the witness/fork-id signature hash.
fn compute_hash_outputs(sighash: Sighash, input_index: usize, outputs: &[TransactionOutput]) -> H256 {
	match sighash.base {
		// SIGHASH_ALL: commit to every output.
		SighashBase::All => {
			let mut s = Stream::default();
			outputs.iter().for_each(|out| { s.append(out); });
			dhash256(&s.out())
		},
		// SIGHASH_SINGLE: commit only to the output paired with this input.
		SighashBase::Single if input_index < outputs.len() => {
			let mut s = Stream::default();
			s.append(&outputs[input_index]);
			dhash256(&s.out())
		},
		// SIGHASH_NONE, or SINGLE with no matching output: the zero hash.
		_ => 0u8.into(),
	}
}
#[cfg(test)]

View File

@ -12,5 +12,8 @@ pub use primitives::{hash, bytes, compact};
pub use compact_integer::CompactInteger;
pub use list::List;
pub use reader::{Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator, Error};
pub use stream::{Stream, Serializable, serialize, serialize_list, serialized_list_size};
pub use stream::{
Stream, Serializable, serialize, serialize_with_flags, serialize_list, serialized_list_size,
serialized_list_size_with_flags, SERIALIZE_TRANSACTION_WITNESS,
};

View File

@ -4,12 +4,21 @@ use std::borrow::Borrow;
use compact_integer::CompactInteger;
use bytes::Bytes;
/// Serialization flag: when set on a `Stream`, transactions are serialized *with*
/// their witness data (`Stream::include_transaction_witness` tests exactly this bit).
pub const SERIALIZE_TRANSACTION_WITNESS: u32 = 0x40000000;
/// Serializes `t` into a fresh byte buffer using a default (flag-less) stream.
pub fn serialize<T>(t: &T) -> Bytes where T: Serializable {
	let mut s = Stream::default();
	s.append(t);
	s.out()
}
/// Serializes `t` into a fresh byte buffer, honouring the given serialization `flags`
/// (e.g. `SERIALIZE_TRANSACTION_WITNESS`).
pub fn serialize_with_flags<T>(t: &T, flags: u32) -> Bytes where T: Serializable {
	let mut s = Stream::with_flags(flags);
	s.append(t);
	s.out()
}
pub fn serialize_list<T, K>(t: &[K]) -> Bytes where T: Serializable, K: Borrow<T> {
let mut stream = Stream::default();
stream.append_list(t);
@ -21,6 +30,11 @@ pub fn serialized_list_size<T, K>(t: &[K]) -> usize where T: Serializable, K: Bo
t.iter().map(Borrow::borrow).map(Serializable::serialized_size).sum::<usize>()
}
/// Serialized size of a list under the given `flags`: the compact-size length
/// prefix plus the flag-aware serialized size of every element.
pub fn serialized_list_size_with_flags<T, K>(t: &[K], flags: u32) -> usize where T: Serializable, K: Borrow<T> {
	let elements: usize = t.iter().map(|item| item.borrow().serialized_size_with_flags(flags)).sum();
	CompactInteger::from(t.len()).serialized_size() + elements
}
pub trait Serializable {
/// Serialize the struct and appends it to the end of stream.
fn serialize(&self, s: &mut Stream);
@ -30,18 +44,35 @@ pub trait Serializable {
// fallback implementation
serialize(self).len()
}
/// Hint about the size of serialized struct with given flags.
/// The default implementation serializes the value and measures the result —
/// correct but allocating; implementors can override with a direct computation.
fn serialized_size_with_flags(&self, flags: u32) -> usize where Self: Sized {
// fallback implementation
serialize_with_flags(self, flags).len()
}
}
/// Stream used for serialization of Bitcoin structures
#[derive(Default)]
pub struct Stream {
buffer: Vec<u8>,
flags: u32,
}
impl Stream {
/// New stream
pub fn new() -> Self {
Stream { buffer: Vec::new() }
Stream { buffer: Vec::new(), flags: 0 }
}
/// Creates a stream that serializes with the given `flags`
/// (e.g. `SERIALIZE_TRANSACTION_WITNESS`).
pub fn with_flags(flags: u32) -> Self {
	// field-init shorthand instead of `flags: flags`
	Stream { buffer: Vec::new(), flags }
}
/// Are transactions written to this stream with witness data?
/// True when the `SERIALIZE_TRANSACTION_WITNESS` bit is set in the stream's flags.
pub fn include_transaction_witness(&self) -> bool {
(self.flags & SERIALIZE_TRANSACTION_WITNESS) != 0
}
/// Serializes the struct and appends it to the end of stream.

View File

@ -196,6 +196,8 @@ pub mod tests {
fn send_getheaders(&self, _message: &types::GetHeaders) { *self.messages.lock().entry("getheaders".to_owned()).or_insert(0) += 1; }
fn send_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("transaction".to_owned()).or_insert(0) += 1; }
fn send_block(&self, _message: &types::Block) { *self.messages.lock().entry("block".to_owned()).or_insert(0) += 1; }
fn send_witness_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("witness_transaction".to_owned()).or_insert(0) += 1; }
fn send_witness_block(&self, _message: &types::Block) { *self.messages.lock().entry("witness_block".to_owned()).or_insert(0) += 1; }
fn send_headers(&self, _message: &types::Headers) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn respond_headers(&self, _message: &types::Headers, _id: RequestId) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn send_mempool(&self, _message: &types::MemPool) { *self.messages.lock().entry("mempool".to_owned()).or_insert(0) += 1; }

View File

@ -1,5 +1,6 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use p2p::{LocalSyncNode, LocalSyncNodeRef, OutboundSyncConnectionRef, InboundSyncConnectionRef};
use message::Services;
use inbound_connection::InboundConnection;
use types::{PeersRef, LocalNodeRef};
@ -30,11 +31,11 @@ impl InboundConnectionFactory {
}
impl LocalSyncNode for InboundConnectionFactory {
fn create_sync_session(&self, _best_block_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
fn create_sync_session(&self, _best_block_height: i32, services: Services, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
let peer_index = self.counter.fetch_add(1, Ordering::SeqCst) + 1;
trace!(target: "sync", "Creating new sync session with peer#{}", peer_index);
// remember outbound connection
self.peers.insert(peer_index, outbound_connection);
self.peers.insert(peer_index, services, outbound_connection);
// create new inbound connection
InboundConnection::new(peer_index, self.peers.clone(), self.node.clone()).boxed()
}

View File

@ -41,6 +41,7 @@ pub use types::PeersRef;
use std::sync::Arc;
use parking_lot::RwLock;
use message::Services;
use network::{Magic, ConsensusParams};
use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
@ -107,7 +108,11 @@ pub fn create_local_sync_node(consensus: ConsensusParams, db: db::SharedStore, p
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone()));
let sync_chain = SyncChain::new(db.clone(), memory_pool.clone());
let sync_chain = SyncChain::new(db.clone(), consensus.clone(), memory_pool.clone());
if sync_chain.is_segwit_active() {
peers.require_peer_services(Services::default().with_witness(true));
}
let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), consensus.clone()));
let sync_executor = SyncExecutor::new(peers.clone());
let sync_server = Arc::new(ServerImpl::new(peers.clone(), db.clone(), memory_pool.clone(), sync_executor.clone()));

View File

@ -374,7 +374,7 @@ pub mod tests {
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
let chain = Chain::new(storage.clone(), memory_pool.clone());
let chain = Chain::new(storage.clone(), ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), memory_pool.clone());
let sync_peers = Arc::new(PeersImpl::default());
let executor = DummyTaskExecutor::new();
let server = Arc::new(DummyServer::new());

View File

@ -4,10 +4,12 @@ use linked_hash_map::LinkedHashMap;
use chain::{BlockHeader, Transaction, IndexedBlockHeader, IndexedBlock, IndexedTransaction};
use db;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation};
use network::ConsensusParams;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use utils::{BestHeadersChain, BestHeadersChainInformation, HashQueueChain, HashPosition};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use verification::Deployments;
/// Index of 'verifying' queue
const VERIFYING_QUEUE: usize = 0;
@ -104,6 +106,8 @@ pub struct Chain {
best_storage_block: db::BestBlock,
/// Local blocks storage
storage: StorageRef,
/// Consensus params.
consensus: ConsensusParams,
/// In-memory queue of blocks hashes
hash_chain: HashQueueChain,
/// In-memory queue of blocks headers
@ -114,6 +118,8 @@ pub struct Chain {
memory_pool: MemoryPoolRef,
/// Blocks that have been marked as dead-ends
dead_end_blocks: HashSet<H256>,
/// Is SegWit active?
is_segwit_active: bool,
}
impl BlockState {
@ -138,22 +144,25 @@ impl BlockState {
impl Chain {
/// Create new `Chain` with given storage
pub fn new(storage: StorageRef, memory_pool: MemoryPoolRef) -> Self {
pub fn new(storage: StorageRef, consensus: ConsensusParams, memory_pool: MemoryPoolRef) -> Self {
// we only work with storages with genesis block
let genesis_block_hash = storage.block_hash(0)
.expect("storage with genesis block is required");
let best_storage_block = storage.best_block();
let best_storage_block_hash = best_storage_block.hash.clone();
let is_segwit_active = Deployments::new().segwit(best_storage_block.number, storage.as_block_header_provider(), &consensus);
Chain {
genesis_block_hash: genesis_block_hash,
best_storage_block: best_storage_block,
storage: storage,
consensus: consensus,
hash_chain: HashQueueChain::with_number_of_queues(NUMBER_OF_QUEUES),
headers_chain: BestHeadersChain::new(best_storage_block_hash),
verifying_transactions: LinkedHashMap::new(),
memory_pool: memory_pool,
dead_end_blocks: HashSet::new(),
is_segwit_active: is_segwit_active,
}
}
@ -179,6 +188,11 @@ impl Chain {
self.memory_pool.clone()
}
/// Is segwit active
pub fn is_segwit_active(&self) -> bool {
self.is_segwit_active
}
/// Get number of blocks in given state
pub fn length_of_blocks_state(&self, state: BlockState) -> BlockHeight {
match state {
@ -348,7 +362,8 @@ impl Chain {
self.storage.canonize(block.hash())?;
// remember new best block hash
self.best_storage_block = self.storage.best_block();
self.best_storage_block = self.storage.as_store().best_block();
self.is_segwit_active = Deployments::new().segwit(self.best_storage_block.number, self.storage.as_block_header_provider(), &self.consensus);
// remove inserted block + handle possible reorganization in headers chain
// TODO: mk, not sure if we need both of those params
@ -384,6 +399,7 @@ impl Chain {
// remember new best block hash
self.best_storage_block = self.storage.best_block();
self.is_segwit_active = Deployments::new().segwit(self.best_storage_block.number, self.storage.as_block_header_provider(), &self.consensus);
// remove inserted block + handle possible reorganization in headers chain
// TODO: mk, not sure if we need both of those params
@ -723,6 +739,7 @@ mod tests {
use chain::{Transaction, IndexedBlockHeader};
use db::BlockChainDatabase;
use miner::MemoryPool;
use network::{Magic, ConsensusParams, ConsensusFork};
use primitives::hash::H256;
use super::{Chain, BlockState, TransactionState, BlockInsertionResult};
use utils::HashPosition;
@ -731,7 +748,7 @@ mod tests {
fn chain_empty() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let db_best_block = db.best_block();
let chain = Chain::new(db.clone(), Arc::new(RwLock::new(MemoryPool::new())));
let chain = Chain::new(db.clone(), ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
assert_eq!(chain.information().scheduled, 0);
assert_eq!(chain.information().requested, 0);
assert_eq!(chain.information().verifying, 0);
@ -748,7 +765,7 @@ mod tests {
#[test]
fn chain_block_path() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db.clone(), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db.clone(), ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
// add 6 blocks to scheduled queue
let blocks = test_data::build_n_empty_blocks_from_genesis(6, 0);
@ -800,7 +817,7 @@ mod tests {
#[test]
fn chain_block_locator_hashes() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
let genesis_hash = chain.best_block().hash;
assert_eq!(chain.block_locator_hashes(), vec![genesis_hash.clone()]);
@ -885,7 +902,7 @@ mod tests {
#[test]
fn chain_transaction_state() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
let genesis_block = test_data::genesis();
let block1 = test_data::block_h1();
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
@ -922,7 +939,7 @@ mod tests {
let tx2_hash = tx2.hash();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2.into());
@ -946,7 +963,7 @@ mod tests {
.set_default_input(0).set_output(400).store(test_chain); // t4
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(test_chain.at(0).into());
chain.verify_transaction(test_chain.at(1).into());
chain.verify_transaction(test_chain.at(2).into());
@ -968,7 +985,7 @@ mod tests {
.set_default_input(0).set_output(400).store(test_chain); // t4
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(test_chain.at(0).into());
chain.insert_verified_transaction(test_chain.at(1).into());
chain.insert_verified_transaction(test_chain.at(2).into());
@ -994,7 +1011,7 @@ mod tests {
let tx2_hash = tx2.hash();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2.into());
@ -1042,7 +1059,7 @@ mod tests {
let tx5 = b5.transactions[0].clone();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![genesis.into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx3.into());
chain.insert_verified_transaction(tx4.into());
@ -1086,7 +1103,7 @@ mod tests {
// insert tx2 to memory pool
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx2.clone().into());
chain.insert_verified_transaction(tx3.clone().into());
// insert verified block with tx1
@ -1105,7 +1122,7 @@ mod tests {
.reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(data_chain.at(1).into());
assert_eq!(chain.information().transactions.transactions_count, 1);
chain.insert_verified_transaction(data_chain.at(2).into());

View File

@ -6,7 +6,7 @@ use futures::Future;
use parking_lot::Mutex;
use time::precise_time_s;
use chain::{IndexedBlockHeader, IndexedTransaction, Transaction, IndexedBlock};
use message::types;
use message::{types, Services};
use message::common::{InventoryType, InventoryVector};
use miner::transaction_fee_rate;
use primitives::hash::H256;
@ -226,6 +226,8 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
// else ask for all unknown transactions and blocks
let is_segwit_active = self.chain.is_segwit_active();
let ask_for_witness = is_segwit_active && self.peers.is_segwit_enabled(peer_index);
let unknown_inventory: Vec<_> = message.inventory.into_iter()
.filter(|item| {
match item.inv_type {
@ -243,7 +245,9 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
_ => false,
},
// we never ask for merkle blocks && we never ask for compact blocks
InventoryType::MessageCompactBlock | InventoryType::MessageFilteredBlock => false,
InventoryType::MessageCompactBlock | InventoryType::MessageFilteredBlock
| InventoryType::MessageWitnessBlock | InventoryType::MessageWitnessFilteredBlock
| InventoryType::MessageWitnessTx => false,
// unknown inventory type
InventoryType::Error => {
self.peers.misbehaving(peer_index, &format!("Provided unknown inventory type {:?}", item.hash.to_reversed_str()));
@ -251,6 +255,24 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
}
})
// we are not synchronizing =>
// 1) either segwit is active and we are connected to segwit-enabled nodes => we could ask for witness
// 2) or segwit is inactive => we shall not ask for witness
.map(|item| if !ask_for_witness {
item
} else {
match item.inv_type {
InventoryType::MessageTx => InventoryVector {
inv_type: InventoryType::MessageWitnessTx,
hash: item.hash,
},
InventoryType::MessageBlock => InventoryVector {
inv_type: InventoryType::MessageWitnessBlock,
hash: item.hash,
},
_ => item,
}
})
.collect();
// if everything is known => ignore this message
@ -948,6 +970,8 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
let chunk_size = min(limits.max_blocks_in_request, max(hashes.len() as BlockHeight, limits.min_blocks_in_request));
let last_peer_index = peers.len() - 1;
let mut tasks: Vec<Task> = Vec::new();
let is_segwit_active = self.chain.is_segwit_active();
let inv_type = if is_segwit_active { InventoryType::MessageWitnessBlock } else { InventoryType::MessageBlock };
for (peer_index, peer) in peers.into_iter().enumerate() {
// we have to request all blocks => we will request last peer for all remaining blocks
let peer_chunk_size = if peer_index == last_peer_index { hashes.len() } else { min(hashes.len(), chunk_size as usize) };
@ -961,9 +985,12 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// remember that peer is asked for these blocks
self.peers_tasks.on_blocks_requested(peer, &chunk_hashes);
// request blocks
// request blocks. If block is believed to have witness - ask for witness
let getdata = types::GetData {
inventory: chunk_hashes.into_iter().map(InventoryVector::block).collect(),
inventory: chunk_hashes.into_iter().map(|h| InventoryVector {
inv_type: inv_type,
hash: h,
}).collect(),
};
tasks.push(Task::GetData(peer, getdata));
}
@ -1043,6 +1070,9 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// update block processing speed
self.block_speed_meter.checkpoint();
// remember if SegWit was active before this block
let segwit_was_active = self.chain.is_segwit_active();
// remove flags
let needs_relay = !self.do_not_relay.remove(block.hash());
@ -1063,6 +1093,13 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// update shared state
self.shared_state.update_best_storage_block_height(self.chain.best_storage_block().number);
// if SegWit activated after this block insertion:
// 1) no more connections to !NODE_WITNESS nodes
// 2) disconnect from all nodes without NODE_WITNESS support
if !segwit_was_active && self.chain.is_segwit_active() {
self.peers.require_peer_services(Services::default().with_witness(true));
}
// notify listener
if let Some(best_block_hash) = insert_result.canonized_blocks_hashes.last() {
if let Some(ref listener) = self.listener {
@ -1100,7 +1137,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
Err(e) => {
// process as irrecoverable failure
panic!("Block {} insertion failed with error {:?}", block_hash.to_reversed_str(), e);
}
},
}
}
@ -1226,7 +1263,7 @@ pub mod tests {
use chain::{Block, Transaction};
use db::BlockChainDatabase;
use message::common::InventoryVector;
use message::types;
use message::{Services, types};
use miner::MemoryPool;
use network::{ConsensusParams, ConsensusFork, Magic};
use primitives::hash::H256;
@ -1279,7 +1316,7 @@ pub mod tests {
};
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let chain = Chain::new(storage.clone(), memory_pool.clone());
let chain = Chain::new(storage.clone(), ConsensusParams::new(Magic::Unitest, ConsensusFork::NoFork), memory_pool.clone());
let executor = DummyTaskExecutor::new();
let config = Config { close_connection_on_bad_block: true };
@ -2080,7 +2117,7 @@ pub mod tests {
let (_, core, sync) = create_sync(None, Some(dummy_verifier));
core.lock().peers.insert(0, DummyOutboundSyncConnection::new());
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_block(0, b0.into());
@ -2101,7 +2138,7 @@ pub mod tests {
chain.mark_dead_end_block(&b0.hash());
}
core.lock().peers.insert(0, DummyOutboundSyncConnection::new());
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_headers(0, types::Headers::with_headers(vec![b0.block_header.clone(), b1.block_header.clone(), b2.block_header.clone()]));
@ -2123,7 +2160,7 @@ pub mod tests {
}
core.lock().set_verify_headers(true);
core.lock().peers.insert(0, DummyOutboundSyncConnection::new());
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_headers(0, types::Headers::with_headers(vec![b0.block_header.clone(), b1.block_header.clone(), b2.block_header.clone()]));
@ -2142,7 +2179,7 @@ pub mod tests {
chain.mark_dead_end_block(&b0.hash());
}
core.lock().peers.insert(0, DummyOutboundSyncConnection::new());
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_block(0, b0.into());
@ -2162,7 +2199,7 @@ pub mod tests {
chain.mark_dead_end_block(&b0.hash());
}
core.lock().peers.insert(0, DummyOutboundSyncConnection::new());
core.lock().peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(core.lock().peers.enumerate().contains(&0));
sync.on_block(0, b1.into());

View File

@ -28,8 +28,12 @@ pub enum Task {
MerkleBlock(PeerIndex, types::MerkleBlock),
/// Send cmpcmblock
CompactBlock(PeerIndex, types::CompactBlock),
/// Send block with witness data
WitnessBlock(PeerIndex, IndexedBlock),
/// Send transaction
Transaction(PeerIndex, IndexedTransaction),
/// Send transaction with witness data
WitnessTransaction(PeerIndex, IndexedTransaction),
/// Send block transactions
BlockTxn(PeerIndex, types::BlockTxn),
/// Send notfound
@ -117,6 +121,17 @@ impl LocalSynchronizationTaskExecutor {
}
}
/// Sends a full block to the given peer via `send_witness_block` (the
/// witness-carrying variant of the `block` message), if the peer connection
/// is still registered. No-op for unknown/disconnected peers.
fn execute_witness_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending witness block {} to peer#{}", block.hash().to_reversed_str(), peer_index);
// record that this peer now knows the block hash (see `Peers::hash_known_as`)
self.peers.hash_known_as(peer_index, block.hash().clone(), KnownHashType::Block);
let block = types::Block {
block: block.to_raw_block(),
};
connection.send_witness_block(&block);
}
}
fn execute_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
@ -128,6 +143,17 @@ impl LocalSynchronizationTaskExecutor {
}
}
/// Sends a transaction to the given peer via `send_witness_transaction` (the
/// witness-carrying variant of the `tx` message), if the peer connection is
/// still registered. No-op for unknown/disconnected peers.
fn execute_witness_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending witness transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
// record that this peer now knows the transaction hash (see `Peers::hash_known_as`)
self.peers.hash_known_as(peer_index, transaction.hash, KnownHashType::Transaction);
let transaction = types::Tx {
transaction: transaction.raw,
};
connection.send_witness_transaction(&transaction);
}
}
fn execute_block_txn(&self, peer_index: PeerIndex, blocktxn: types::BlockTxn) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", blocktxn.request.transactions.len(), peer_index);
@ -202,7 +228,9 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
Task::Block(peer_index, block) => self.execute_block(peer_index, block),
Task::MerkleBlock(peer_index, block) => self.execute_merkleblock(peer_index, block),
Task::CompactBlock(peer_index, block) => self.execute_compact_block(peer_index, block),
Task::WitnessBlock(peer_index, block) => self.execute_witness_block(peer_index, block),
Task::Transaction(peer_index, transaction) => self.execute_transaction(peer_index, transaction),
Task::WitnessTransaction(peer_index, transaction) => self.execute_witness_transaction(peer_index, transaction),
Task::BlockTxn(peer_index, blocktxn) => self.execute_block_txn(peer_index, blocktxn),
Task::NotFound(peer_index, notfound) => self.execute_notfound(peer_index, notfound),
Task::Inventory(peer_index, inventory) => self.execute_inventory(peer_index, inventory),
@ -222,7 +250,7 @@ pub mod tests {
use std::time;
use parking_lot::{Mutex, Condvar};
use chain::Transaction;
use message::types;
use message::{Services, types};
use inbound_connection::tests::DummyOutboundSyncConnection;
use local_node::tests::{default_filterload, make_filteradd};
use synchronization_peers::{PeersImpl, PeersContainer, PeersFilters, PeersOptions, BlockAnnouncementType};
@ -275,9 +303,9 @@ pub mod tests {
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, c1.clone());
peers.insert(1, Services::default(), c1.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.insert(2, Services::default(), c2.clone());
peers.set_block_announcement_type(2, BlockAnnouncementType::SendCompactBlock);
executor.execute(Task::RelayNewBlock(test_data::genesis().into()));
@ -291,9 +319,9 @@ pub mod tests {
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, c1.clone());
peers.insert(1, Services::default(), c1.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.insert(2, Services::default(), c2.clone());
peers.set_block_announcement_type(2, BlockAnnouncementType::SendHeaders);
executor.execute(Task::RelayNewBlock(test_data::genesis().into()));
@ -315,26 +343,26 @@ pub mod tests {
// peer#1 wants tx1
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, c1.clone());
peers.insert(1, Services::default(), c1.clone());
peers.set_bloom_filter(1, default_filterload());
peers.update_bloom_filter(1, make_filteradd(&*tx1_hash));
// peer#2 wants tx2
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.insert(2, Services::default(), c2.clone());
peers.set_bloom_filter(2, default_filterload());
peers.update_bloom_filter(2, make_filteradd(&*tx2_hash));
// peer#3 wants tx1 + tx2 transactions
let c3 = DummyOutboundSyncConnection::new();
peers.insert(3, c3.clone());
peers.insert(3, Services::default(), c3.clone());
peers.set_bloom_filter(3, default_filterload());
peers.update_bloom_filter(3, make_filteradd(&*tx1_hash));
peers.update_bloom_filter(3, make_filteradd(&*tx2_hash));
// peer#4 has default behaviour (no filter)
let c4 = DummyOutboundSyncConnection::new();
peers.insert(4, c4.clone());
peers.insert(4, Services::default(), c4.clone());
// peer#5 wants some other transactions
let c5 = DummyOutboundSyncConnection::new();
peers.insert(5, c5.clone());
peers.insert(5, Services::default(), c5.clone());
peers.set_bloom_filter(5, default_filterload());
peers.update_bloom_filter(5, make_filteradd(&*tx3_hash));
@ -361,13 +389,13 @@ pub mod tests {
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.insert(2, Services::default(), c2.clone());
peers.set_fee_filter(2, types::FeeFilter::with_fee_rate(3000));
let c3 = DummyOutboundSyncConnection::new();
peers.insert(3, c3.clone());
peers.insert(3, Services::default(), c3.clone());
peers.set_fee_filter(3, types::FeeFilter::with_fee_rate(4000));
let c4 = DummyOutboundSyncConnection::new();
peers.insert(4, c4.clone());
peers.insert(4, Services::default(), c4.clone());
executor.execute(Task::RelayNewTransaction(test_data::genesis().transactions[0].clone().into(), 3500));

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use parking_lot::RwLock;
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use message::{types, Services};
use p2p::OutboundSyncConnectionRef;
use primitives::hash::H256;
use types::PeerIndex;
@ -40,6 +40,8 @@ pub struct MerkleBlockArtefacts {
/// Connected peers
pub trait Peers : Send + Sync + PeersContainer + PeersFilters + PeersOptions {
/// Require peers services.
fn require_peer_services(&self, services: Services);
/// Get peer connection
fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef>;
}
@ -49,7 +51,7 @@ pub trait PeersContainer {
/// Enumerate all known peers (TODO: iterator + separate entity 'Peer')
fn enumerate(&self) -> Vec<PeerIndex>;
/// Insert new peer connection
fn insert(&self, peer_index: PeerIndex, connection: OutboundSyncConnectionRef);
fn insert(&self, peer_index: PeerIndex, services: Services, connection: OutboundSyncConnectionRef);
/// Remove peer connection
fn remove(&self, peer_index: PeerIndex);
/// Close and remove peer connection due to misbehaving
@ -84,6 +86,8 @@ pub trait PeersFilters {
/// Options for peers connections
pub trait PeersOptions {
/// Is node supporting SegWit?
fn is_segwit_enabled(&self, peer_index: PeerIndex) -> bool;
/// Set up new block announcement type for the connection
fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType);
/// Set up new transaction announcement type for the connection
@ -94,6 +98,8 @@ pub trait PeersOptions {
struct Peer {
/// Connection to this peer
pub connection: OutboundSyncConnectionRef,
/// Peer services
pub services: Services,
/// Connection filter
pub filter: ConnectionFilter,
/// Block announcement type
@ -111,9 +117,10 @@ pub struct PeersImpl {
}
impl Peer {
pub fn with_connection(connection: OutboundSyncConnectionRef) -> Self {
pub fn new(services: Services, connection: OutboundSyncConnectionRef) -> Self {
Peer {
connection: connection,
services: services,
filter: ConnectionFilter::default(),
block_announcement_type: BlockAnnouncementType::SendInventory,
transaction_announcement_type: TransactionAnnouncementType::SendInventory,
@ -122,6 +129,19 @@ impl Peer {
}
impl Peers for PeersImpl {
/// Disconnects every connected peer that does not advertise all of the
/// required service bits (e.g. NODE_WITNESS once SegWit activates).
///
/// Peers that satisfy `services` are kept; non-compliant peers are removed
/// from the peers map and their connections are closed.
fn require_peer_services(&self, services: Services) {
	// possible optimization: force p2p level to establish connections to SegWit-nodes only
	// without it, all other nodes will be eventually banned (this could take some time, though)
	let mut peers = self.peers.write();
	// BUGFIX: the filter was inverted — it selected peers whose services DID
	// include the required set, disconnecting exactly the compliant peers.
	// We must drop the peers that do NOT include the required services.
	let to_disconnect: Vec<_> = peers.iter()
		.filter(|&(_, p)| !p.services.includes(&services))
		.map(|(p, _)| *p)
		.collect();
	for peer_index in to_disconnect {
		let peer = peers.remove(&peer_index).expect("iterating peers keys; qed");
		let expected_services: u64 = services.into();
		let actual_services: u64 = peer.services.into();
		warn!(target: "sync", "Disconnecting from peer#{} because of insufficient services. Expected {:x}, actual: {:x}", peer_index, expected_services, actual_services);
		peer.connection.close();
	}
}
/// Returns a clone of the outbound connection handle for the given peer,
/// or `None` if the peer is not currently connected.
fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef> {
	let peers = self.peers.read();
	peers.get(&peer_index).map(|known_peer| known_peer.connection.clone())
}
@ -132,9 +152,9 @@ impl PeersContainer for PeersImpl {
self.peers.read().keys().cloned().collect()
}
fn insert(&self, peer_index: PeerIndex, connection: OutboundSyncConnectionRef) {
/// Registers a newly connected peer together with its advertised services.
///
/// Panics if a peer with the same index is already registered — peer
/// indexes are expected to be unique per connection.
fn insert(&self, peer_index: PeerIndex, services: Services, connection: OutboundSyncConnectionRef) {
	trace!(target: "sync", "Connected to peer#{}", peer_index);
	let previous = self.peers.write().insert(peer_index, Peer::new(services, connection));
	assert!(previous.is_none());
}
fn remove(&self, peer_index: PeerIndex) {
@ -227,6 +247,13 @@ impl PeersFilters for PeersImpl {
}
impl PeersOptions for PeersImpl {
/// Checks whether the given peer advertises SegWit support
/// (i.e. its services include the witness service bit).
/// Unknown peers are treated as non-SegWit.
fn is_segwit_enabled(&self, peer_index: PeerIndex) -> bool {
	match self.peers.read().get(&peer_index) {
		Some(peer) => peer.services.witness(),
		None => false,
	}
}
fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType) {
if let Some(peer) = self.peers.write().get_mut(&peer_index) {
peer.block_announcement_type = announcement_type;

View File

@ -273,6 +273,16 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageWitnessTx => {
// only transaction from memory pool can be requested
if let Some(transaction) = self.memory_pool.read().read_by_hash(&next_item.hash) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with witness-tx {}", peer_index, next_item.hash.to_reversed_str());
let transaction = IndexedTransaction::new(next_item.hash, transaction.clone());
self.executor.execute(Task::WitnessTransaction(peer_index, transaction));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with block {}", peer_index, next_item.hash.to_reversed_str());
@ -312,9 +322,15 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
notfound.inventory.push(next_item);
}
},
_ => {
common::InventoryType::MessageWitnessBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with witness-block {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::WitnessBlock(peer_index, block.into()));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::Error | common::InventoryType::MessageWitnessFilteredBlock => (),
}
Some(ServerTask::ReversedGetData(peer_index, message, notfound))
@ -466,7 +482,7 @@ pub mod tests {
use parking_lot::{Mutex, RwLock};
use db::{BlockChainDatabase};
use message::types;
use message::common::{self, InventoryVector, InventoryType};
use message::common::{self, Services, InventoryVector, InventoryType};
use primitives::hash::H256;
use chain::Transaction;
use inbound_connection::tests::DummyOutboundSyncConnection;
@ -648,7 +664,7 @@ pub mod tests {
fn server_get_block_txn_responds_when_good_request() {
let (_, _, executor, peers, server) = create_synchronization_server();
peers.insert(0, DummyOutboundSyncConnection::new());
peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
peers.hash_known_as(0, test_data::genesis().hash(), KnownHashType::CompactBlock);
// when asking for block_txns
@ -673,7 +689,7 @@ pub mod tests {
fn server_get_block_txn_do_not_responds_when_bad_request() {
let (_, _, _, peers, server) = create_synchronization_server();
peers.insert(0, DummyOutboundSyncConnection::new());
peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(peers.enumerate().contains(&0));
// when asking for block_txns
@ -812,7 +828,7 @@ pub mod tests {
storage.canonize(&b2.hash()).unwrap();
// This peer won't get any blocks, because it has not set filter for the connection
let peer_index2 = 1; peers.insert(peer_index2, DummyOutboundSyncConnection::new());
let peer_index2 = 1; peers.insert(peer_index2, Services::default(), DummyOutboundSyncConnection::new());
let mut loop_task = ServerTask::GetData(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b1_hash.clone() },
@ -835,7 +851,7 @@ pub mod tests {
let mut counter = 2;
for (get_tx1, get_tx2) in peers_config {
let peer_index = counter; peers.insert(peer_index, DummyOutboundSyncConnection::new());
let peer_index = counter; peers.insert(peer_index, Services::default(), DummyOutboundSyncConnection::new());
counter += 1;
// setup filter
peers.set_bloom_filter(peer_index, default_filterload());
@ -906,7 +922,7 @@ pub mod tests {
storage.canonize(&b1.hash()).unwrap();
// This peer will receive compact block
let peer_index2 = 1; peers.insert(peer_index2, DummyOutboundSyncConnection::new());
let peer_index2 = 1; peers.insert(peer_index2, Services::default(), DummyOutboundSyncConnection::new());
// ask for data
let mut loop_task = ServerTask::GetData(peer_index2, types::GetData {inventory: vec![

View File

@ -170,7 +170,7 @@ impl AsyncVerifier {
},
Ok(tx_output_provider) => {
let time: u32 = get_time().sec as u32;
match verifier.verifier.verify_mempool_transaction(&tx_output_provider, height, time, &transaction.raw) {
match verifier.verifier.verify_mempool_transaction(storage.as_block_header_provider(), &tx_output_provider, height, time, &transaction.raw) {
Ok(_) => sink.on_transaction_verification_success(transaction.into()),
Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash),
}
@ -271,6 +271,7 @@ pub mod tests {
use chain::{IndexedBlock, IndexedTransaction};
use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, AsyncVerifier, VerificationTask, ChainVerifierWrapper};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use script::Error as ScriptError;
use VerificationParameters;
#[derive(Default)]
@ -417,7 +418,7 @@ pub mod tests {
verification_level: VerificationLevel::Full,
verification_edge: 1.into(),
});
assert_eq!(wrapper.verify_block(&bad_transaction_block), Err(VerificationError::Transaction(1, TransactionError::Signature(0))));
assert_eq!(wrapper.verify_block(&bad_transaction_block), Err(VerificationError::Transaction(1, TransactionError::Signature(0, ScriptError::InvalidStackOperation))));
}
#[test]

View File

@ -440,6 +440,7 @@ impl<F> TransactionInputBuilder<F> where F: Invoke<chain::TransactionInput> {
previous_output: self.output.expect("Building input without previous output"),
script_sig: self.signature,
sequence: self.sequence,
script_witness: vec![],
}
)
}

View File

@ -107,6 +107,7 @@ impl TransactionBuilder {
},
script_sig: Bytes::new_with_len(0),
sequence: 0xffffffff,
script_witness: vec![],
});
self
}
@ -123,6 +124,7 @@ impl TransactionBuilder {
},
script_sig: Bytes::new_with_len(0),
sequence: 0xffffffff,
script_witness: vec![],
}];
self
}

View File

@ -15,6 +15,7 @@ serialization = { path = "../serialization" }
script = { path = "../script" }
network = { path = "../network" }
db = { path = "../db" }
bitcrypto = { path = "../crypto" }
[dev-dependencies]
test-data = { path = "../test-data" }

View File

@ -1,10 +1,12 @@
use network::ConsensusParams;
use network::{ConsensusParams, ConsensusFork};
use crypto::dhash256;
use db::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use sigops::transaction_sigops;
use ser::Stream;
use sigops::{transaction_sigops, transaction_sigops_cost} ;
use work::block_reward_satoshi;
use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments;
use deployments::BlockDeployments;
use canon::CanonBlock;
use error::{Error, TransactionError};
use timestamp::median_timestamp;
@ -16,6 +18,7 @@ pub struct BlockAcceptor<'a> {
pub sigops: BlockSigops<'a>,
pub coinbase_claim: BlockCoinbaseClaim<'a>,
pub coinbase_script: BlockCoinbaseScript<'a>,
pub witness: BlockWitness<'a>,
}
impl<'a> BlockAcceptor<'a> {
@ -24,15 +27,16 @@ impl<'a> BlockAcceptor<'a> {
consensus: &'a ConsensusParams,
block: CanonBlock<'a>,
height: u32,
deployments: &'a Deployments,
deployments: &'a BlockDeployments<'a>,
headers: &'a BlockHeaderProvider,
) -> Self {
BlockAcceptor {
finality: BlockFinality::new(block, height, deployments, headers, consensus),
serialized_size: BlockSerializedSize::new(block, consensus, height),
finality: BlockFinality::new(block, height, deployments, headers),
serialized_size: BlockSerializedSize::new(block, consensus, deployments, height),
coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, consensus, height),
witness: BlockWitness::new(block, deployments),
}
}
@ -42,6 +46,7 @@ impl<'a> BlockAcceptor<'a> {
self.serialized_size.check()?;
self.coinbase_claim.check()?;
self.coinbase_script.check()?;
self.witness.check()?;
Ok(())
}
}
@ -54,8 +59,8 @@ pub struct BlockFinality<'a> {
}
impl<'a> BlockFinality<'a> {
fn new(block: CanonBlock<'a>, height: u32, deployments: &'a Deployments, headers: &'a BlockHeaderProvider, params: &ConsensusParams) -> Self {
let csv_active = deployments.csv(height, headers, params);
fn new(block: CanonBlock<'a>, height: u32, deployments: &'a BlockDeployments<'a>, headers: &'a BlockHeaderProvider) -> Self {
let csv_active = deployments.csv();
BlockFinality {
block: block,
@ -84,25 +89,43 @@ pub struct BlockSerializedSize<'a> {
block: CanonBlock<'a>,
consensus: &'a ConsensusParams,
height: u32,
segwit_active: bool,
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, height: u32) -> Self {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, deployments: &'a BlockDeployments<'a>, height: u32) -> Self {
let segwit_active = deployments.segwit();
BlockSerializedSize {
block: block,
consensus: consensus,
height: height,
segwit_active: segwit_active,
}
}
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
// block size (without witness) is valid for all forks:
// before SegWit: it is main check for size
// after SegWit: without witness data, block size should be <= 1_000_000
// after BitcoinCash fork: block size is increased to 8_000_000
// after SegWit2x fork: without witness data, block size should be <= 2_000_000
if size < self.consensus.fork.min_block_size(self.height) ||
size > self.consensus.fork.max_block_size(self.height) {
Err(Error::Size(size))
} else {
Ok(())
return Err(Error::Size(size));
}
// there's no need to define weight for pre-SegWit blocks
if self.segwit_active {
let size_with_witness = self.block.size_with_witness();
let weight = size * (ConsensusFork::witness_scale_factor() - 1) + size_with_witness;
if weight > self.consensus.fork.max_block_weight(self.height) {
return Err(Error::Weight);
}
}
Ok(())
}
}
@ -111,28 +134,49 @@ pub struct BlockSigops<'a> {
store: &'a TransactionOutputProvider,
consensus: &'a ConsensusParams,
height: u32,
bip16_active: bool,
}
impl<'a> BlockSigops<'a> {
fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, consensus: &'a ConsensusParams, height: u32) -> Self {
let bip16_active = block.header.raw.time >= consensus.bip16_time;
BlockSigops {
block: block,
store: store,
consensus: consensus,
height: height,
bip16_active: bip16_active,
}
}
fn check(&self) -> Result<(), Error> {
let store = DuplexTransactionOutputProvider::new(self.store, &*self.block);
let bip16_active = self.block.header.raw.time >= self.consensus.bip16_time;
let sigops = self.block.transactions.iter()
.map(|tx| transaction_sigops(&tx.raw, &store, bip16_active))
.sum::<usize>();
let (sigops, sigops_cost) = self.block.transactions.iter()
.map(|tx| {
let tx_sigops = transaction_sigops(&tx.raw, &store, self.bip16_active);
let tx_sigops_cost = transaction_sigops_cost(&tx.raw, &store, tx_sigops);
(tx_sigops, tx_sigops_cost)
})
.fold((0, 0), |acc, (tx_sigops, tx_sigops_cost)| (acc.0 + tx_sigops, acc.1 + tx_sigops_cost));
// sigops check is valid for all forks:
// before SegWit: 20_000
// after SegWit: cost of sigops is sigops * 4 and max cost is 80_000 => max sigops is still 20_000
// after BitcoinCash fork: 20_000 sigops for each full/partial 1_000_000 bytes of block
// after SegWit2x fork: cost of sigops is sigops * 4 and max cost is 160_000 => max sigops is 40_000
let size = self.block.size();
if sigops > self.consensus.fork.max_block_sigops(self.height, size) {
Err(Error::MaximumSigops)
return Err(Error::MaximumSigops);
}
// sigops check is valid for all forks:
// before SegWit: no witnesses => cost is sigops * 4 and max cost is 80_000
// after SegWit: it is main check for sigops
// after BitcoinCash fork: no witnesses => cost is sigops * 4 and max cost depends on block size
// after SegWit2x: it is basic check for sigops, limits are increased
if sigops_cost > self.consensus.fork.max_block_sigops_cost(self.height, size) {
Err(Error::MaximumSigopsCost)
} else {
Ok(())
}
@ -241,6 +285,60 @@ impl<'a> BlockCoinbaseScript<'a> {
}
}
/// BIP141 witness-commitment check for a canonical block.
pub struct BlockWitness<'a> {
// Block being validated.
block: CanonBlock<'a>,
// True when the SegWit deployment is active at this block's height.
segwit_active: bool,
}
impl<'a> BlockWitness<'a> {
// Captures the SegWit activation state from the per-block deployments view.
fn new(block: CanonBlock<'a>, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
BlockWitness {
block: block,
segwit_active: segwit_active,
}
}
/// Validates the block's witness commitment (BIP141):
/// the last witness-commitment output of the coinbase must commit to
/// dhash256(witness merkle root || 32-byte witness nonce), where the nonce
/// is the single witness item of the coinbase input.
fn check(&self) -> Result<(), Error> {
// Pre-activation blocks carry no commitment requirement.
if !self.segwit_active {
return Ok(());
}
// check witness from coinbase transaction
let mut has_witness = false;
if let Some(coinbase) = self.block.transactions.first() {
// BIP141: when several matching outputs exist, the one with the
// highest index counts — hence the reverse scan.
let commitment = coinbase.raw.outputs.iter().rev()
.find(|output| script::is_witness_commitment_script(&output.script_pubkey));
if let Some(commitment) = commitment {
let witness_merkle_root = self.block.raw().witness_merkle_root();
// The coinbase input must carry exactly one witness item of 32 bytes
// (the witness reserved value / nonce). The length check guards the
// direct indexing on the next line.
if coinbase.raw.inputs.get(0).map(|i| i.script_witness.len()).unwrap_or_default() != 1 ||
coinbase.raw.inputs[0].script_witness[0].len() != 32 {
return Err(Error::WitnessInvalidNonceSize);
}
// Commitment preimage: witness merkle root || witness nonce.
let mut stream = Stream::new();
stream.append(&witness_merkle_root);
stream.append_slice(&coinbase.raw.inputs[0].script_witness[0]);
let hash_witness = dhash256(&stream.out());
// Bytes [6..38] of the commitment script hold the committed hash
// (after the OP_RETURN + push + 4-byte header prefix).
if hash_witness != commitment.script_pubkey[6..].into() {
return Err(Error::WitnessMerkleCommitmentMismatch);
}
has_witness = true;
}
}
// witness commitment is required when block contains transactions with witness
if !has_witness && self.block.transactions.iter().any(|tx| tx.raw.has_witness()) {
return Err(Error::UnexpectedWitness);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
extern crate test_data;

View File

@ -6,7 +6,7 @@ use canon::CanonBlock;
use accept_block::BlockAcceptor;
use accept_header::HeaderAcceptor;
use accept_transaction::TransactionAcceptor;
use deployments::Deployments;
use deployments::BlockDeployments;
use duplex_store::DuplexTransactionOutputProvider;
use VerificationLevel;
@ -17,7 +17,7 @@ pub struct ChainAcceptor<'a> {
}
impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, deployments: &'a Deployments) -> Self {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, deployments: &'a BlockDeployments) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider();
@ -39,7 +39,6 @@ impl<'a> ChainAcceptor<'a> {
block.header.raw.time,
tx_index,
deployments,
headers,
))
.collect(),
}

View File

@ -3,7 +3,7 @@ use db::BlockHeaderProvider;
use canon::CanonHeader;
use error::Error;
use work::work_required;
use deployments::Deployments;
use deployments::BlockDeployments;
use timestamp::median_timestamp;
pub struct HeaderAcceptor<'a> {
@ -18,11 +18,11 @@ impl<'a> HeaderAcceptor<'a> {
consensus: &'a ConsensusParams,
header: CanonHeader<'a>,
height: u32,
deployments: &'a Deployments,
deployments: &'a BlockDeployments<'a>,
) -> Self {
HeaderAcceptor {
work: HeaderWork::new(header, store, height, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, height, deployments, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, deployments),
version: HeaderVersion::new(header, height, consensus),
}
}
@ -99,8 +99,8 @@ pub struct HeaderMedianTimestamp<'a> {
}
impl<'a> HeaderMedianTimestamp<'a> {
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, height: u32, deployments: &'a Deployments, params: &ConsensusParams) -> Self {
let active = deployments.csv(height, store, params);
fn new(header: CanonHeader<'a>, store: &'a BlockHeaderProvider, deployments: &'a BlockDeployments<'a>) -> Self {
let active = deployments.csv();
HeaderMedianTimestamp {
header: header,
store: store,

View File

@ -1,10 +1,10 @@
use primitives::hash::H256;
use primitives::bytes::Bytes;
use db::{TransactionMetaProvider, TransactionOutputProvider, BlockHeaderProvider};
use db::{TransactionMetaProvider, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments;
use deployments::BlockDeployments;
use script::Builder;
use sigops::transaction_sigops;
use canon::CanonTransaction;
@ -13,6 +13,7 @@ use error::TransactionError;
use VerificationLevel;
pub struct TransactionAcceptor<'a> {
pub premature_witness: TransactionPrematureWitness<'a>,
pub bip30: TransactionBip30<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
pub maturity: TransactionMaturity<'a>,
@ -36,22 +37,23 @@ impl<'a> TransactionAcceptor<'a> {
height: u32,
time: u32,
transaction_index: usize,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
TransactionAcceptor {
premature_witness: TransactionPrematureWitness::new(transaction, deployments),
bip30: TransactionBip30::new_for_sync(transaction, meta_store, consensus, block_hash, height),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, deployments, headers),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, deployments),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.premature_witness.check());
try!(self.bip30.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
@ -83,8 +85,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
transaction: CanonTransaction<'a>,
height: u32,
time: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
let transaction_index = 0;
@ -96,7 +97,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
sigops: TransactionSigops::new(transaction, output_store, consensus, max_block_sigops, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, deployments, headers),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, deployments),
}
}
@ -288,6 +289,8 @@ pub struct TransactionEval<'a> {
verify_locktime: bool,
verify_checksequence: bool,
verify_dersig: bool,
verify_witness: bool,
verify_nulldummy: bool,
signature_version: SignatureVersion,
}
@ -299,8 +302,7 @@ impl<'a> TransactionEval<'a> {
verification_level: VerificationLevel,
height: u32,
time: u32,
deployments: &'a Deployments,
headers: &'a BlockHeaderProvider,
deployments: &'a BlockDeployments,
) -> Self {
let verify_p2sh = time >= params.bip16_time;
let verify_strictenc = match params.fork {
@ -311,10 +313,12 @@ impl<'a> TransactionEval<'a> {
let verify_dersig = height >= params.bip66_height;
let signature_version = match params.fork {
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height => SignatureVersion::ForkId,
_ => SignatureVersion::Base,
ConsensusFork::NoFork | ConsensusFork::BitcoinCash(_) | ConsensusFork::SegWit2x(_) => SignatureVersion::Base,
};
let verify_checksequence = deployments.csv(height, headers, params);
let verify_checksequence = deployments.csv();
let verify_witness = deployments.segwit();
let verify_nulldummy = verify_witness;
TransactionEval {
transaction: transaction,
@ -325,6 +329,8 @@ impl<'a> TransactionEval<'a> {
verify_locktime: verify_locktime,
verify_checksequence: verify_checksequence,
verify_dersig: verify_dersig,
verify_witness: verify_witness,
verify_nulldummy: verify_nulldummy,
signature_version: signature_version,
}
}
@ -354,6 +360,7 @@ impl<'a> TransactionEval<'a> {
checker.input_index = index;
checker.input_amount = output.value;
let script_witness = &input.script_witness;
let input: Script = input.script_sig.clone().into();
let output: Script = output.script_pubkey.into();
@ -362,10 +369,12 @@ impl<'a> TransactionEval<'a> {
.verify_strictenc(self.verify_strictenc)
.verify_locktime(self.verify_locktime)
.verify_checksequence(self.verify_checksequence)
.verify_dersig(self.verify_dersig);
.verify_dersig(self.verify_dersig)
.verify_nulldummy(self.verify_nulldummy)
.verify_witness(self.verify_witness);
try!(verify_script(&input, &output, &flags, &checker, self.signature_version)
.map_err(|_| TransactionError::Signature(index)));
try!(verify_script(&input, &output, &script_witness, &flags, &checker, self.signature_version)
.map_err(|e| TransactionError::Signature(index, e)));
}
Ok(())
@ -434,6 +443,30 @@ impl<'a> TransactionReturnReplayProtection<'a> {
}
}
/// Rejects transactions that carry witness data before the SegWit
/// deployment is active (a "premature" witness).
pub struct TransactionPrematureWitness<'a> {
// Transaction under verification.
transaction: CanonTransaction<'a>,
// Whether SegWit is active for the block this transaction belongs to.
segwit_active: bool,
}
impl<'a> TransactionPrematureWitness<'a> {
// Snapshot the SegWit activation flag from the per-block deployments.
fn new(transaction: CanonTransaction<'a>, deployments: &'a BlockDeployments<'a>) -> Self {
TransactionPrematureWitness {
transaction: transaction,
segwit_active: deployments.segwit(),
}
}
// Witness data is only acceptable once SegWit is active.
fn check(&self) -> Result<(), TransactionError> {
if self.segwit_active || !(*self.transaction).raw.has_witness() {
Ok(())
} else {
Err(TransactionError::PrematureWitness)
}
}
}
#[cfg(test)]
mod tests {
use chain::{IndexedTransaction, Transaction, TransactionOutput};

View File

@ -12,7 +12,7 @@ use verify_header::HeaderVerifier;
use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
use deployments::Deployments;
use deployments::{Deployments, BlockDeployments};
use {Verify, VerificationLevel};
pub struct BackwardsCompatibleChainVerifier {
@ -49,22 +49,28 @@ impl BackwardsCompatibleChainVerifier {
unreachable!();
},
BlockOrigin::CanonChain { block_number } => {
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level, canon_block, block_number, &self.deployments);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level, canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChain(origin) => {
let block_number = origin.block_number;
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &self.deployments);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
let block_number = origin.block_number;
let header_provider = self.store.as_store().as_block_header_provider();
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &self.deployments);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
}
@ -89,6 +95,7 @@ impl BackwardsCompatibleChainVerifier {
pub fn verify_mempool_transaction<T>(
&self,
block_header_provider: &BlockHeaderProvider,
prevout_provider: &T,
height: u32,
time: u32,
@ -96,7 +103,8 @@ impl BackwardsCompatibleChainVerifier {
) -> Result<(), TransactionError> where T: TransactionOutputProvider {
let indexed_tx = transaction.clone().into();
// let's do preverification first
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, height);
let deployments = BlockDeployments::new(&self.deployments, height, block_header_provider, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, &deployments);
try!(tx_verifier.check());
let canon_tx = CanonTransaction::new(&indexed_tx);
@ -110,8 +118,7 @@ impl BackwardsCompatibleChainVerifier {
canon_tx,
height,
time,
&self.deployments,
self.store.as_block_header_provider()
&deployments,
);
tx_acceptor.check()
}

View File

@ -51,11 +51,18 @@ struct DeploymentState {
/// Last known deployment states
type DeploymentStateCache = HashMap<&'static str, DeploymentState>;
#[derive(Default)]
#[derive(Default, Debug)]
pub struct Deployments {
cache: Mutex<DeploymentStateCache>,
}
/// Deployment state queries bound to one concrete block: bundles the shared
/// `Deployments` cache with the block number, header provider and consensus
/// params so callers don't have to thread them through every check.
pub struct BlockDeployments<'a> {
	// shared, mutex-guarded threshold-state cache
	deployments: &'a Deployments,
	// number of the block being verified
	number: u32,
	// header source used to walk the deployment voting window
	headers: &'a BlockHeaderProvider,
	consensus: &'a ConsensusParams,
}
impl Deployments {
pub fn new() -> Self {
Deployments::default()
@ -71,10 +78,41 @@ impl Deployments {
None => false
}
}
/// Returns true if the SegWit (BIP141) deployment is active at block `number`.
///
/// Returns `false` unconditionally when the network's consensus params do not
/// define a SegWit deployment at all.
pub fn segwit(&self, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> bool {
	consensus.segwit_deployment.map_or(false, |segwit| {
		// threshold_state reads/updates the shared per-deployment cache
		let mut cache = self.cache.lock();
		threshold_state(&mut cache, segwit, number, headers, consensus).is_active()
	})
}
}
impl<'a> BlockDeployments<'a> {
	/// Binds the shared deployments cache to a concrete block context.
	pub fn new(deployments: &'a Deployments, number: u32, headers: &'a BlockHeaderProvider, consensus: &'a ConsensusParams) -> Self {
		BlockDeployments { deployments, number, headers, consensus }
	}

	/// True when the CSV deployment (BIP68/112/113) is active for this block.
	pub fn csv(&self) -> bool {
		self.deployments.csv(self.number, self.headers, self.consensus)
	}

	/// True when the SegWit deployment (BIP141) is active for this block.
	pub fn segwit(&self) -> bool {
		self.deployments.segwit(self.number, self.headers, self.consensus)
	}
}
/// Calculates threshold state of given deployment
fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> ThresholdState {
// deployments are checked using previous block index
if let Some(activation) = deployment.activation {
if activation <= number {
return ThresholdState::Active;
@ -84,7 +122,7 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
}
// get number of the first block in the period
let number = first_of_the_period(number, consensus.miner_confirmation_window);
let number = first_of_the_period(number.saturating_sub(1), consensus.miner_confirmation_window);
let hash = match headers.block_header(BlockRef::Number(number)) {
Some(header) => header.hash(),
@ -105,7 +143,14 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
let from_block = deployment_state.block_number + consensus.miner_confirmation_window;
let threshold_state = deployment_state.state;
let deployment_iter = ThresholdIterator::new(deployment, headers, from_block, consensus, threshold_state);
let state = deployment_iter.last().expect("iter must have at least one item");
let state = match deployment_iter.last() {
Some(state) => state,
None => DeploymentState {
block_number: number,
block_hash: hash,
state: deployment_state.state,
},
};
let result = state.state;
entry.insert(state);
result
@ -118,7 +163,6 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
result
},
}
}
fn first_of_the_period(block: u32, miner_confirmation_window: u32) -> u32 {

View File

@ -1,6 +1,7 @@
use hash::H256;
use compact::Compact;
use db::Error as DBError;
use script::Error as SignatureError;
#[derive(Debug, PartialEq)]
/// All possible verification errors
@ -32,10 +33,14 @@ pub enum Error {
/// Maximum sigops operations exceeded - will not provide how much it was in total
/// since it stops counting once `MAX_BLOCK_SIGOPS` is reached
MaximumSigops,
/// Maximum sigops operations cost exceeded
MaximumSigopsCost,
/// Coinbase signature is not in the range 2-100
CoinbaseSignatureLength(usize),
/// Block size is invalid
Size(usize),
/// Block weight is invalid
Weight,
/// Block transactions are not final.
NonFinalBlock,
/// Old version block.
@ -46,6 +51,12 @@ pub enum Error {
TransactionFeesOverflow,
/// Sum of all referenced outputs in block transactions resulted in the overflow
ReferencedInputsSumOverflow,
/// SegWit: bad witness nonce size
WitnessInvalidNonceSize,
/// SegWit: witness merkle mismatch
WitnessMerkleCommitmentMismatch,
/// SegWit: unexpected witness
UnexpectedWitness,
/// Database error
Database(DBError),
}
@ -76,7 +87,7 @@ pub enum TransactionError {
/// Referenced coinbase output for the transaction input is not mature enough
Maturity,
/// Signature invalid for given input
Signature(usize),
Signature(usize, SignatureError),
/// Unknown previous transaction referenced
UnknownReference(H256),
/// Spends more than claims
@ -95,5 +106,7 @@ pub enum TransactionError {
UsingSpentOutput(H256, u32),
/// Transaction, protected using BitcoinCash OP_RETURN replay protection (REQ-6-1).
ReturnReplayProtection,
/// Transaction with witness is received before SegWit is activated.
PrematureWitness,
}

View File

@ -65,6 +65,7 @@ extern crate network;
extern crate primitives;
extern crate serialization as ser;
extern crate script;
extern crate bitcrypto as crypto;
pub mod constants;
mod canon;
@ -108,6 +109,7 @@ pub use error::{Error, TransactionError};
pub use sigops::transaction_sigops;
pub use timestamp::median_timestamp;
pub use work::{work_required, is_valid_proof_of_work, is_valid_proof_of_work_hash, block_reward_satoshi};
pub use deployments::Deployments;
#[derive(Debug, Clone, Copy, PartialEq)]
/// Blocks verification level.

View File

@ -1,6 +1,7 @@
use network::ConsensusFork;
use chain::Transaction;
use db::TransactionOutputProvider;
use script::Script;
use script::{Script, ScriptWitness};
/// Counts signature operations in given transaction
/// bip16_active flag indicates if we should also count signature operations
@ -16,6 +17,7 @@ pub fn transaction_sigops(
output_script.sigops_count(false)
}).sum();
// TODO: bitcoin/bitcoin also includes input_sigops here
if transaction.is_coinbase() {
return output_sigops;
}
@ -38,3 +40,55 @@ pub fn transaction_sigops(
input_sigops + output_sigops + bip16_sigops
}
/// Computes the BIP141 sigop *cost* of a transaction: the legacy sigop count
/// scaled by the witness scale factor, plus the (unscaled) sigops contributed
/// by witness programs spent by the inputs. Inputs whose previous output is
/// unknown to `store` contribute nothing.
pub fn transaction_sigops_cost(
	transaction: &Transaction,
	store: &TransactionOutputProvider,
	sigops: usize,
) -> usize {
	let legacy_cost = sigops * ConsensusFork::witness_scale_factor();

	let mut witness_cost = 0;
	for input in &transaction.inputs {
		// usize::max_value() => no coinbase-maturity restriction on the lookup
		if let Some(output) = store.transaction_output(&input.previous_output, usize::max_value()) {
			witness_cost += witness_sigops(
				&Script::new(input.script_sig.clone()),
				&Script::new(output.script_pubkey.clone()),
				&input.script_witness,
			);
		}
	}

	legacy_cost + witness_cost
}
/// Counts witness sigops for a single input, covering both native witness
/// outputs and P2SH-wrapped witness programs; returns 0 for anything else.
fn witness_sigops(
	script_sig: &Script,
	script_pubkey: &Script,
	script_witness: &ScriptWitness,
) -> usize {
	// Native witness program: the output script itself encodes version+program.
	if let Some((version, program)) = script_pubkey.parse_witness_program() {
		return witness_program_sigops(version, program, script_witness);
	}

	// Otherwise only P2SH with a push-only scriptSig can wrap a witness program.
	if !script_pubkey.is_pay_to_script_hash() || !script_sig.is_push_only() {
		return 0;
	}

	// The redeem script is the last push of the scriptSig.
	match script_sig.iter().last() {
		Some(Ok(instruction)) => match instruction.data {
			Some(data) => {
				let redeem_script = Script::new(data.into());
				match redeem_script.parse_witness_program() {
					Some((version, program)) => witness_program_sigops(version, program, script_witness),
					None => 0,
				}
			},
			None => 0,
		},
		_ => 0,
	}
}
/// Sigop count for a version-0 witness program per BIP141:
/// P2WPKH (20-byte program) counts as one sigop; P2WSH (32-byte program)
/// counts the sigops of the witness script (last witness stack item).
/// Unknown versions or program sizes count as zero.
fn witness_program_sigops(
	witness_version: u8,
	witness_program: &[u8],
	script_witness: &ScriptWitness,
) -> usize {
	if witness_version != 0 {
		return 0;
	}
	if witness_program.len() == 20 {
		// P2WPKH: implicit single CHECKSIG
		return 1;
	}
	if witness_program.len() == 32 {
		return match script_witness.last() {
			// accurate counting (bip16-style) inside the witness script
			Some(witness_script) => Script::new(witness_script.clone()).sigops_count(true),
			None => 0,
		};
	}
	0
}

View File

@ -1,4 +1,3 @@
use std::collections::BTreeSet;
use chain::BlockHeader;
use db::{BlockHeaderProvider, BlockAncestors};
use primitives::hash::H256;
@ -14,7 +13,7 @@ pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider) -> u3
/// The header should be later expected to have higher timestamp
/// than this median timestamp
pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeaderProvider) -> u32 {
let timestamps: BTreeSet<_> = BlockAncestors::new(previous_header_hash.clone().into(), store)
let mut timestamps: Vec<_> = BlockAncestors::new(previous_header_hash.clone().into(), store)
.take(11)
.map(|header| header.time)
.collect();
@ -23,6 +22,6 @@ pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeade
return 0;
}
let timestamps = timestamps.into_iter().collect::<Vec<_>>();
timestamps.sort();
timestamps[timestamps.len() / 2]
}

View File

@ -2,6 +2,7 @@ use std::ops;
use ser::Serializable;
use chain::IndexedTransaction;
use network::{ConsensusParams, ConsensusFork};
use deployments::BlockDeployments;
use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;
@ -36,17 +37,19 @@ pub struct MemoryPoolTransactionVerifier<'a> {
pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
pub is_coinbase: TransactionMemoryPoolCoinbase<'a>,
pub size: TransactionSize<'a>,
pub premature_witness: TransactionPrematureWitness<'a>,
pub sigops: TransactionSigops<'a>,
}
impl<'a> MemoryPoolTransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, height: u32) -> Self {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, deployments: &'a BlockDeployments<'a>) -> Self {
trace!(target: "verification", "Mempool-Tx pre-verification {}", transaction.hash.to_reversed_str());
MemoryPoolTransactionVerifier {
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
size: TransactionSize::new(transaction, consensus, height),
size: TransactionSize::new(transaction, consensus),
premature_witness: TransactionPrematureWitness::new(transaction, &deployments),
sigops: TransactionSigops::new(transaction, ConsensusFork::absolute_maximum_block_sigops()),
}
}
@ -56,6 +59,7 @@ impl<'a> MemoryPoolTransactionVerifier<'a> {
try!(self.null_non_coinbase.check());
try!(self.is_coinbase.check());
try!(self.size.check());
try!(self.premature_witness.check());
try!(self.sigops.check());
Ok(())
}
@ -148,20 +152,19 @@ impl<'a> TransactionMemoryPoolCoinbase<'a> {
pub struct TransactionSize<'a> {
transaction: &'a IndexedTransaction,
consensus: &'a ConsensusParams,
height: u32,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, height: u32) -> Self {
fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams) -> Self {
TransactionSize {
transaction: transaction,
consensus: consensus,
height: height,
}
}
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.serialized_size() > self.consensus.fork.max_transaction_size(self.height) {
let size = self.transaction.raw.serialized_size();
if size > self.consensus.fork.max_transaction_size() {
Err(TransactionError::MaxSize)
} else {
Ok(())
@ -191,3 +194,27 @@ impl<'a> TransactionSigops<'a> {
}
}
}
/// Rejects transactions that carry witness data before the SegWit deployment
/// is active (TransactionError::PrematureWitness).
pub struct TransactionPrematureWitness<'a> {
	transaction: &'a IndexedTransaction,
	// SegWit activation state snapshotted at construction time
	segwit_active: bool,
}
impl<'a> TransactionPrematureWitness<'a> {
	/// Snapshots the SegWit activation state for this transaction's context.
	pub fn new(transaction: &'a IndexedTransaction, deployments: &'a BlockDeployments<'a>) -> Self {
		TransactionPrematureWitness {
			transaction: transaction,
			segwit_active: deployments.segwit(),
		}
	}

	/// Ok unless the transaction has witness data while SegWit is inactive.
	pub fn check(&self) -> Result<(), TransactionError> {
		if self.segwit_active || !self.transaction.raw.has_witness() {
			Ok(())
		} else {
			Err(TransactionError::PrematureWitness)
		}
	}
}