segwit: initial flush

This commit is contained in:
Svyatoslav Nikolsky 2017-08-16 11:26:03 +03:00
parent d2acccd1bc
commit d4a191aec1
36 changed files with 634 additions and 133 deletions

View File

@ -11,6 +11,12 @@ pub struct Block {
pub transactions: Vec<Transaction>,
}
/// Block whose transactions are borrowed rather than owned.
/// NOTE(review): presumably used when a witness-serialized view of an
/// existing block is needed without cloning its transactions — confirm
/// against callers.
#[derive(Debug, PartialEq, Clone)]
pub struct WitnessBlock<'a> {
pub block_header: BlockHeader,
pub transactions: Vec<&'a Transaction>,
}
impl From<&'static str> for Block {
fn from(s: &'static str) -> Self {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
@ -32,6 +38,17 @@ impl Block {
merkle_root(&hashes)
}
/// Returns block's witness merkle root.
///
/// Per BIP141 the coinbase (index 0) contributes a zero hash; every other
/// transaction contributes its witness hash.
pub fn witness_merkle_root(&self) -> H256 {
	let mut hashes: Vec<H256> = Vec::with_capacity(self.transactions.len());
	for (index, transaction) in self.transactions.iter().enumerate() {
		let hash = if index == 0 {
			H256::from(0)
		} else {
			transaction.witness_hash()
		};
		hashes.push(hash);
	}
	merkle_root(&hashes)
}
/// Returns a shared slice over the block's transactions.
pub fn transactions(&self) -> &[Transaction] {
&self.transactions
}
@ -43,6 +60,10 @@ impl Block {
/// Returns the block's hash, i.e. the hash of its header.
pub fn hash(&self) -> H256 {
self.block_header.hash()
}
/// Returns the block's cost (weight, BIP141).
/// NOTE(review): stub — calling this currently panics via `unimplemented!()`.
pub fn cost(&self) -> u64 {
unimplemented!()
}
}
#[cfg(test)]

View File

@ -1,13 +1,18 @@
//! Bitcoin transaction.
//! https://en.bitcoin.it/wiki/Protocol_documentation#tx
use std::io;
use heapsize::HeapSizeOf;
use hex::FromHex;
use bytes::Bytes;
use ser::{deserialize, serialize};
use ser::{deserialize, serialize, serialize_with_flags, SERIALIZE_TRANSACTION_WITNESS};
use crypto::dhash256;
use hash::H256;
use constants::{SEQUENCE_FINAL, LOCKTIME_THRESHOLD};
use ser::{Error, Serializable, Deserializable, Stream, Reader};
// BIP144 witness serialization: a 0x00 marker byte (impossible as an input
// count in the legacy format) followed by a 0x01 flag byte introduces the
// extended, witness-carrying transaction encoding.
const WITNESS_MARKER: u8 = 0;
const WITNESS_FLAG: u8 = 1;
#[derive(Debug, PartialEq, Eq, Clone, Default, Serializable, Deserializable)]
pub struct OutPoint {
@ -28,11 +33,12 @@ impl OutPoint {
}
}
#[derive(Debug, PartialEq, Default, Clone, Serializable, Deserializable)]
#[derive(Debug, PartialEq, Default, Clone)]
pub struct TransactionInput {
pub previous_output: OutPoint,
pub script_sig: Bytes,
pub sequence: u32,
pub script_witness: Vec<Bytes>,
}
impl TransactionInput {
@ -41,17 +47,23 @@ impl TransactionInput {
previous_output: OutPoint::null(),
script_sig: script_sig,
sequence: SEQUENCE_FINAL,
script_witness: vec![],
}
}
/// An input is final when its sequence equals `SEQUENCE_FINAL`.
pub fn is_final(&self) -> bool {
self.sequence == SEQUENCE_FINAL
}
/// True when at least one witness stack item is attached to this input.
pub fn has_witness(&self) -> bool {
!self.script_witness.is_empty()
}
}
impl HeapSizeOf for TransactionInput {
	fn heap_size_of_children(&self) -> usize {
		// Heap usage is the unlocking script plus all witness stack items.
		// (The stale pre-segwit line that counted only `script_sig`, left
		// behind by the diff, has been removed.)
		self.script_sig.heap_size_of_children() +
			self.script_witness.heap_size_of_children()
	}
}
@ -76,7 +88,7 @@ impl HeapSizeOf for TransactionOutput {
}
}
#[derive(Debug, PartialEq, Default, Clone, Serializable, Deserializable)]
#[derive(Debug, PartialEq, Default, Clone)]
pub struct Transaction {
pub version: i32,
pub inputs: Vec<TransactionInput>,
@ -101,6 +113,10 @@ impl Transaction {
dhash256(&serialize(self))
}
/// Double-SHA256 of the transaction serialized together with its witness
/// data (SERIALIZE_TRANSACTION_WITNESS), as used in the witness merkle tree.
pub fn witness_hash(&self) -> H256 {
	let with_witness = serialize_with_flags(self, SERIALIZE_TRANSACTION_WITNESS);
	dhash256(&with_witness)
}
pub fn inputs(&self) -> &[TransactionInput] {
&self.inputs
}
@ -149,6 +165,10 @@ impl Transaction {
self.inputs.iter().all(TransactionInput::is_final)
}
pub fn has_witness(&self) -> bool {
self.inputs.iter().any(TransactionInput::has_witness)
}
pub fn total_spends(&self) -> u64 {
let mut result = 0u64;
for output in self.outputs.iter() {
@ -159,13 +179,93 @@ impl Transaction {
}
result
}
/// Returns the transaction's cost (weight, BIP141).
/// NOTE(review): stub — calling this currently panics via `unimplemented!()`.
pub fn cost(&self) -> u64 {
unimplemented!()
}
}
impl Serializable for TransactionInput {
// Legacy per-input encoding: outpoint, scriptSig, sequence.
// `script_witness` is deliberately NOT written here — witness stacks are
// appended at the transaction level, after the outputs (see the
// `Serializable for Transaction` impl below).
fn serialize(&self, stream: &mut Stream) {
stream
.append(&self.previous_output)
.append(&self.script_sig)
.append(&self.sequence);
}
}
impl Deserializable for TransactionInput {
// Reads the legacy per-input fields. The witness stack always starts
// empty here; when the transaction uses the BIP144 extended format, the
// transaction-level deserializer fills `script_witness` afterwards.
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where Self: Sized, T: io::Read {
Ok(TransactionInput {
previous_output: reader.read()?,
script_sig: reader.read()?,
sequence: reader.read()?,
script_witness: vec![],
})
}
}
impl Serializable for Transaction {
	/// Writes either the legacy layout
	/// `[version][inputs][outputs][lock_time]` or, when the stream requests
	/// witness serialization AND the transaction actually has witness data,
	/// the BIP144 extended layout
	/// `[version][marker][flag][inputs][outputs][witnesses][lock_time]`.
	fn serialize(&self, stream: &mut Stream) {
		if stream.include_transaction_witness() && self.has_witness() {
			stream
				.append(&self.version)
				.append(&WITNESS_MARKER)
				.append(&WITNESS_FLAG)
				.append_list(&self.inputs)
				.append_list(&self.outputs);
			// One witness stack per input, in input order.
			for input in &self.inputs {
				stream.append_list(&input.script_witness);
			}
			stream.append(&self.lock_time);
		} else {
			stream
				.append(&self.version)
				.append_list(&self.inputs)
				.append_list(&self.outputs)
				.append(&self.lock_time);
		}
	}
}
impl Deserializable for Transaction {
// Parses both the legacy and the BIP144 extended layout. If the first
// input-count varint is 0x00 the list parses as empty — that byte is the
// witness marker, so the next byte must be the witness flag, after which
// the real input list follows and one witness stack per input is read
// after the outputs.
// NOTE(review): a legacy transaction with genuinely zero inputs is
// indistinguishable from the witness marker and is rejected here unless
// the following byte happens to equal WITNESS_FLAG — same ambiguity as in
// the reference implementation.
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where Self: Sized, T: io::Read {
let version = reader.read()?;
let mut inputs: Vec<TransactionInput> = reader.read_list()?;
let read_witness = if inputs.is_empty() {
let witness_flag: u8 = reader.read()?;
if witness_flag != WITNESS_FLAG {
return Err(Error::MalformedData);
}
inputs = reader.read_list()?;
true
} else {
false
};
let outputs = reader.read_list()?;
// Witness stacks appear after the outputs, one per input, in input order.
if read_witness {
for input in inputs.iter_mut() {
input.script_witness = reader.read_list()?;
}
}
Ok(Transaction {
version: version,
inputs: inputs,
outputs: outputs,
lock_time: reader.read()?,
})
}
}
#[cfg(test)]
mod tests {
use hash::H256;
use ser::Serializable;
use super::Transaction;
use super::{Transaction, TransactionInput, OutPoint, TransactionOutput};
// real transaction from block 80000
// https://blockchain.info/rawtx/5a4ebf66822b0b2d56bd9dc64ece0bc38ee7844a23ff1d7320a88c5fdb2ad3e2
@ -183,6 +283,7 @@ mod tests {
let tx_output = &t.outputs[0];
assert_eq!(tx_output.value, 5000000000);
assert_eq!(tx_output.script_pubkey, "76a914404371705fa9bd789a2fcd52d2c580b65d35549d88ac".into());
assert!(!t.has_witness());
}
#[test]
@ -198,4 +299,42 @@ mod tests {
let tx: Transaction = raw_tx.into();
assert_eq!(tx.serialized_size(), raw_tx.len() / 2);
}
#[test]
fn test_transaction_reader_with_witness() {
// test case from https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
let actual: Transaction = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000".into();
let expected = Transaction {
version: 1,
inputs: vec![TransactionInput {
previous_output: OutPoint {
hash: "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f".into(),
index: 0,
},
script_sig: "4830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01".into(),
sequence: 0xffffffee,
script_witness: vec![],
}, TransactionInput {
previous_output: OutPoint {
hash: "ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a".into(),
index: 1,
},
script_sig: "".into(),
sequence: 0xffffffff,
script_witness: vec![
"304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01".into(),
"025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357".into(),
],
}],
outputs: vec![TransactionOutput {
value: 0x0000000006b22c20,
script_pubkey: "76a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac".into(),
}, TransactionOutput {
value: 0x000000000d519390,
script_pubkey: "76a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac".into(),
}],
lock_time: 0x00000011,
};
assert_eq!(actual, expected);
}
}

0
chain/src/witness.rs Normal file
View File

View File

@ -3,13 +3,16 @@ use hash::H256;
use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError};
/// Inventory vector type (`inv`/`getdata` messages).
///
/// Witness variants (BIP144) set bit 30 (MSG_WITNESS_FLAG) on top of the
/// base type, so the discriminant needs a 32-bit representation; the stale
/// `#[repr(u8)]` left over from the pre-segwit version has been removed —
/// duplicate conflicting `repr` attributes do not compile, and `u8` cannot
/// hold 0x40000001.
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u32)]
pub enum InventoryType {
	Error = 0,
	MessageTx = 1,
	MessageBlock = 2,
	MessageFilteredBlock = 3,
	MessageCompactBlock = 4,
	// base type | MSG_WITNESS_FLAG (1 << 30)
	MessageWitnessTx = 0x40000001,
	MessageWitnessBlock = 0x40000002,
	MessageWitnessFilteredBlock = 0x40000003,
}
impl InventoryType {
@ -20,6 +23,9 @@ impl InventoryType {
2 => Some(InventoryType::MessageBlock),
3 => Some(InventoryType::MessageFilteredBlock),
4 => Some(InventoryType::MessageCompactBlock),
0x40000001 => Some(InventoryType::MessageWitnessTx),
0x40000002 => Some(InventoryType::MessageWitnessBlock),
0x40000003 => Some(InventoryType::MessageWitnessFilteredBlock),
_ => None
}
}
@ -122,11 +128,17 @@ mod tests {
assert_eq!(2u32, InventoryType::MessageBlock.into());
assert_eq!(3u32, InventoryType::MessageFilteredBlock.into());
assert_eq!(4u32, InventoryType::MessageCompactBlock.into());
assert_eq!(0x40000001u32, InventoryType::MessageWitnessTx.into());
assert_eq!(0x40000002u32, InventoryType::MessageWitnessBlock.into());
assert_eq!(0x40000003u32, InventoryType::MessageWitnessFilteredBlock.into());
assert_eq!(InventoryType::from_u32(0).unwrap(), InventoryType::Error);
assert_eq!(InventoryType::from_u32(1).unwrap(), InventoryType::MessageTx);
assert_eq!(InventoryType::from_u32(2).unwrap(), InventoryType::MessageBlock);
assert_eq!(InventoryType::from_u32(3).unwrap(), InventoryType::MessageFilteredBlock);
assert_eq!(InventoryType::from_u32(4).unwrap(), InventoryType::MessageCompactBlock);
assert_eq!(InventoryType::from_u32(0x40000001).unwrap(), InventoryType::MessageWitnessTx);
assert_eq!(InventoryType::from_u32(0x40000002).unwrap(), InventoryType::MessageWitnessBlock);
assert_eq!(InventoryType::from_u32(0x40000003).unwrap(), InventoryType::MessageWitnessFilteredBlock);
}
}

View File

@ -2,7 +2,7 @@ use ser::Stream;
use bytes::{TaggedBytes, Bytes};
use network::Magic;
use common::Command;
use serialization::serialize_payload;
use serialization::serialize_payload_with_flags;
use {Payload, MessageResult, MessageHeader};
pub fn to_raw_message(magic: Magic, command: Command, payload: &Bytes) -> Bytes {
@ -19,7 +19,11 @@ pub struct Message<T> {
impl<T> Message<T> where T: Payload {
/// Builds a message with no extra serialization flags (legacy encoding).
/// (The stale pre-refactor line that serialized the payload directly, left
/// behind by the diff, has been removed.)
pub fn new(magic: Magic, version: u32, payload: &T) -> MessageResult<Self> {
	Self::with_flags(magic, version, payload, 0)
}
pub fn with_flags(magic: Magic, version: u32, payload: &T, serialization_flags: u32) -> MessageResult<Self> {
let serialized = try!(serialize_payload_with_flags(payload, version, serialization_flags));
let message = Message {
bytes: TaggedBytes::new(to_raw_message(magic, T::command().into(), &serialized)),

View File

@ -1,5 +1,5 @@
mod stream;
mod reader;
pub use self::stream::serialize_payload;
pub use self::stream::{serialize_payload, serialize_payload_with_flags};
pub use self::reader::deserialize_payload;

View File

@ -3,7 +3,11 @@ use ser::Stream;
use {Payload, Error, MessageResult};
/// Serializes a payload with no extra serialization flags (legacy encoding).
/// (The stale pre-refactor statement left mid-function by the diff has been
/// removed; this is now a thin wrapper over `serialize_payload_with_flags`.)
pub fn serialize_payload<T>(t: &T, version: u32) -> MessageResult<Bytes> where T: Payload {
	serialize_payload_with_flags(t, version, 0)
}

/// Serializes a payload with the given serialization flags
/// (e.g. SERIALIZE_TRANSACTION_WITNESS to include witness data).
pub fn serialize_payload_with_flags<T>(t: &T, version: u32, serialization_flags: u32) -> MessageResult<Bytes> where T: Payload {
	let mut stream = PayloadStream::new(version, serialization_flags);
	try!(stream.append(t));
	Ok(stream.out())
}
@ -14,9 +18,9 @@ pub struct PayloadStream {
}
impl PayloadStream {
pub fn new(version: u32) -> Self {
pub fn new(version: u32, serialization_flags: u32) -> Self {
PayloadStream {
stream: Stream::default(),
stream: Stream::with_flags(serialization_flags),
version: version,
}
}

View File

@ -1,12 +1,20 @@
use std::cmp::max;
use hash::H256;
use {Magic, Deployment};
use {Magic, Deployment, Deployments};
/// First block of SegWit2x fork.
pub const SEGWIT2X_FORK_BLOCK: u32 = 0xFFFFFFFF; // not known (yet?)
/// First block of BitcoinCash fork.
pub const BITCOIN_CASH_FORK_BLOCK: u32 = 478559; // https://blockchair.com/bitcoin-cash/block/478559
mod segwit {
/// The maximum allowed weight for a block, see BIP 141 (network rule)
pub const MAX_BLOCK_WEIGHT: usize = 4_000_000;
/// Witness scale factor.
pub const WITNESS_SCALE_FACTOR: usize = 4;
}
#[derive(Debug, Clone)]
/// Parameters that influence chain consensus.
pub struct ConsensusParams {
@ -76,7 +84,16 @@ impl ConsensusParams {
timeout: 1493596800,
activation: Some(770112),
}),
segwit_deployment: None,
segwit_deployment: match fork {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1479168000,
timeout: 1510704000,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
},
Magic::Testnet => ConsensusParams {
network: magic,
@ -94,7 +111,16 @@ impl ConsensusParams {
timeout: 1493596800,
activation: Some(419328),
}),
segwit_deployment: None,
segwit_deployment: match fork {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1462060800,
timeout: 1493596800,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
},
Magic::Regtest | Magic::Unitest => ConsensusParams {
network: magic,
@ -112,7 +138,16 @@ impl ConsensusParams {
timeout: 0,
activation: Some(0),
}),
segwit_deployment: None,
segwit_deployment: match fork {
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 0,
timeout: ::std::u32::MAX,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
},
}
}
@ -134,26 +169,39 @@ impl ConsensusFork {
160_000
}
/// Minimum allowed serialized block size (bytes) at the given height.
/// NOTE: arm order matters — the height-guarded arms must precede the
/// catch-all variants.
pub fn min_block_size(&self, height: u32) -> usize {
match *self {
// SegWit2x places no lower bound, even at the fork block itself.
ConsensusFork::SegWit2x(fork_height) if height == fork_height => 0,
// size of first fork block must be larger than 1MB
ConsensusFork::BitcoinCash(fork_height) if height == fork_height => 1_000_001,
ConsensusFork::NoFork | ConsensusFork::BitcoinCash(_) | ConsensusFork::SegWit2x(_) => 0,
}
}
/// Maximum allowed serialized block size (bytes) at the given height:
/// 2MB for SegWit2x and 8MB for BitcoinCash once their fork height is
/// reached, otherwise the legacy 1MB limit.
pub fn max_block_size(&self, height: u32) -> usize {
match *self {
ConsensusFork::SegWit2x(fork_height) if height >= fork_height => 2_000_000,
ConsensusFork::BitcoinCash(fork_height) if height >= fork_height => 8_000_000,
ConsensusFork::NoFork | ConsensusFork::BitcoinCash(_) | ConsensusFork::SegWit2x(_) => 1_000_000,
}
}
pub fn max_transaction_size(&self, _height: u32) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
1_000_000
/// Checks whether a block of `size` serialized bytes is acceptable at
/// `height`, taking the active fork and deployment state into account.
/// NOTE: arm order matters — the guarded arms must precede the catch-all.
pub fn check_block_size(&self, size: usize, height: u32, deployments: &Deployments) -> bool {
match *self {
// bitcoin cash fork block must be > 1_000_000 and <= 8_000_000
ConsensusFork::BitcoinCash(fork_height) if height == fork_height =>
size > 1_000_000 && size <= 8_000_000,
// bitcoin cash support blocks up to 8_000_000
ConsensusFork::BitcoinCash(fork_height) if height > fork_height =>
size <= 8_000_000,
// when segwit is deployed, this expression is used. which, in turn, also allows block size <= 1_000_000
// (size * WITNESS_SCALE_FACTOR <= MAX_BLOCK_WEIGHT is equivalent to size <= 1_000_000 for base size)
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) if deployments.is_active("segwit") =>
size.saturating_mul(segwit::WITNESS_SCALE_FACTOR) <= segwit::MAX_BLOCK_WEIGHT,
// without segwit and before fork, max size is 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) =>
size <= 1_000_000,
}
}
/// Checks whether a transaction of `size` serialized bytes is acceptable,
/// taking the active fork and deployment state into account.
pub fn check_transaction_size(&self, size: usize, deployments: &Deployments) -> bool {
match *self {
// when segwit is deployed, this expression is used. which, in turn, is the same max tx size 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) if deployments.is_active("segwit") =>
size.saturating_mul(segwit::WITNESS_SCALE_FACTOR) <= segwit::MAX_BLOCK_WEIGHT,
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// ConsensusFork::NoFork | ConsensusFork::SegWit2x: max size of tx is 1_000_000
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) | ConsensusFork::BitcoinCash(_) => size <= 1_000_000,
}
}
pub fn max_block_sigops(&self, height: u32, block_size: usize) -> usize {
@ -170,6 +218,7 @@ impl ConsensusFork {
mod tests {
use super::super::Magic;
use super::{ConsensusParams, ConsensusFork};
use deployments::tests::DummyDeployments;
#[test]
fn test_consensus_params_bip34_height() {
@ -207,19 +256,30 @@ mod tests {
}
#[test]
fn test_consensus_fork_min_block_size() {
assert_eq!(ConsensusFork::NoFork.min_block_size(0), 0);
assert_eq!(ConsensusFork::SegWit2x(100).min_block_size(0), 0);
assert_eq!(ConsensusFork::SegWit2x(100).min_block_size(100), 0);
assert_eq!(ConsensusFork::BitcoinCash(100).min_block_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCash(100).min_block_size(100), 1_000_001);
}
fn test_consensus_fork_check_transaction_size() {
assert_eq!(ConsensusFork::NoFork.check_transaction_size(800_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(1_000_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(4_000_000, &DummyDeployments::default()), false);
#[test]
fn test_consensus_fork_max_transaction_size() {
assert_eq!(ConsensusFork::NoFork.max_transaction_size(0), 1_000_000);
assert_eq!(ConsensusFork::SegWit2x(100).max_transaction_size(0), 1_000_000);
assert_eq!(ConsensusFork::BitcoinCash(100).max_transaction_size(0), 1_000_000);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(800_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(1_000_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::NoFork.check_transaction_size(4_000_000, &DummyDeployments::deployed()), false);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(800_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(1_000_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(4_000_000, &DummyDeployments::default()), false);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(800_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(1_000_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::SegWit2x(100_000).check_transaction_size(4_000_000, &DummyDeployments::deployed()), false);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(800_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(1_000_000, &DummyDeployments::default()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(4_000_000, &DummyDeployments::default()), false);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(800_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(1_000_000, &DummyDeployments::deployed()), true);
assert_eq!(ConsensusFork::BitcoinCash(100_000).check_transaction_size(4_000_000, &DummyDeployments::deployed()), false);
}
#[test]

View File

@ -15,9 +15,41 @@ pub struct Deployment {
pub activation: Option<u32>,
}
/// Deployments state.
pub trait Deployments {
/// Is deployment currently active? `name` is the deployment identifier,
/// e.g. "segwit".
fn is_active(&self, name: &str) -> bool;
}
impl Deployment {
	/// Checks whether `version` signals this deployment: the version's top
	/// bits must equal VERSIONBITS_TOP_BITS and this deployment's bit must
	/// be set.
	pub fn matches(&self, version: u32) -> bool {
		let top_bits_match = (version & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS;
		let deployment_bit_set = (version & (1 << self.bit)) != 0;
		top_bits_match && deployment_bit_set
	}
}
#[cfg(test)]
pub mod tests {
	use super::Deployments;

	/// Test double reporting a fixed segwit activation state.
	#[derive(Default, Debug)]
	pub struct DummyDeployments {
		pub segwit_active: bool,
	}

	impl DummyDeployments {
		/// Deployments state with segwit already active.
		pub fn deployed() -> Self {
			DummyDeployments { segwit_active: true }
		}
	}

	impl Deployments for DummyDeployments {
		fn is_active(&self, name: &str) -> bool {
			// Only "segwit" is modelled; every other deployment is inactive.
			name == "segwit" && self.segwit_active
		}
	}
}

View File

@ -9,6 +9,6 @@ mod magic;
pub use primitives::{hash, compact};
pub use consensus::{ConsensusParams, ConsensusFork, SEGWIT2X_FORK_BLOCK, BITCOIN_CASH_FORK_BLOCK};
pub use deployments::Deployment;
pub use deployments::{Deployment, Deployments};
pub use magic::Magic;

View File

@ -61,7 +61,12 @@ impl PeerContext {
/// Request is always automatically send.
pub fn send_request<T>(&self, payload: &T) where T: Payload {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.send_request_with_flags(payload, 0)
}
/// Request is always automatically sent; `serialization_flags` selects the
/// payload encoding (e.g. SERIALIZE_TRANSACTION_WITNESS).
pub fn send_request_with_flags<T>(&self, payload: &T, serialization_flags: u32) where T: Payload {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, serialization_flags);
self.context.spawn(send);
}
@ -94,14 +99,14 @@ impl PeerContext {
let mut queue = self.response_queue.lock();
if is_final {
if sync.permission_for_response(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, 0);
self.context.spawn(send);
self.send_awaiting(&mut sync, &mut queue, id);
} else {
queue.push_finished_response(id, self.to_message(payload).into());
}
} else if sync.is_permitted(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, 0);
self.context.spawn(send);
} else {
queue.push_unfinished_response(id, self.to_message(payload).into());

View File

@ -320,11 +320,11 @@ impl Context {
}
/// Send message to a channel with given peer id.
pub fn send_to_peer<T>(context: Arc<Context>, peer: PeerId, payload: &T) -> IoFuture<()> where T: Payload {
pub fn send_to_peer<T>(context: Arc<Context>, peer: PeerId, payload: &T, serialization_flags: u32) -> IoFuture<()> where T: Payload {
match context.connections.channel(peer) {
Some(channel) => {
let info = channel.peer_info();
let message = Message::new(info.magic, info.version, payload).expect("failed to create outgoing message");
let message = Message::with_flags(info.magic, info.version, payload, serialization_flags).expect("failed to create outgoing message");
channel.session().stats().lock().report_send(T::command().into(), message.len());
Context::send(context, channel, message)
},

View File

@ -3,6 +3,7 @@ use bytes::Bytes;
use message::{Command, Error, Payload, types, deserialize_payload};
use protocol::Protocol;
use net::PeerContext;
use ser::SERIALIZE_TRANSACTION_WITNESS;
pub type InboundSyncConnectionRef = Box<InboundSyncConnection>;
pub type OutboundSyncConnectionRef = Arc<OutboundSyncConnection>;
@ -43,6 +44,8 @@ pub trait OutboundSyncConnection : Send + Sync {
fn send_getheaders(&self, message: &types::GetHeaders);
fn send_transaction(&self, message: &types::Tx);
fn send_block(&self, message: &types::Block);
fn send_witness_transaction(&self, message: &types::Tx);
fn send_witness_block(&self, message: &types::Block);
fn send_headers(&self, message: &types::Headers);
fn respond_headers(&self, message: &types::Headers, id: u32);
fn send_mempool(&self, message: &types::MemPool);
@ -98,6 +101,14 @@ impl OutboundSyncConnection for OutboundSync {
self.context.send_request(message);
}
// Sends the transaction using witness-aware serialization: each input's
// witness stack is included (SERIALIZE_TRANSACTION_WITNESS).
fn send_witness_transaction(&self, message: &types::Tx) {
self.context.send_request_with_flags(message, SERIALIZE_TRANSACTION_WITNESS);
}
// Sends the block using witness-aware serialization for its transactions.
fn send_witness_block(&self, message: &types::Block) {
self.context.send_request_with_flags(message, SERIALIZE_TRANSACTION_WITNESS);
}
fn send_headers(&self, message: &types::Headers) {
self.context.send_request(message);
}

View File

@ -115,7 +115,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let services = Services::default().with_network(true);
let services = match consensus.fork {
ConsensusFork::BitcoinCash(_) => services.with_bitcoin_cash(true),
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => services,
ConsensusFork::NoFork | ConsensusFork::SegWit2x(_) => services.with_witness(true),
};
let verification_level = match matches.value_of("verification-level") {

View File

@ -2,6 +2,7 @@
use std::{ops, str, fmt, io, marker};
use hex::{ToHex, FromHex, FromHexError};
use heapsize::HeapSizeOf;
/// Wrapper around `Vec<u8>`
#[derive(Default, PartialEq, Clone, Eq, Hash)]
@ -25,6 +26,12 @@ impl Bytes {
}
}
impl HeapSizeOf for Bytes {
// Delegates to the wrapped Vec<u8>'s heap measurement.
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children()
}
}
impl<'a> From<&'a [u8]> for Bytes {
fn from(v: &[u8]) -> Self {
Bytes(v.into())

View File

@ -48,6 +48,7 @@ impl RawClientCore {
},
script_sig: GlobalBytes::new(), // default script
sequence: input.sequence.unwrap_or(default_sequence),
script_witness: vec![],
}).collect();
// prepare outputs

View File

@ -51,6 +51,15 @@ pub enum Error {
// Softfork safeness
DiscourageUpgradableNops,
// SegWit-related errors
WitnessProgramWrongLength,
WitnessProgramWitnessEmpty,
WitnessProgramMismatch,
WitnessMalleated,
WitnessMalleatedP2SH,
WitnessUnexpected,
WitnessPubKeyType,
}
impl fmt::Display for Error {
@ -101,6 +110,15 @@ impl fmt::Display for Error {
// Softfork safeness
Error::DiscourageUpgradableNops => "Discourage Upgradable Nops".fmt(f),
// SegWit-related errors
Error::WitnessProgramWrongLength => "Witness program has incorrect length".fmt(f),
Error::WitnessProgramWitnessEmpty => "Witness program was passed an empty witness".fmt(f),
Error::WitnessProgramMismatch => "Witness program hash mismatch".fmt(f),
Error::WitnessMalleated => "Witness requires empty scriptSig".fmt(f),
Error::WitnessMalleatedP2SH => "Witness requires only-redeemscript scriptSig".fmt(f),
Error::WitnessUnexpected => "Witness provided for non-witness script".fmt(f),
Error::WitnessPubKeyType => "Using non-compressed keys in segwit".fmt(f),
}
}
}

View File

@ -5,7 +5,7 @@ use chain::constants::SEQUENCE_LOCKTIME_DISABLE_FLAG;
use crypto::{sha1, sha256, dhash160, dhash256, ripemd160};
use sign::{SignatureVersion, Sighash};
use {
script, Builder, Script, Num, VerificationFlags, Opcode, Error, SignatureChecker, Stack
script, Builder, Script, ScriptWitness, Num, VerificationFlags, Opcode, Error, SignatureChecker, Stack
};
/// Helper function.
@ -244,6 +244,7 @@ fn cast_to_bool(data: &[u8]) -> bool {
pub fn verify_script(
script_sig: &Script,
script_pubkey: &Script,
witness: &ScriptWitness,
flags: &VerificationFlags,
checker: &SignatureChecker,
version: SignatureVersion,
@ -266,6 +267,18 @@ pub fn verify_script(
return Err(Error::EvalFalse);
}
// Verify witness program
let mut verify_cleanstack = flags.verify_cleanstack;
if flags.verify_witness {
if let Some((witness_version, witness_program)) = script_pubkey.parse_witness_program() {
if !script_sig.is_empty() {
return Err(Error::WitnessMalleated);
}
verify_witness_program(witness, witness_version, witness_program, flags, checker)?;
verify_cleanstack = false;
}
}
// Additional validation for spend-to-script-hash transactions:
if flags.verify_p2sh && script_pubkey.is_pay_to_script_hash() {
if !script_sig.is_push_only() {
@ -290,7 +303,7 @@ pub fn verify_script(
// The CLEANSTACK check is only performed after potential P2SH evaluation,
// as the non-P2SH evaluation of a P2SH script will obviously not result in
// a clean stack (the P2SH inputs remain). The same holds for witness evaluation.
if flags.verify_cleanstack {
if verify_cleanstack {
// Disallow CLEANSTACK without P2SH, as otherwise a switch CLEANSTACK->P2SH+CLEANSTACK
// would be possible, which is not a softfork (and P2SH should be one).
assert!(flags.verify_p2sh);
@ -303,6 +316,16 @@ pub fn verify_script(
Ok(())
}
/// Verifies a witness program (BIP141).
/// NOTE(review): stub — currently panics via `unimplemented!()`; all
/// parameters are intentionally unused until segwit script verification is
/// implemented.
fn verify_witness_program(
_witness: &ScriptWitness,
_witness_version: u8,
_witness_program: &[u8],
_flags: &VerificationFlags,
_checker: &SignatureChecker
) -> Result<(), Error> {
unimplemented!()
}
/// Evaluautes the script
#[cfg_attr(feature="cargo-clippy", allow(match_same_arms))]
pub fn eval_script(
@ -920,7 +943,7 @@ mod tests {
use chain::Transaction;
use sign::SignatureVersion;
use {
Opcode, Script, VerificationFlags, Builder, Error, Num, TransactionInputSigner,
Opcode, Script, ScriptWitness, VerificationFlags, Builder, Error, Num, TransactionInputSigner,
NoopSignatureChecker, TransactionSignatureChecker, Stack
};
use super::{eval_script, verify_script, is_public_key};
@ -1870,7 +1893,7 @@ mod tests {
let output: Script = "76a914df3bd30160e6c6145baaf2c88a8844c13a00d1d588ac".into();
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
}
// https://blockchain.info/rawtx/02b082113e35d5386285094c2829e7e2963fa0b5369fb7f4b79c4c90877dcd3d
@ -1887,7 +1910,7 @@ mod tests {
let output: Script = "a9141a8b0026343166625c7475f01e48b5ede8c0252e87".into();
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
}
// https://blockchain.info/en/tx/12b5633bad1f9c167d523ad1aa1947b2732a865bf5414eab2f9e5ae5d5c191ba?show_adv=true
@ -1904,7 +1927,7 @@ mod tests {
let output: Script = "410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac".into();
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
}
// https://blockchain.info/rawtx/fb0a1d8d34fa5537e461ac384bac761125e1bfa7fec286fa72511240fa66864d
@ -1921,7 +1944,7 @@ mod tests {
let output: Script = "76a9147a2a3b481ca80c4ba7939c54d9278e50189d94f988ac".into();
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
}
// https://blockchain.info/rawtx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
@ -1939,12 +1962,12 @@ mod tests {
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
let flags = VerificationFlags::default()
.verify_p2sh(true)
.verify_locktime(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Err(Error::NumberOverflow));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Err(Error::NumberOverflow));
}
// https://blockchain.info/rawtx/54fabd73f1d20c980a0686bf0035078e07f69c58437e4d586fb29aa0bee9814f
@ -1960,7 +1983,7 @@ mod tests {
let input: Script = "483045022100d92e4b61452d91a473a43cde4b469a472467c0ba0cbd5ebba0834e4f4762810402204802b76b7783db57ac1f61d2992799810e173e91055938750815b6d8a675902e014f".into();
let output: Script = "76009f69905160a56b210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71ad6c".into();
let flags = VerificationFlags::default();
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
}
#[test]
@ -2013,7 +2036,7 @@ mod tests {
let flags = VerificationFlags::default()
.verify_p2sh(true);
assert_eq!(verify_script(&input, &output, &flags, &checker, SignatureVersion::Base), Ok(()));
assert_eq!(verify_script(&input, &output, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Ok(()));
}
@ -2064,7 +2087,7 @@ mod tests {
let signed_input = checker.signer.signed_input(&key_pair, 0, amount, &script_pubkey, SignatureVersion::ForkId, sighashtype);
let script_sig = signed_input.script_sig.into();
assert_eq!(verify_script(&script_sig, &script_pubkey, &flags, &checker, SignatureVersion::ForkId), Ok(()));
assert_eq!(verify_script(&script_sig, &script_pubkey, &ScriptWitness, &flags, &checker, SignatureVersion::ForkId), Ok(()));
}
// signature with wrong amount
@ -2072,7 +2095,7 @@ mod tests {
let signed_input = checker.signer.signed_input(&key_pair, 0, amount + 1, &script_pubkey, SignatureVersion::ForkId, sighashtype);
let script_sig = signed_input.script_sig.into();
assert_eq!(verify_script(&script_sig, &script_pubkey, &flags, &checker, SignatureVersion::ForkId), Err(Error::EvalFalse));
assert_eq!(verify_script(&script_sig, &script_pubkey, &ScriptWitness, &flags, &checker, SignatureVersion::ForkId), Err(Error::EvalFalse));
}
// fork-id signature passed when not expected
@ -2080,7 +2103,7 @@ mod tests {
let signed_input = checker.signer.signed_input(&key_pair, 0, amount + 1, &script_pubkey, SignatureVersion::ForkId, sighashtype);
let script_sig = signed_input.script_sig.into();
assert_eq!(verify_script(&script_sig, &script_pubkey, &flags, &checker, SignatureVersion::Base), Err(Error::EvalFalse));
assert_eq!(verify_script(&script_sig, &script_pubkey, &ScriptWitness, &flags, &checker, SignatureVersion::Base), Err(Error::EvalFalse));
}
// non-fork-id signature passed when expected
@ -2088,7 +2111,7 @@ mod tests {
let signed_input = checker.signer.signed_input(&key_pair, 0, amount + 1, &script_pubkey, SignatureVersion::Base, 1);
let script_sig = signed_input.script_sig.into();
assert_eq!(verify_script(&script_sig, &script_pubkey, &flags.verify_strictenc(true), &checker, SignatureVersion::ForkId), Err(Error::SignatureMustUseForkId));
assert_eq!(verify_script(&script_sig, &script_pubkey, &ScriptWitness, &flags.verify_strictenc(true), &checker, SignatureVersion::ForkId), Err(Error::SignatureMustUseForkId));
}
}
}

View File

@ -24,7 +24,7 @@ pub use self::flags::VerificationFlags;
pub use self::interpreter::{eval_script, verify_script};
pub use self::opcode::Opcode;
pub use self::num::Num;
pub use self::script::{Script, ScriptType, ScriptAddress};
pub use self::script::{Script, ScriptType, ScriptAddress, ScriptWitness};
pub use self::sign::{TransactionInputSigner, UnsignedTransactionInput, SignatureVersion};
pub use self::stack::Stack;
pub use self::verify::{SignatureChecker, NoopSignatureChecker, TransactionSignatureChecker};

View File

@ -99,6 +99,11 @@ impl Script {
self.data.clone()
}
/// Returns true when the script carries no bytes at all.
pub fn is_empty(&self) -> bool {
	let byte_count = self.data.len();
	byte_count == 0
}
/// Extra-fast test for pay-to-public-key-hash (P2PKH) scripts.
pub fn is_pay_to_public_key_hash(&self) -> bool {
self.data.len() == 25 &&
@ -139,6 +144,20 @@ impl Script {
self.data[1] == Opcode::OP_PUSHBYTES_20 as u8
}
/// Parse witness program. Returns Some(witness program version, program) or None if not a witness program.
///
/// Per BIP141 a witness program is a 1-byte version opcode (OP_0 or OP_1..OP_16),
/// followed by a single push of 2..40 bytes, i.e. a total script length of 4..42 bytes.
/// Version decodes to 0 for OP_0 and 1..16 for OP_1..OP_16.
pub fn parse_witness_program(&self) -> Option<(u8, &[u8])> {
	// Length gate first: `< 4` (was `> 4`, which rejected every real witness program,
	// made the `> 42` check unreachable, and let `self.data[1]` panic on 0/1-byte scripts).
	if self.data.len() < 4 || self.data.len() > 42 || self.data.len() != self.data[1] as usize + 2 {
		return None;
	}
	let witness_version = match Opcode::from_u8(self.data[0]) {
		Some(Opcode::OP_0) => 0,
		// OP_1..OP_16 map to versions 1..=16: offset from OP_1 plus one
		// (the previous `- 1` underflowed for OP_1).
		Some(x) if x >= Opcode::OP_1 && x <= Opcode::OP_16 => (x as u8) - (Opcode::OP_1 as u8) + 1,
		_ => return None,
	};
	// Program bytes follow the version opcode and the push-length byte.
	let witness_program = &self.data[2..];
	Some((witness_version, witness_program))
}
/// Extra-fast test for pay-to-witness-script-hash scripts.
pub fn is_pay_to_witness_script_hash(&self) -> bool {
self.data.len() == 34 &&
@ -331,6 +350,10 @@ impl Script {
ScriptType::Multisig
} else if self.is_null_data_script() {
ScriptType::NullData
} else if self.is_pay_to_witness_key_hash() {
ScriptType::WitnessKey
} else if self.is_pay_to_witness_script_hash() {
ScriptType::WitnessScript
} else {
ScriptType::NonStandard
}
@ -547,6 +570,8 @@ impl fmt::Display for Script {
}
}
/// Placeholder for a transaction input's witness stack.
/// NOTE(review): currently a unit struct carrying no data — segwit support is being
/// flushed in incrementally; confirm the intended final representation (stack of byte vectors).
pub struct ScriptWitness;
#[cfg(test)]
mod tests {
use {Builder, Opcode};

View File

@ -143,7 +143,7 @@ impl TransactionInputSigner {
match sigversion {
SignatureVersion::ForkId if sighash.fork_id => self.signature_hash_fork_id(input_index, input_amount, script_pubkey, sighashtype, sighash),
SignatureVersion::Base | SignatureVersion::ForkId => self.signature_hash_original(input_index, script_pubkey, sighashtype, sighash),
_ => 1u8.into(),
SignatureVersion::WitnessV0 => self.signature_hash_witness0(input_index, input_amount, script_pubkey, sighashtype, sighash),
}
}
@ -172,6 +172,7 @@ impl TransactionInputSigner {
previous_output: unsigned_input.previous_output.clone(),
sequence: unsigned_input.sequence,
script_sig: script_sig.to_bytes(),
script_witness: vec![],
}
}
@ -184,6 +185,7 @@ impl TransactionInputSigner {
previous_output: input.previous_output.clone(),
script_sig: script_pubkey.to_bytes(),
sequence: input.sequence,
script_witness: vec![],
}]
} else {
self.inputs.iter()
@ -199,6 +201,7 @@ impl TransactionInputSigner {
SighashBase::Single | SighashBase::None if n != input_index => 0,
_ => input.sequence,
},
script_witness: vec![],
})
.collect()
};
@ -231,44 +234,14 @@ impl TransactionInputSigner {
dhash256(&out)
}
/// Computes the signature hash for witness v0 (BIP143) signing.
/// Currently delegates to the fork-id implementation — the two digest layouts
/// are treated as identical here; TODO confirm they stay in sync with BIP143.
fn signature_hash_witness0(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
	self.signature_hash_fork_id(input_index, input_amount, script_pubkey, sighashtype, sighash)
}
fn signature_hash_fork_id(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
let hash_prevouts = match sighash.anyone_can_pay {
false => {
let mut stream = Stream::default();
for input in &self.inputs {
stream.append(&input.previous_output);
}
dhash256(&stream.out())
},
true => 0u8.into(),
};
let hash_sequence = match sighash.base {
SighashBase::All if !sighash.anyone_can_pay => {
let mut stream = Stream::default();
for input in &self.inputs {
stream.append(&input.sequence);
}
dhash256(&stream.out())
},
_ => 0u8.into(),
};
let hash_outputs = match sighash.base {
SighashBase::All => {
let mut stream = Stream::default();
for output in &self.outputs {
stream.append(output);
}
dhash256(&stream.out())
},
SighashBase::Single if input_index < self.outputs.len() => {
let mut stream = Stream::default();
stream.append(&self.outputs[input_index]);
dhash256(&stream.out())
},
_ => 0u8.into(),
};
let hash_prevouts = compute_hash_prevouts(sighash, &self.inputs);
let hash_sequence = compute_hash_sequence(sighash, &self.inputs);
let hash_outputs = compute_hash_outputs(sighash, input_index, &self.outputs);
let mut stream = Stream::default();
stream.append(&self.version);
@ -286,6 +259,50 @@ impl TransactionInputSigner {
}
}
/// Double-SHA256 over all input outpoints, or a zero-extended byte when
/// ANYONECANPAY is set (the signature then commits to no other inputs).
fn compute_hash_prevouts(sighash: Sighash, inputs: &[UnsignedTransactionInput]) -> H256 {
	if sighash.anyone_can_pay {
		0u8.into()
	} else {
		let mut prevouts_stream = Stream::default();
		for input in inputs {
			prevouts_stream.append(&input.previous_output);
		}
		dhash256(&prevouts_stream.out())
	}
}
/// Double-SHA256 over all input sequence numbers; only SIGHASH_ALL without
/// ANYONECANPAY commits to them, every other mode yields a zero-extended byte.
fn compute_hash_sequence(sighash: Sighash, inputs: &[UnsignedTransactionInput]) -> H256 {
	let commits_to_sequences = match sighash.base {
		SighashBase::All => !sighash.anyone_can_pay,
		_ => false,
	};
	if !commits_to_sequences {
		return 0u8.into();
	}
	let mut sequence_stream = Stream::default();
	for input in inputs {
		sequence_stream.append(&input.sequence);
	}
	dhash256(&sequence_stream.out())
}
/// Double-SHA256 over the committed outputs: all of them for SIGHASH_ALL,
/// only the matching-index output for SIGHASH_SINGLE (when it exists),
/// and a zero-extended byte otherwise.
fn compute_hash_outputs(sighash: Sighash, input_index: usize, outputs: &[TransactionOutput]) -> H256 {
	let mut outputs_stream = Stream::default();
	match sighash.base {
		SighashBase::All => {
			for output in outputs {
				outputs_stream.append(output);
			}
		},
		SighashBase::Single if input_index < outputs.len() => {
			outputs_stream.append(&outputs[input_index]);
		},
		_ => return 0u8.into(),
	}
	dhash256(&outputs_stream.out())
}
#[cfg(test)]
mod tests {
use bytes::Bytes;

View File

@ -12,5 +12,8 @@ pub use primitives::{hash, bytes, compact};
pub use compact_integer::CompactInteger;
pub use list::List;
pub use reader::{Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator, Error};
pub use stream::{Stream, Serializable, serialize, serialize_list, serialized_list_size};
pub use stream::{
Stream, Serializable, serialize, serialize_with_flags, serialize_list, serialized_list_size,
SERIALIZE_TRANSACTION_WITNESS,
};

View File

@ -4,12 +4,21 @@ use std::borrow::Borrow;
use compact_integer::CompactInteger;
use bytes::Bytes;
/// Do not serialize transaction witness data.
pub const SERIALIZE_TRANSACTION_WITNESS: u32 = 0x40000000;
pub fn serialize<T>(t: &T) -> Bytes where T: Serializable{
let mut stream = Stream::default();
stream.append(t);
stream.out()
}
/// Serializes `t` into bytes while honoring the given serialization `flags`
/// (e.g. `SERIALIZE_TRANSACTION_WITNESS` to include transaction witness data).
pub fn serialize_with_flags<T>(t: &T, flags: u32) -> Bytes where T: Serializable{
	let mut stream = Stream::with_flags(flags);
	stream.append(t);
	stream.out()
}
pub fn serialize_list<T, K>(t: &[K]) -> Bytes where T: Serializable, K: Borrow<T> {
let mut stream = Stream::default();
stream.append_list(t);
@ -36,12 +45,23 @@ pub trait Serializable {
#[derive(Default)]
pub struct Stream {
buffer: Vec<u8>,
flags: u32,
}
impl Stream {
/// Creates a new stream with an empty buffer and no serialization flags.
/// (A leftover pre-`flags` constructor line was removed — the body previously
/// contained both the old and the new struct literal, which does not compile.)
pub fn new() -> Self {
	Stream { buffer: Vec::new(), flags: 0 }
}
/// Creates a stream that serializes with the given flag bits set.
pub fn with_flags(flags: u32) -> Self {
	Stream { flags: flags, ..Default::default() }
}
/// Are transactions written to this stream with witness data?
pub fn include_transaction_witness(&self) -> bool {
	// Single-bit flag, so masking and comparing to the constant is equivalent to `!= 0`.
	self.flags & SERIALIZE_TRANSACTION_WITNESS == SERIALIZE_TRANSACTION_WITNESS
}
/// Serializes the struct and appends it to the end of stream.

View File

@ -196,6 +196,8 @@ pub mod tests {
fn send_getheaders(&self, _message: &types::GetHeaders) { *self.messages.lock().entry("getheaders".to_owned()).or_insert(0) += 1; }
fn send_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("transaction".to_owned()).or_insert(0) += 1; }
fn send_block(&self, _message: &types::Block) { *self.messages.lock().entry("block".to_owned()).or_insert(0) += 1; }
fn send_witness_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("witness_transaction".to_owned()).or_insert(0) += 1; }
fn send_witness_block(&self, _message: &types::Block) { *self.messages.lock().entry("witness_block".to_owned()).or_insert(0) += 1; }
fn send_headers(&self, _message: &types::Headers) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn respond_headers(&self, _message: &types::Headers, _id: RequestId) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn send_mempool(&self, _message: &types::MemPool) { *self.messages.lock().entry("mempool".to_owned()).or_insert(0) += 1; }

View File

@ -243,7 +243,9 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
_ => false,
},
// we never ask for merkle blocks && we never ask for compact blocks
InventoryType::MessageCompactBlock | InventoryType::MessageFilteredBlock => false,
InventoryType::MessageCompactBlock | InventoryType::MessageFilteredBlock
| InventoryType::MessageWitnessBlock | InventoryType::MessageWitnessFilteredBlock
| InventoryType::MessageWitnessTx => false,
// unknown inventory type
InventoryType::Error => {
self.peers.misbehaving(peer_index, &format!("Provided unknown inventory type {:?}", item.hash.to_reversed_str()));

View File

@ -28,8 +28,12 @@ pub enum Task {
MerkleBlock(PeerIndex, types::MerkleBlock),
/// Send cmpcmblock
CompactBlock(PeerIndex, types::CompactBlock),
/// Send block with witness data
WitnessBlock(PeerIndex, IndexedBlock),
/// Send transaction
Transaction(PeerIndex, IndexedTransaction),
/// Send transaction with witness data
WitnessTransaction(PeerIndex, IndexedTransaction),
/// Send block transactions
BlockTxn(PeerIndex, types::BlockTxn),
/// Send notfound
@ -117,6 +121,17 @@ impl LocalSynchronizationTaskExecutor {
}
}
/// Sends a full block (witness form) to the given peer, if it is still connected.
fn execute_witness_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
	if let Some(connection) = self.peers.connection(peer_index) {
		trace!(target: "sync", "Sending witness block {} to peer#{}", block.hash().to_reversed_str(), peer_index);
		// Record the hash as known to this peer so it is not re-announced later.
		self.peers.hash_known_as(peer_index, block.hash().clone(), KnownHashType::Block);
		let block = types::Block {
			block: block.to_raw_block(),
		};
		// NOTE(review): witness inclusion appears to happen on the wire side via
		// send_witness_block — confirm the connection serializes with witness flags.
		connection.send_witness_block(&block);
	}
}
fn execute_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
@ -128,6 +143,17 @@ impl LocalSynchronizationTaskExecutor {
}
}
/// Sends a transaction (witness form) to the given peer, if it is still connected.
fn execute_witness_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
	if let Some(connection) = self.peers.connection(peer_index) {
		trace!(target: "sync", "Sending witness transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
		// Record the hash as known to this peer so it is not re-announced later.
		self.peers.hash_known_as(peer_index, transaction.hash, KnownHashType::Transaction);
		let transaction = types::Tx {
			transaction: transaction.raw,
		};
		// NOTE(review): witness inclusion appears to happen on the wire side via
		// send_witness_transaction — confirm the connection serializes with witness flags.
		connection.send_witness_transaction(&transaction);
	}
}
fn execute_block_txn(&self, peer_index: PeerIndex, blocktxn: types::BlockTxn) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", blocktxn.request.transactions.len(), peer_index);
@ -202,7 +228,9 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
Task::Block(peer_index, block) => self.execute_block(peer_index, block),
Task::MerkleBlock(peer_index, block) => self.execute_merkleblock(peer_index, block),
Task::CompactBlock(peer_index, block) => self.execute_compact_block(peer_index, block),
Task::WitnessBlock(peer_index, block) => self.execute_witness_block(peer_index, block),
Task::Transaction(peer_index, transaction) => self.execute_transaction(peer_index, transaction),
Task::WitnessTransaction(peer_index, transaction) => self.execute_witness_transaction(peer_index, transaction),
Task::BlockTxn(peer_index, blocktxn) => self.execute_block_txn(peer_index, blocktxn),
Task::NotFound(peer_index, notfound) => self.execute_notfound(peer_index, notfound),
Task::Inventory(peer_index, inventory) => self.execute_inventory(peer_index, inventory),

View File

@ -273,6 +273,16 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageWitnessTx => {
// only transaction from memory pool can be requested
if let Some(transaction) = self.memory_pool.read().read_by_hash(&next_item.hash) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with witness-tx {}", peer_index, next_item.hash.to_reversed_str());
let transaction = IndexedTransaction::new(next_item.hash, transaction.clone());
self.executor.execute(Task::WitnessTransaction(peer_index, transaction));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with block {}", peer_index, next_item.hash.to_reversed_str());
@ -312,9 +322,15 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
notfound.inventory.push(next_item);
}
},
_ => {
common::InventoryType::MessageWitnessBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with witness-block {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::WitnessBlock(peer_index, block.into()));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::Error | common::InventoryType::MessageWitnessFilteredBlock => (),
}
Some(ServerTask::ReversedGetData(peer_index, message, notfound))

View File

@ -170,7 +170,7 @@ impl AsyncVerifier {
},
Ok(tx_output_provider) => {
let time: u32 = get_time().sec as u32;
match verifier.verifier.verify_mempool_transaction(&tx_output_provider, height, time, &transaction.raw) {
match verifier.verifier.verify_mempool_transaction(storage.as_block_header_provider(), &tx_output_provider, height, time, &transaction.raw) {
Ok(_) => sink.on_transaction_verification_success(transaction.into()),
Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash),
}

View File

@ -440,6 +440,7 @@ impl<F> TransactionInputBuilder<F> where F: Invoke<chain::TransactionInput> {
previous_output: self.output.expect("Building input without previous output"),
script_sig: self.signature,
sequence: self.sequence,
script_witness: vec![],
}
)
}

View File

@ -107,6 +107,7 @@ impl TransactionBuilder {
},
script_sig: Bytes::new_with_len(0),
sequence: 0xffffffff,
script_witness: vec![],
});
self
}
@ -123,6 +124,7 @@ impl TransactionBuilder {
},
script_sig: Bytes::new_with_len(0),
sequence: 0xffffffff,
script_witness: vec![],
}];
self
}

View File

@ -1,10 +1,10 @@
use network::ConsensusParams;
use network::{ConsensusParams, Deployments as NetworkDeployments};
use db::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use sigops::transaction_sigops;
use work::block_reward_satoshi;
use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments;
use deployments::{Deployments, ActiveDeployments};
use canon::CanonBlock;
use error::{Error, TransactionError};
use timestamp::median_timestamp;
@ -24,12 +24,12 @@ impl<'a> BlockAcceptor<'a> {
consensus: &'a ConsensusParams,
block: CanonBlock<'a>,
height: u32,
deployments: &'a Deployments,
deployments: ActiveDeployments<'a>,
headers: &'a BlockHeaderProvider,
) -> Self {
BlockAcceptor {
finality: BlockFinality::new(block, height, deployments, headers, consensus),
serialized_size: BlockSerializedSize::new(block, consensus, height),
finality: BlockFinality::new(block, height, deployments.deployments, headers, consensus),
serialized_size: BlockSerializedSize::new(block, consensus, deployments, height),
coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, consensus, height),
@ -83,26 +83,29 @@ impl<'a> BlockFinality<'a> {
pub struct BlockSerializedSize<'a> {
block: CanonBlock<'a>,
consensus: &'a ConsensusParams,
deployments: ActiveDeployments<'a>,
height: u32,
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, height: u32) -> Self {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, deployments: ActiveDeployments<'a>, height: u32) -> Self {
BlockSerializedSize {
block: block,
consensus: consensus,
deployments: deployments,
height: height,
}
}
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
if size < self.consensus.fork.min_block_size(self.height) ||
size > self.consensus.fork.max_block_size(self.height) {
Err(Error::Size(size))
} else {
Ok(())
if !self.consensus.fork.check_block_size(size, self.height, &self.deployments) {
return Err(Error::Size(size))
}
if self.deployments.is_active("segwit") {
// TODO: block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT
}
Ok(())
}
}

View File

@ -6,7 +6,7 @@ use canon::CanonBlock;
use accept_block::BlockAcceptor;
use accept_header::HeaderAcceptor;
use accept_transaction::TransactionAcceptor;
use deployments::Deployments;
use deployments::{Deployments, ActiveDeployments};
use duplex_store::DuplexTransactionOutputProvider;
use VerificationLevel;
@ -21,9 +21,10 @@ impl<'a> ChainAcceptor<'a> {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider();
let active_deployments = ActiveDeployments::new(deployments, height, headers, consensus);
ChainAcceptor {
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, deployments, headers),
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, active_deployments, headers),
header: HeaderAcceptor::new(headers, consensus, block.header(), height, deployments),
transactions: block.transactions()
.into_iter()

View File

@ -2,7 +2,7 @@ use primitives::hash::H256;
use primitives::bytes::Bytes;
use db::{TransactionMetaProvider, TransactionOutputProvider, BlockHeaderProvider};
use network::{ConsensusParams, ConsensusFork};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use script::{Script, ScriptWitness, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::Deployments;
use script::Builder;
@ -356,6 +356,7 @@ impl<'a> TransactionEval<'a> {
let input: Script = input.script_sig.clone().into();
let output: Script = output.script_pubkey.into();
let script_witness = ScriptWitness;
let flags = VerificationFlags::default()
.verify_p2sh(self.verify_p2sh)
@ -364,7 +365,7 @@ impl<'a> TransactionEval<'a> {
.verify_checksequence(self.verify_checksequence)
.verify_dersig(self.verify_dersig);
try!(verify_script(&input, &output, &flags, &checker, self.signature_version)
try!(verify_script(&input, &output, &script_witness, &flags, &checker, self.signature_version)
.map_err(|_| TransactionError::Signature(index)));
}

View File

@ -12,7 +12,7 @@ use verify_header::HeaderVerifier;
use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
use deployments::Deployments;
use deployments::{Deployments, ActiveDeployments};
use {Verify, VerificationLevel};
pub struct BackwardsCompatibleChainVerifier {
@ -89,6 +89,7 @@ impl BackwardsCompatibleChainVerifier {
pub fn verify_mempool_transaction<T>(
&self,
block_header_provider: &BlockHeaderProvider,
prevout_provider: &T,
height: u32,
time: u32,
@ -96,7 +97,8 @@ impl BackwardsCompatibleChainVerifier {
) -> Result<(), TransactionError> where T: TransactionOutputProvider {
let indexed_tx = transaction.clone().into();
// let's do preverification first
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, height);
let deployments = ActiveDeployments::new(&self.deployments, height, block_header_provider, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, deployments);
try!(tx_verifier.check());
let canon_tx = CanonTransaction::new(&indexed_tx);

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use parking_lot::Mutex;
use network::{ConsensusParams, Deployment};
use network::{ConsensusParams, Deployment, Deployments as NetworkDeployments};
use hash::H256;
use db::{BlockHeaderProvider, BlockRef, BlockAncestors, BlockIterator};
use timestamp::median_timestamp;
@ -51,11 +51,19 @@ struct DeploymentState {
/// Last known deployment states
type DeploymentStateCache = HashMap<&'static str, DeploymentState>;
#[derive(Default)]
#[derive(Default, Debug)]
pub struct Deployments {
cache: Mutex<DeploymentStateCache>,
}
#[derive(Clone, Copy)]
pub struct ActiveDeployments<'a> {
pub deployments: &'a Deployments,
number: u32,
headers: &'a BlockHeaderProvider,
consensus: &'a ConsensusParams,
}
impl Deployments {
pub fn new() -> Self {
Deployments::default()
@ -71,6 +79,17 @@ impl Deployments {
None => false
}
}
/// Returns true if SegWit deployment is active at the block with the given number.
/// Returns false when the network's consensus parameters define no segwit deployment.
pub fn segwit(&self, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> bool {
	match consensus.segwit_deployment {
		Some(segwit) => {
			// BIP9-style threshold evaluation; the mutex-guarded cache
			// memoizes per-deployment state between calls.
			let mut cache = self.cache.lock();
			threshold_state(&mut cache, segwit, number, headers, consensus).is_active()
		},
		None => false
	}
}
}
/// Calculates threshold state of given deployment
@ -121,6 +140,27 @@ fn threshold_state(cache: &mut DeploymentStateCache, deployment: Deployment, num
}
impl<'a> ActiveDeployments<'a> {
	/// Bundles the shared deployment cache with the context (block number,
	/// header provider, consensus params) needed to evaluate activation.
	pub fn new(deployments: &'a Deployments, number: u32, headers: &'a BlockHeaderProvider, consensus: &'a ConsensusParams) -> Self {
		ActiveDeployments {
			consensus: consensus,
			headers: headers,
			number: number,
			deployments: deployments,
		}
	}
}
impl<'a> NetworkDeployments for ActiveDeployments<'a> {
	/// Returns whether the named soft-fork deployment is active at this block.
	/// Unknown names are treated as inactive.
	fn is_active(&self, name: &str) -> bool {
		match name {
			// Fixed copy-paste defect: the "csv" arm previously queried the
			// *segwit* deployment state; each name must query its own deployment.
			"csv" => self.deployments.csv(self.number, self.headers, self.consensus),
			"segwit" => self.deployments.segwit(self.number, self.headers, self.consensus),
			_ => false,
		}
	}
}
fn first_of_the_period(block: u32, miner_confirmation_window: u32) -> u32 {
if block < miner_confirmation_window - 1 {
0

View File

@ -6,6 +6,7 @@ use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;
use constants::{MIN_COINBASE_SIZE, MAX_COINBASE_SIZE};
use deployments::ActiveDeployments;
pub struct TransactionVerifier<'a> {
pub empty: TransactionEmpty<'a>,
@ -40,13 +41,13 @@ pub struct MemoryPoolTransactionVerifier<'a> {
}
impl<'a> MemoryPoolTransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, height: u32) -> Self {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, deployments: ActiveDeployments<'a>) -> Self {
trace!(target: "verification", "Mempool-Tx pre-verification {}", transaction.hash.to_reversed_str());
MemoryPoolTransactionVerifier {
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
size: TransactionSize::new(transaction, consensus, height),
size: TransactionSize::new(transaction, deployments, consensus),
sigops: TransactionSigops::new(transaction, ConsensusFork::absolute_maximum_block_sigops()),
}
}
@ -147,21 +148,21 @@ impl<'a> TransactionMemoryPoolCoinbase<'a> {
pub struct TransactionSize<'a> {
transaction: &'a IndexedTransaction,
deployments: ActiveDeployments<'a>,
consensus: &'a ConsensusParams,
height: u32,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, height: u32) -> Self {
fn new(transaction: &'a IndexedTransaction, deployments: ActiveDeployments<'a>, consensus: &'a ConsensusParams) -> Self {
TransactionSize {
transaction: transaction,
deployments: deployments,
consensus: consensus,
height: height,
}
}
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.serialized_size() > self.consensus.fork.max_transaction_size(self.height) {
if !self.consensus.fork.check_transaction_size(self.transaction.raw.serialized_size(), &self.deployments) {
Err(TransactionError::MaxSize)
} else {
Ok(())