Merge branch 'master' into zcash_init

This commit is contained in:
Svyatoslav Nikolsky 2018-11-12 12:31:14 +03:00
commit c9132eb99d
63 changed files with 1203 additions and 196 deletions

Cargo.lock generated
View File

@ -162,7 +162,7 @@ dependencies = [
"bitcrypto 0.1.0",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serialization 0.1.0",
"serialization_derive 0.1.0",
]
@ -229,12 +229,19 @@ dependencies = [
[[package]]
name = "csv"
version = "0.15.0"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"csv-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "csv-core"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -537,7 +544,7 @@ dependencies = [
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -619,6 +626,14 @@ dependencies = [
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "memchr"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "memoffset"
version = "0.2.1"
@ -720,6 +735,7 @@ dependencies = [
"chain 0.1.0",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serialization 0.1.0",
]
@ -807,7 +823,7 @@ version = "0.1.0"
dependencies = [
"abstract-ns 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitcrypto 0.1.0",
"csv 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
"csv 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -840,7 +856,7 @@ dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -883,7 +899,7 @@ dependencies = [
"bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1025,7 +1041,7 @@ dependencies = [
"network 0.1.0",
"p2p 0.1.0",
"primitives 0.1.0",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",
"serde 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1050,6 +1066,11 @@ dependencies = [
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc-hex"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc-serialize"
version = "0.3.24"
@ -1144,6 +1165,7 @@ version = "0.1.0"
dependencies = [
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1195,7 +1217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "smallvec"
version = "0.4.4"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -1546,7 +1568,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"
"checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150"
"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"
"checksum csv 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7ef22b37c7a51c564a365892c012dc0271221fdcc64c69b19ba4d6fa8bd96d9c"
"checksum csv 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71903184af9960c555e7f3b32ff17390d20ecaaf17d4f18c4a0993f2df8a49e3"
"checksum csv-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4dd8e6d86f7ba48b4276ef1317edc8cc36167546d8972feb4a2b5fec0b374105"
"checksum display_derive 0.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4bba5dcd6d2855639fcf65a9af7bbad0bfb6dbf6fe68fba70bab39a6eb973ef4"
"checksum domain 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c1850bf2c3c3349e1dba2aa214d86cf9edaa057a09ce46b1a02d5c07d5da5e65"
"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
@ -1587,6 +1610,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2"
"checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21"
"checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"
"checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d"
"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
"checksum mime 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e2e00e17be181010a91dbfefb01660b17311059dc8c7f48b9017677721e732bd"
"checksum mio 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0e8411968194c7b139e9105bc4ae7db0bae232af087147e72f0616ebf5fdb9cb"
@ -1622,6 +1646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
"checksum rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
"checksum rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d2b03280c2813907a030785570c577fb27d3deec8da4c18566751ade94de0ace"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
"checksum rustc_version 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9743a7670d88d5d52950408ecdb7c71d8986251ab604d4689dd2ca25c9bca69"
"checksum safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e27a8b19b835f7aea908818e871f5cc3a5a186550c30773be987e155e8163d8f"
@ -1639,7 +1664,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
"checksum slab 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fdeff4cd9ecff59ec7e3744cbca73dfe5ac35c2aedb2cfba8a1c715a18912e9d"
"checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013"
"checksum smallvec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ee4f357e8cd37bf8822e1b964e96fd39e2cb5a0424f8aaa284ccaccc2162411c"
"checksum smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f90c5e5fe535e48807ab94fc611d323935f39d4660c52b26b96446a7b33aef10"
"checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b"
"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"

View File

@ -37,9 +37,6 @@ debug = true
[profile.test]
debug = true
[profile.doc]
debug = true
[[bin]]
path = "pbtc/main.rs"
name = "pbtc"

View File

@ -212,6 +212,8 @@ SUBCOMMANDS:
## JSON-RPC
The JSON-RPC interface is served on port :8332 for mainnet and :18332 for testnet unless specified otherwise. If you are using testnet, you will need to change the port in the sample curl requests shown below.
#### Network
The Parity-bitcoin `network` interface.

View File

@ -23,6 +23,7 @@ pub fn fetch(benchmark: &mut Benchmark) {
let next_block = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
@ -65,6 +66,7 @@ pub fn write(benchmark: &mut Benchmark) {
let next_block = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
@ -102,6 +104,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
let next_block = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32 * 4).build()
@ -112,6 +115,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
let next_block_side = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(base).nonce(x as u32 * 4 + 2).build()
@ -122,6 +126,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
let next_block_side_continue = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(next_base).nonce(x as u32 * 4 + 3).build()
@ -131,6 +136,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
let next_block_continue = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32 * 4 + 1).build()
@ -199,6 +205,7 @@ pub fn write_heavy(benchmark: &mut Benchmark) {
let next_block = test_data::block_builder()
.transaction()
.coinbase()
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()

View File

@ -34,6 +34,7 @@ pub fn main(benchmark: &mut Benchmark) {
LittleEndian::write_u64(&mut coinbase_nonce[..], x as u64);
let next_block = test_data::block_builder()
.transaction()
.lock_time(x as u32)
.input()
.coinbase()
.signature_bytes(coinbase_nonce.to_vec().into())
@ -62,6 +63,7 @@ pub fn main(benchmark: &mut Benchmark) {
LittleEndian::write_u64(&mut coinbase_nonce[..], (b + BLOCKS_INITIAL) as u64);
let mut builder = test_data::block_builder()
.transaction()
.lock_time(b as u32)
.input().coinbase().signature_bytes(coinbase_nonce.to_vec().into()).build()
.output().value(5000000000).build()
.build();

View File

@ -4,7 +4,7 @@ version = "0.1.0"
authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
rustc-serialize = "0.3"
rustc-hex = "2"
heapsize = "0.4"
bitcrypto = { path = "../crypto" }
primitives = { path = "../primitives" }

chain/README.md Normal file
View File

@ -0,0 +1,286 @@
# Chain
In this crate, you will find the structures and functions that make up the blockchain, Bitcoin's core data structure.
## Conceptual Overview
Here we will dive deep into how the blockchain is created, organized, etc. as a preface for understanding the code in this crate.
We will cover the following concepts:
* Blockchain
* Block
* Block Header
* Merkle Tree
* Transaction
* Witnesses and SegWit
* Coinbase
### Blockchain
So what is a blockchain? A blockchain is a *chain* of *blocks*...
![mind blown gif](https://media.giphy.com/media/OK27wINdQS5YQ/giphy.gif)
Yep, actually.
### Block
The real question is, what is a [block](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch09.asciidoc#structure-of-a-block)?
A block is a data structure with two fields:
* **Block header:** a data structure containing the block's metadata
* **Transactions:** an array ([vector](https://doc.rust-lang.org/book/second-edition/ch08-01-vectors.html) in rust) of transactions
![Blockchain diagram](https://raw.githubusercontent.com/pluralsight/guides/master/images/8cd8b94f-d05f-41e8-a0f1-70853f390094.png)
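To make this concrete, here is a sketch of how such a block can be modeled in Rust (the field types are assumptions mirroring the description above; the crate's actual definitions live in block.rs):
```rust
// A block couples its metadata (the header) with the transactions it carries.
pub struct Block {
    pub block_header: BlockHeader,      // the block's metadata (see below)
    pub transactions: Vec<Transaction>, // vector of transactions
}
```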
### Block Header
So what is a [block header](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch09.asciidoc#block-header)?
A block header is a data structure with the following fields:
* **Version:** indicates which set of block validation rules to follow
* **Previous Header Hash:** a reference to the parent/previous block in the blockchain
* **Merkle Root Hash:** a hash (root hash) of the merkle tree data structure containing a block's transactions
* **Time:** a timestamp (seconds from Unix Epoch)
* **Bits:** aka the difficulty target for this block
* **Nonce:** value used in proof-of-work
![Block header diagram](https://i.stack.imgur.com/BiaJK.png)
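A hedged sketch of this structure in Rust (the types are assumptions consistent with the field descriptions; `H256` is a 256-bit hash and `Compact` the compact difficulty encoding from the primitives crate):
```rust
pub struct BlockHeader {
    pub version: u32,               // which validation rules apply
    pub previous_header_hash: H256, // link to the parent block
    pub merkle_root_hash: H256,     // root of the transaction merkle tree
    pub time: u32,                  // seconds since the Unix epoch
    pub bits: Compact,              // compact-encoded difficulty target
    pub nonce: u32,                 // proof-of-work counter
}
```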
*How are blocks chained together?* They are chained together via the backwards reference (previous header hash) present in the block header. Each block points backwards to its parent, all the way back to the [genesis block](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch09.asciidoc#the-genesis-block) (the first block in the Bitcoin blockchain that is hard coded into all clients).
### Merkle Root
*What is a Merkle Root?* A merkle root is the root of a merkle tree. As best stated in *Mastering Bitcoin*:
> A _merkle tree_, also known as a _binary hash tree_, is a data
> structure used for efficiently summarizing and verifying the integrity of large sets of data.
In a merkle tree, all the data, in this case transactions, are leaves in the tree. Each of these is hashed and concatenated with its sibling... all the way up the tree until you are left with a single *root* hash (the merkle root hash).
![Merkle tree](https://upload.wikimedia.org/wikipedia/commons/9/95/Hash_Tree.svg)
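As a hedged sketch of the node-combining step (plain 32-byte arrays stand in for the crate's `H256` type, and `dhash256` is an assumed helper for Bitcoin's double SHA-256):
```rust
// Combine two sibling hashes into their parent: concatenate the two 256-bit
// values into 512 bits, then double-SHA-256 the result back down to 256 bits.
fn merkle_node_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut concat = [0u8; 64];
    concat[..32].copy_from_slice(left);
    concat[32..].copy_from_slice(right);
    dhash256(&concat) // assumed helper: SHA-256 applied twice
}
```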
### Transaction
According to [Mastering Bitcoin](https://github.com/bitcoinbook/bitcoinbook/) :
> Transactions are the most important part of the bitcoin system. Everything else in bitcoin is designed to ensure that transactions can be created, propagated on the network, validated, and finally added to the global ledger of transactions (the blockchain).
At its most basic level, a transaction is an encoded data structure that facilitates the transfer of value between two public key addresses on the Bitcoin blockchain.
The most fundamental building block of a transaction is a `transaction output` -- the bitcoin you own in your "wallet" is in fact a subset of `unspent transaction outputs`, or `UTXOs`, from the global `UTXO set`. `UTXOs` are indivisible, discrete units of value which can only be consumed in their entirety. Thus, if I want to send you 1 BTC and I only own one `UTXO` worth 2 BTC, I would construct a transaction that spends my `UTXO`, sending 1 BTC to you and 1 BTC back to myself (just like receiving change).
**Transaction Output:** transaction outputs have two fields:
* *value*: the amount of bitcoin this output carries, in satoshis
* *scriptPubKey (aka locking script or witness script)*: conditions required to unlock (spend) a transaction value
**Transaction Input:** transaction inputs have four fields:
* *previous output*: the previous output transaction reference, as an OutPoint structure (see below)
* *scriptSig*: a script satisfying the conditions set on the UTXO ([BIP16](https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki))
* *scriptWitness*: a script satisfying the conditions set on the UTXO ([BIP141](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki))
* *sequence number*: a per-input number set by the sender, originally intended to allow "replacement" of a transaction when its information is updated before inclusion into a block
**Outpoint**:
* *hash*: references the transaction that contains the UTXO being spent
* *index*: identifies which UTXO from that transaction is referenced
**Transaction Version:** the version of the data formatting
**Transaction Locktime:** specifies either a block height or a Unix timestamp before which the transaction cannot be included in a block
**Transaction Fee:** A transaction's total input value must be greater than or equal to its total output value, or the transaction is invalid. The difference between these two values is the transaction fee, a fee paid to the miner who includes this transaction in his/her block.
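Putting the fields above together, a minimal Rust sketch of these structures might look like this (the types are simplified assumptions: `H256` for hashes, raw byte vectors for scripts):
```rust
pub struct OutPoint {
    pub hash: H256, // txid of the transaction holding the UTXO being spent
    pub index: u32, // which output of that transaction is referenced
}

pub struct TransactionInput {
    pub previous_output: OutPoint,
    pub script_sig: Vec<u8>,          // unlocking script (BIP16)
    pub sequence: u32,                // sequence number
    pub script_witness: Vec<Vec<u8>>, // witness data (BIP141), empty pre-SegWit
}

pub struct TransactionOutput {
    pub value: u64,             // value in satoshis
    pub script_pubkey: Vec<u8>, // locking script
}

pub struct Transaction {
    pub version: i32,
    pub inputs: Vec<TransactionInput>,
    pub outputs: Vec<TransactionOutput>,
    pub lock_time: u32,
}
```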
### Witnesses and SegWit
**Preface**: here I will try to give the minimal context around SegWit necessary for understanding why witnesses exist in terms of blocks, block headers, and transactions.
SegWit is defined in [BIP141](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki).
A witness is defined as:
> The witness is a serialization of all witness data of the transaction.
Most importantly:
> Witness data is NOT script.
Thus:
> A non-witness program (defined hereinafter) txin MUST be associated with an empty witness field, represented by a 0x00. If all txins are not witness program, a transaction's wtxid is equal to its txid.
*Regular Transaction Id vs. Witness Transaction Id*
* Regular transaction id:
```[nVersion][txins][txouts][nLockTime]```
* Witness transaction id:
```[nVersion][marker][flag][txins][txouts][witness][nLockTime]```
A `witness root hash` is calculated with all those `wtxid` as leaves, in a way similar to the `hashMerkleRoot` in the block header.
In the transaction, there are two different script fields:
* **script_sig**: original/old signature script ([BIP16](https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki)/P2SH)
* **script_witness**: witness script
Depending on the content of these two fields and the scriptPubKey, witness validation logic may be triggered. Here are the two cases (note these definitions are straight from the BIP so may be quite dense):
1. **Native witness program**: *a scriptPubKey that is exactly a push of a version byte, plus a push of a witness program. The scriptSig must be exactly empty or validation fails.*
2. **P2SH witness program**: *a scriptPubKey is a P2SH script, and the BIP16 redeemScript pushed in the scriptSig is exactly a push of a version byte plus a push of a witness program. The scriptSig must be exactly a push of the BIP16 redeemScript or validation fails.*
[Here](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#witness-program) are the nitty gritty details of how witnesses and scripts work together -- this goes into the fine details of how the above situations are implemented.
Here are a couple StackOverflow Questions/Answers that help clarify some of the above information:
* [What's the purpose of ScriptSig in a SegWit transaction?](https://bitcoin.stackexchange.com/questions/49372/whats-the-purpose-of-scriptsig-in-a-segwit-transaction)
* [Can old wallets redeem segwit outputs it receives? If so how?](https://bitcoin.stackexchange.com/questions/50254/can-old-wallets-redeem-segwit-outputs-it-receives-if-so-how?rq=1)
### Coinbase
Whenever a miner mines a block, it includes a special transaction called a coinbase transaction. This transaction has no inputs and creates new bitcoins equal to the current block reward (12.5 BTC at the time of writing), which are awarded to the miner of the block. Read more about the coinbase transaction [here](https://github.com/bitcoinbook/bitcoinbook/blob/f8b883dcd4e3d1b9adf40fed59b7e898fbd9241f/ch10.asciidoc#the-coinbase-transaction).
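The reward follows a simple halving schedule: 50 BTC, cut in half every 210,000 blocks. A sketch consistent with the `block_reward_satoshi` helper referenced elsewhere in this repository:
```rust
// Block reward in satoshis at a given height: 50 BTC halved every 210,000 blocks.
fn block_reward_satoshi(block_height: u32) -> u64 {
    let mut reward: u64 = 50 * 100_000_000; // 50 BTC expressed in satoshis
    let halvings = block_height / 210_000;
    if halvings >= 64 {
        return 0; // the reward has been shifted down to nothing
    }
    reward >>= halvings; // each halving is a right shift by one bit
    reward
}
```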
**Need a more visual demonstration of the above information? Check out [this awesome website](https://anders.com/blockchain/).**
## Crate Dependencies
#### 1. [rustc-hex](https://crates.io/crates/rustc-hex):
*Serialization and deserialization support from hexadecimal strings.*
**One thing to note**: *This crate is deprecated in favor of [`serde`](https://serde.rs/). No new feature development will happen in this crate, although bug fixes proposed through PRs will still be merged. It is very highly recommended by the Rust Library Team that you use [`serde`](https://serde.rs/), not this crate.*
#### 2. [heapsize](https://crates.io/crates/heapsize):
*infrastructure for measuring the total runtime size of an object on the heap*
#### 3. Crates from within the Parity Bitcoin Repo:
* bitcrypto (crypto)
* primitives
* serialization
* serialization_derive
## Crate Content
### Block (block.rs)
A relatively straightforward implementation of the data structure described above. A `block` is a rust `struct`. It implements the following traits:
* ```From<&'static str>```: this trait takes in a string and outputs a `block`. It is implemented via the `from` function which deserializes the received string into a `block` data structure. Read more about serialization [here](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch06.asciidoc#transaction-serializationoutputs) (in the context of transactions).
* ```RepresentH256```: this trait takes a `block` data structure and hashes it, returning the hash.
The `block` has a few methods of its own, the entirety of which are simple getter methods.
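For instance, a hedged sketch of how the `RepresentH256` impl can simply delegate to the block's own `hash` method (the trait signature matches the one shown in the lib.rs diff further down):
```rust
// RepresentH256 lets callers obtain a block's H256 hash through one trait.
impl RepresentH256 for Block {
    fn h256(&self) -> H256 {
        self.hash() // the block's hash is the double-SHA-256 of its header
    }
}
```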
### Block Header (block_header.rs)
A relatively straightforward implementation of the data structure described above. A `block header` is a rust `struct`. It implements the following traits:
* ```From<&'static str>```: this trait takes in a string and outputs a `block header`. It is implemented via the `from` function which deserializes the received string into a `block header` data structure. Read more about serialization [here](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch06.asciidoc#transaction-serializationoutputs) (in the context of transactions).
* `fmt::Debug`: this trait formats the `block header` struct for pretty printing in a debug context -- i.e. it allows the programmer to print out the contents of the struct in a way that makes it easier to debug. Once this trait is implemented, you can do:
```rust
println!("{:?}", some_block_header);
```
Which will print out:
```
BlockHeader {
    version: VERSION_VALUE,
    previous_header_hash: PREVIOUS_HASH_HEADER_VALUE,
    merkle_root_hash: MERKLE_ROOT_HASH_VALUE,
    time: TIME_VALUE,
    bits: BITS_VALUE,
    nonce: NONCE_VALUE,
}
```
The `block header` only has a single method of its own, the `hash` method that returns a hash of itself.
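A hedged sketch of that method, assuming the crate's `serialize` helper and the `dhash256` double-SHA-256 from bitcrypto:
```rust
impl BlockHeader {
    // A block header's hash is the double-SHA-256 of its serialized form.
    pub fn hash(&self) -> H256 {
        dhash256(&serialize(self))
    }
}
```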
### Constants (constants.rs)
There are a few constants included in this crate. Since these are nicely documented, documenting them here would be redundant. [Here](https://doc.rust-lang.org/rust-by-example/custom_types/constants.html) you can read more about constants in rust.
### Read and Hash (read_and_hash.rs)
This is a small file that deals with the reading and hashing of serialized data, utilizing a few nifty rust features.
First, a `HashedData` struct is defined over a generic T. Generics in rust work in a similar way to generics in other languages. If you need to brush up on generics, [read here](https://doc.rust-lang.org/1.8.0/book/generics.html). This data structure stores a deserialized value along with the size (the length of the serialized data in bytes) and the hash of that data.
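A sketch of that shape (the field names are assumptions consistent with the description):
```rust
// Pairs a deserialized value with the hash of the bytes it was read from.
pub struct HashedData<T> {
    pub size: usize, // number of serialized bytes that were read
    pub hash: H256,  // double-SHA-256 of those bytes
    pub data: T,     // the deserialized value itself
}
```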
Next the `ReadAndHash` trait is defined. Traits in rust define abstract behaviors that can be shared between many different types. For example, let's say I am writing some code about food. To do this, I might want to create an `Eatable` trait that has a method `eat` describing how to eat this food (borrowing an example from the [New Rustacean podcast](https://newrustacean.com/)). To do this, I would define the trait as follows:
```rust
pub trait Eatable {
    fn eat(&self) -> String;
}
```
Here I have defined a trait along with a method signature that must be implemented by any type that implements this trait. For example, let's say I define a candy type that is eatable:
```rust
struct Candy {
    flavor: String,
}

impl Eatable for Candy {
    fn eat(&self) -> String {
        format!("Unwrap candy and munch on that {} goodness.", &self.flavor)
    }
}

// Create candy and eat it
let candy = Candy { flavor: "chocolate".to_string() };
println!("{}", candy.eat()); // "Unwrap candy and munch on that chocolate goodness."
```
Now let's take this one step further. Let's say we want to recreate `Eatable` so that the `eat` function consumes the food and returns it wrapped in a `Compost` type, where the composted type must be `Compostable` (another trait). Here it is important that we only compost `Compostable` types, because only `Compostable` foods can be made into `Compost`. Thus, we can recreate the `Eatable` trait, this time limiting what types can implement it to those that also implement the `Compostable` trait, using the `where` keyword (this kind of constraint is called a trait bound):
```rust
pub trait Eatable where Self: Compostable + Sized {
    fn eat(self) -> Compost<Self>;
}

pub trait Compostable {} // Here Compostable is a marker trait

struct Compost<T: Compostable> {
    compostable_food: T,
}

impl<T: Compostable> Compost<T> {
    fn celebrate(&self) {
        println!("Thank you for saving the earth!");
    }
}
```
So, let's now redefine `Candy`:
```rust
struct Candy {
    flavor: String,
}

impl Compostable for Candy {}

impl Eatable for Candy {
    fn eat(self) -> Compost<Candy> {
        Compost { compostable_food: self }
    }
}

// Create candy and eat it
let candy = Candy { flavor: "chocolate".to_string() };
let compost = candy.eat();
compost.celebrate(); // "Thank you for saving the earth!"
```
If this example doesn't quite make sense, I recommend checking out the [traits chapter](https://doc.rust-lang.org/book/second-edition/ch10-02-traits.html) in the Rust Book.
Now that you understand traits, generics, and trait bounds, let's get back to `ReadAndHash`. This is a trait that declares a `read_and_hash<T>` method where `T` is `Deserializable`, hence it can be deserialized (which, as you might guess, is important since the input here is serialized data). The output of this method is a `Result` (unfamiliar with `Result` in rust? [read more here](https://doc.rust-lang.org/std/result/)) wrapping the `HashedData` type described above.
Finally, the `ReadAndHash` trait is implemented for the `Reader` type. You can read more about the `Reader` type in the serialization crate.
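A hedged sketch of the trait's shape (the `Error` and `Deserializable` types come from the serialization crate):
```rust
pub trait ReadAndHash {
    // Deserialize a T from the underlying bytes while hashing exactly the
    // bytes consumed, returning both in a HashedData<T>.
    fn read_and_hash<T>(&mut self) -> Result<HashedData<T>, Error> where T: Deserializable;
}
```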
### Transaction (transaction.rs)
As described above, there are four structs related to transactions defined in this file:
* OutPoint
* TransactionInput
* TransactionOutput
* Transaction
The implementations of these are pretty straightforward -- the majority of the defined methods are getters, and each of these structs implements the `Serializable` and `Deserializable` traits.
A few things to note:
* The `HeapSizeOf` trait is implemented for `TransactionInput`, `TransactionOutput`, and `Transaction`. It has the method `heap_size_of_children` which calculates and returns the heap sizes of various struct fields.
* The `total_spends` method on `Transaction` calculates the sum of all the outputs in a transaction.
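For instance, a minimal sketch of `total_spends` as described (ignoring the overflow checks a production implementation would want):
```rust
impl Transaction {
    // Sum of all output values, in satoshis.
    pub fn total_spends(&self) -> u64 {
        self.outputs.iter().map(|output| output.value).sum()
    }
}
```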
### Merkle Root (merkle_root.rs)
The main function in this file calculates the merkle root (a field on the block header struct). It relies on two helper functions:
* **concat**: takes two 256-bit hash values and concatenates them into a single 512-bit value
* **merkle_root_hash**: double-SHA-256 hashes the concatenated 512-bit value, producing the 256-bit parent node
Using these two functions, the merkle root function takes a vector of values and calculates the merkle root row-by-row (a row being a level of the binary tree). Note: if there is an odd number of values in a row, the last value is duplicated to create a full tree.
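A hedged sketch of that row-by-row computation, reusing the `merkle_node_hash` helper sketched in the conceptual overview:
```rust
// Reduce a row of hashes level by level until a single root remains.
fn merkle_root(tx_hashes: &[[u8; 32]]) -> [u8; 32] {
    let mut row: Vec<[u8; 32]> = tx_hashes.to_vec();
    while row.len() > 1 {
        if row.len() % 2 == 1 {
            let last = *row.last().unwrap();
            row.push(last); // odd row: duplicate the last value
        }
        row = row
            .chunks(2)
            .map(|pair| merkle_node_hash(&pair[0], &pair[1]))
            .collect();
    }
    row[0] // assumes a non-empty input row
}
```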
### Indexed
There are indexed equivalents of `block`, `block header`, and `transaction`:
* indexed_block.rs
* indexed_header.rs
* indexed_transaction.rs
These are essentially wrappers around the "raw" data structures with the following:
* methods to convert to and from the raw data structures (i.e. block <-> indexed_block)
* an equality comparison against other indexed structures (specifically via the `PartialEq` trait)
* a deserialize method

View File

@ -13,7 +13,7 @@ pub struct Block {
impl From<&'static str> for Block {
fn from(s: &'static str) -> Self {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
deserialize(&s.from_hex::<Vec<u8>>().unwrap() as &[u8]).unwrap()
}
}

View File

@ -1,6 +1,6 @@
use std::io;
use std::fmt;
use hex::FromHex;
use hex::{ToHex, FromHex};
use ser::{deserialize, serialize};
use crypto::dhash256;
use compact::Compact;
@ -66,8 +66,6 @@ impl BlockHeader {
impl fmt::Debug for BlockHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use rustc_serialize::hex::ToHex;
f.debug_struct("BlockHeader")
.field("version", &self.version)
.field("previous_header_hash", &self.previous_header_hash.reversed())
@ -76,14 +74,14 @@ impl fmt::Debug for BlockHeader {
.field("time", &self.time)
.field("bits", &self.bits)
.field("nonce", &self.nonce)
.field("equihash_solution", &self.equihash_solution.as_ref().map(|s| s.0.to_hex()))
.field("equihash_solution", &self.equihash_solution.as_ref().map(|s| s.0.to_hex::<String>()))
.finish()
}
}
impl From<&'static str> for BlockHeader {
fn from(s: &'static str) -> Self {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
deserialize(&s.from_hex::<Vec<u8>>().unwrap() as &[u8]).unwrap()
}
}

View File

@ -84,7 +84,7 @@ impl IndexedBlock {
impl From<&'static str> for IndexedBlock {
fn from(s: &'static str) -> Self {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
deserialize(&s.from_hex::<Vec<u8>>().unwrap() as &[u8]).unwrap()
}
}

View File

@ -1,5 +1,5 @@
use std::io;
use rustc_serialize::hex::ToHex;
use hex::{ToHex, FromHex};
use hash::{H256, H512};
use ser::{Error, Serializable, Deserializable, Stream, Reader, FixedArray_H256_2,
FixedArray_u8_296, FixedArray_u8_601_2};

View File

@ -1,4 +1,4 @@
extern crate rustc_serialize;
extern crate rustc_hex as hex;
extern crate heapsize;
extern crate primitives;
extern crate bitcrypto as crypto;
@ -24,7 +24,6 @@ pub trait RepresentH256 {
fn h256(&self) -> hash::H256;
}
pub use rustc_serialize::hex;
pub use primitives::{hash, bytes, bigint, compact};
pub use block::Block;

View File

@ -102,7 +102,7 @@ pub struct Transaction {
impl From<&'static str> for Transaction {
fn from(s: &'static str) -> Self {
deserialize(&s.from_hex().unwrap() as &[u8]).unwrap()
deserialize(&s.from_hex::<Vec<u8>>().unwrap() as &[u8]).unwrap()
}
}

View File

@ -5,7 +5,7 @@ authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
rand = "0.4"
rustc-serialize = "0.3"
rustc-hex = "2"
lazy_static = "1.0"
base58 = "0.1"
eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" }

View File

@ -1,7 +1,7 @@
//! Bitcoin keys.
extern crate rand;
extern crate rustc_serialize;
extern crate rustc_hex as hex;
#[macro_use]
extern crate lazy_static;
extern crate base58;
@ -19,7 +19,6 @@ mod private;
mod public;
mod signature;
pub use rustc_serialize::hex;
pub use primitives::{hash, bytes};
pub use address::{Type, Address};

View File

@ -108,7 +108,7 @@ impl DisplayLayout for Private {
impl fmt::Debug for Private {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(writeln!(f, "network: {:?}", self.network));
try!(writeln!(f, "secret: {}", self.secret.to_hex()));
try!(writeln!(f, "secret: {}", self.secret.to_hex::<String>()));
writeln!(f, "compressed: {}", self.compressed)
}
}

View File

@ -92,14 +92,14 @@ impl PartialEq for Public {
impl fmt::Debug for Public {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Public::Normal(ref hash) => writeln!(f, "normal: {}", hash.to_hex()),
Public::Compressed(ref hash) => writeln!(f, "compressed: {}", hash.to_hex()),
Public::Normal(ref hash) => writeln!(f, "normal: {}", hash.to_hex::<String>()),
Public::Compressed(ref hash) => writeln!(f, "compressed: {}", hash.to_hex::<String>()),
}
}
}
impl fmt::Display for Public {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_hex().fmt(f)
self.to_hex::<String>().fmt(f)
}
}

View File

@ -12,13 +12,13 @@ pub struct Signature(Vec<u8>);
impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.to_hex().fmt(f)
self.0.to_hex::<String>().fmt(f)
}
}
impl fmt::Display for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.to_hex().fmt(f)
self.0.to_hex::<String>().fmt(f)
}
}
@ -74,13 +74,13 @@ pub struct CompactSignature(H520);
impl fmt::Debug for CompactSignature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0.to_hex())
f.write_str(&self.0.to_hex::<String>())
}
}
impl fmt::Display for CompactSignature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0.to_hex())
f.write_str(&self.0.to_hex::<String>())
}
}

View File

@ -64,12 +64,26 @@ impl InventoryVector {
}
}
pub fn witness_tx(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageWitnessTx,
hash: hash,
}
}
pub fn block(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageBlock,
hash: hash,
}
}
pub fn witness_block(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageWitnessBlock,
hash: hash,
}
}
}
impl Serializable for InventoryVector {

View File

@ -3,9 +3,9 @@ use primitives::hash::H256;
use primitives::compact::Compact;
use chain::{OutPoint, TransactionOutput, IndexedTransaction};
use storage::{SharedStore, TransactionOutputProvider};
use network::ConsensusParams;
use network::{ConsensusParams, ConsensusFork, TransactionOrdering};
use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops};
use verification::{work_required, block_reward_satoshi, transaction_sigops, median_timestamp_inclusive};
const BLOCK_VERSION: u32 = 0x20000000;
const BLOCK_HEADER_SIZE: u32 = 4 + 32 + 32 + 4 + 4 + 4;
@ -116,7 +116,9 @@ impl SizePolicy {
/// Block assembler
pub struct BlockAssembler {
/// Maximal block size.
pub max_block_size: u32,
/// Maximal # of sigops in the block.
pub max_block_sigops: u32,
}
@ -130,6 +132,8 @@ struct FittingTransactionsIterator<'a, T> {
block_height: u32,
/// New block time
block_time: u32,
/// Are OP_CHECKDATASIG && OP_CHECKDATASIGVERIFY enabled for this block.
checkdatasig_active: bool,
/// Size policy decides if transactions size fits the block
block_size: SizePolicy,
/// Sigops policy decides if transactions sigops fits the block
@ -143,12 +147,21 @@ struct FittingTransactionsIterator<'a, T> {
}
impl<'a, T> FittingTransactionsIterator<'a, T> where T: Iterator<Item = &'a Entry> {
fn new(store: &'a TransactionOutputProvider, iter: T, max_block_size: u32, max_block_sigops: u32, block_height: u32, block_time: u32) -> Self {
fn new(
store: &'a TransactionOutputProvider,
iter: T,
max_block_size: u32,
max_block_sigops: u32,
block_height: u32,
block_time: u32,
checkdatasig_active: bool,
) -> Self {
FittingTransactionsIterator {
store: store,
iter: iter,
block_height: block_height,
block_time: block_time,
checkdatasig_active,
// reserve some space for header and transactions len field
block_size: SizePolicy::new(BLOCK_HEADER_SIZE + 4, max_block_size, 1_000, 50),
sigops: SizePolicy::new(0, max_block_sigops, 8, 50),
@ -190,7 +203,7 @@ impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<It
let transaction_size = entry.size as u32;
let bip16_active = true;
let sigops_count = transaction_sigops(&entry.transaction, self, bip16_active) as u32;
let sigops_count = transaction_sigops(&entry.transaction, self, bip16_active, self.checkdatasig_active) as u32;
let size_step = self.block_size.decide(transaction_size);
let sigops_step = self.sigops.decide(sigops_count);
@ -233,7 +246,7 @@ impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<It
}
impl BlockAssembler {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, consensus: &ConsensusParams) -> BlockTemplate {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, median_timestamp: u32, consensus: &ConsensusParams) -> BlockTemplate {
// get best block
// take it's hash && height
let best_block = store.best_block();
@ -242,11 +255,23 @@ impl BlockAssembler {
let bits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), consensus);
let version = BLOCK_VERSION;
let checkdatasig_active = match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.magnetic_anomaly_time,
_ => false
};
let mut coinbase_value = block_reward_satoshi(height);
let mut transactions = Vec::new();
let mempool_iter = mempool.iter(OrderingStrategy::ByTransactionScore);
let tx_iter = FittingTransactionsIterator::new(store.as_transaction_output_provider(), mempool_iter, self.max_block_size, self.max_block_sigops, height, time);
let tx_iter = FittingTransactionsIterator::new(
store.as_transaction_output_provider(),
mempool_iter,
self.max_block_size,
self.max_block_sigops,
height,
time,
checkdatasig_active);
for entry in tx_iter {
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
@ -255,6 +280,15 @@ impl BlockAssembler {
transactions.push(tx);
}
// sort block transactions
let median_time_past = median_timestamp_inclusive(previous_header_hash.clone(), store.as_block_header_provider());
match consensus.fork.transaction_ordering(median_time_past) {
TransactionOrdering::Canonical => transactions.sort_unstable_by(|tx1, tx2|
tx1.hash.cmp(&tx2.hash)),
// memory pool iter returns transactions in topological order
TransactionOrdering::Topological => (),
}
BlockTemplate {
version: version,
previous_header_hash: previous_header_hash,
@ -271,7 +305,16 @@ impl BlockAssembler {
#[cfg(test)]
mod tests {
use super::{SizePolicy, NextStep};
extern crate test_data;
use std::sync::Arc;
use db::BlockChainDatabase;
use primitives::hash::H256;
use storage::SharedStore;
use network::{ConsensusParams, ConsensusFork, Network, BitcoinCashConsensusParams};
use memory_pool::MemoryPool;
use self::test_data::{ChainBuilder, TransactionBuilder};
use super::{BlockAssembler, SizePolicy, NextStep, BlockTemplate};
#[test]
fn test_size_policy() {
@ -317,4 +360,41 @@ mod tests {
fn test_fitting_transactions_iterator_locked_transaction() {
// TODO
}
#[test]
fn block_assembler_transaction_order() {
fn construct_block(consensus: ConsensusParams) -> (BlockTemplate, H256, H256) {
let chain = &mut ChainBuilder::new();
TransactionBuilder::with_default_input(0).set_output(30).store(chain) // transaction0
.into_input(0).set_output(50).store(chain); // transaction0 -> transaction1
let hash0 = chain.at(0).hash();
let hash1 = chain.at(1).hash();
let mut pool = MemoryPool::new();
let storage: SharedStore = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
pool.insert_verified(chain.at(0).into());
pool.insert_verified(chain.at(1).into());
(BlockAssembler {
max_block_size: 0xffffffff,
max_block_sigops: 0xffffffff,
}.create_new_block(&storage, &pool, 0, 0, &consensus), hash0, hash1)
}
// when topological consensus is used
let topological_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let (block, hash0, hash1) = construct_block(topological_consensus);
assert!(hash1 < hash0);
assert_eq!(block.transactions[0].hash, hash0);
assert_eq!(block.transactions[1].hash, hash1);
// when canonical consensus is used
let mut canonical_fork = BitcoinCashConsensusParams::new(Network::Mainnet);
canonical_fork.magnetic_anomaly_time = 0;
let canonical_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(canonical_fork));
let (block, hash0, hash1) = construct_block(canonical_consensus);
assert!(hash1 < hash0);
assert_eq!(block.transactions[0].hash, hash1);
assert_eq!(block.transactions[1].hash, hash0);
}
}

View File

@ -8,3 +8,4 @@ lazy_static = "1.0"
chain = { path = "../chain" }
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
rustc-hex = "2"

View File

@ -41,6 +41,9 @@ pub struct BitcoinCashConsensusParams {
/// Time of monolith (aka May 2018) hardfork.
/// https://github.com/bitcoincashorg/spec/blob/4fbb0face661e293bcfafe1a2a4744dcca62e50d/may-2018-hardfork.md
pub monolith_time: u32,
/// Time of magnetic anomaly (aka Nov 2018) hardfork.
/// https://github.com/bitcoincashorg/bitcoincash.org/blob/f92f5412f2ed60273c229f68dd8703b6d5d09617/spec/2018-nov-upgrade.md
pub magnetic_anomaly_time: u32,
}
#[derive(Debug, Clone)]
@ -68,6 +71,17 @@ pub enum ConsensusFork {
ZCash(ZCashConsensusParams),
}
#[derive(Debug, Clone, Copy)]
/// Describes the ordering of transactions within a single block.
pub enum TransactionOrdering {
/// Topological transaction ordering: if tx TX2 depends on tx TX1,
/// it should come AFTER TX1 (not necessarily **right** after it).
Topological,
/// Canonical transaction ordering: transactions are ordered by their
/// hash (in ascending order).
Canonical,
}
impl ConsensusParams {
pub fn new(network: Network, fork: ConsensusFork) -> Self {
match network {
@ -199,6 +213,15 @@ impl ConsensusParams {
(height == 91842 && hash == &H256::from_reversed_str("00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(height == 91880 && hash == &H256::from_reversed_str("00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))
}
/// Returns true if SegWit is possible on this chain.
pub fn is_segwit_possible(&self) -> bool {
match self.fork {
// SegWit is not supported in (our?) regtests
ConsensusFork::BitcoinCore if self.network != Network::Regtest => true,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) | ConsensusFork::ZCash(_) => false,
}
}
}
impl ConsensusFork {
@ -225,6 +248,13 @@ impl ConsensusFork {
}
}
pub fn min_transaction_size(&self, median_time_past: u32) -> usize {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.magnetic_anomaly_time => 100,
_ => 0,
}
}
pub fn max_transaction_size(&self) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// SegWit: size * 4 <= 4_000_000 ===> max size of tx is still 1_000_000
@ -275,6 +305,14 @@ impl ConsensusFork {
unreachable!("BitcoinCash has no SegWit; weight is only checked with SegWit activated; qed"),
}
}
pub fn transaction_ordering(&self, median_time_past: u32) -> TransactionOrdering {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.magnetic_anomaly_time
=> TransactionOrdering::Canonical,
_ => TransactionOrdering::Topological,
}
}
}
impl BitcoinCashConsensusParams {
@ -284,16 +322,19 @@ impl BitcoinCashConsensusParams {
height: 478559,
difficulty_adjustion_height: 504031,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
Network::Testnet => BitcoinCashConsensusParams {
height: 1155876,
difficulty_adjustion_height: 1188697,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
Network::Regtest | Network::Unitest => BitcoinCashConsensusParams {
height: 0,
difficulty_adjustion_height: 0,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
}
}
@ -390,6 +431,14 @@ mod tests {
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).max_transaction_size(), 1_000_000);
}
#[test]
fn test_consensus_fork_min_transaction_size() {
assert_eq!(ConsensusFork::BitcoinCore.min_transaction_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCore.min_transaction_size(2000000000), 0);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).min_transaction_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).min_transaction_size(2000000000), 100);
}
#[test]
fn test_consensus_fork_max_block_sigops() {
assert_eq!(ConsensusFork::BitcoinCore.max_block_sigops(0, 1_000_000), 20_000);

View File

@ -4,6 +4,7 @@ extern crate lazy_static;
extern crate chain;
extern crate primitives;
extern crate serialization;
extern crate rustc_hex as hex;
mod consensus;
mod deployments;
@ -11,6 +12,6 @@ mod network;
pub use primitives::{hash, compact};
pub use consensus::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams, ZCashConsensusParams};
pub use consensus::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams, ZCashConsensusParams, TransactionOrdering};
pub use deployments::Deployment;
pub use network::{Magic, Network};

View File

@ -108,9 +108,9 @@ impl Network {
(&ConsensusFork::ZCash(_), Network::Mainnet) | (&ConsensusFork::ZCash(_), Network::Other(_)) => {
use serialization;
use chain;
use chain::hex::FromHex;
use hex::FromHex;
let origin = "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000";
let origin = origin.from_hex().unwrap();
let origin = origin.from_hex::<Vec<u8>>().unwrap();
let genesis: chain::Block = serialization::deserialize_with_flags(&origin as &[u8], serialization::DESERIALIZE_ZCASH).unwrap();
genesis
},

View File

@ -14,7 +14,7 @@ rand = "0.4"
log = "0.4"
abstract-ns = "0.3"
ns-dns-tokio = "0.3"
csv = "0.15"
csv = "1"
primitives = { path = "../primitives" }
bitcrypto = { path = "../crypto" }

View File

@ -375,8 +375,9 @@ impl<T> NodeTable<T> where T: Time {
/// Save node table in csv format.
pub fn save<W>(&self, write: W) -> Result<(), io::Error> where W: io::Write {
let mut writer = csv::Writer::from_writer(write)
.delimiter(b' ');
let mut writer = csv::WriterBuilder::new()
.delimiter(b' ')
.from_writer(write);
let iter = self.by_score.iter()
.map(|node| &node.0)
.take(1000);
@ -385,7 +386,7 @@ impl<T> NodeTable<T> where T: Time {
for n in iter {
let record = (n.addr.to_string(), n.time, u64::from(n.services), n.failures);
try!(writer.encode(record).map_err(|_| err()));
try!(writer.serialize(record).map_err(|_| err()));
}
Ok(())
@ -393,16 +394,17 @@ impl<T> NodeTable<T> where T: Time {
/// Loads table in from a csv source.
pub fn load<R>(preferable_services: Services, read: R) -> Result<Self, io::Error> where R: io::Read, T: Default {
let mut rdr = csv::Reader::from_reader(read)
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b' ');
.delimiter(b' ')
.from_reader(read);
let mut node_table = NodeTable::default();
node_table.preferable_services = preferable_services;
let err = || io::Error::new(io::ErrorKind::Other, "Load csv error");
for row in rdr.decode() {
for row in rdr.deserialize() {
let (addr, time, services, failures): (String, i64, u64, u32) = try!(row.map_err(|_| err()));
let services = services.into();

View File

@ -24,6 +24,12 @@ args:
value_name: IP
help: Connect only to the specified node.
takes_value: true
- host:
short: h
long: host
value_name: HOST
help: Listen for connections on HOST.
takes_value: true
- seednode:
short: s
long: seednode

View File

@ -109,7 +109,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
_ => PROTOCOL_MINIMUM,
},
magic: cfg.consensus.magic(),
local_address: SocketAddr::new("127.0.0.1".parse().unwrap(), cfg.port),
local_address: SocketAddr::new(cfg.host, cfg.port),
services: cfg.services,
user_agent: cfg.user_agent,
start_height: 0,

View File

@ -21,6 +21,7 @@ pub struct Config {
pub services: Services,
pub port: u16,
pub connect: Option<net::SocketAddr>,
pub host: net::IpAddr,
pub seednodes: Vec<String>,
pub quiet: bool,
pub inbound_connections: u32,
@ -120,6 +121,14 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
None => InternetProtocol::default(),
};
let host = match matches.value_of("host") {
Some(s) => s.parse::<net::IpAddr>().map_err(|_| "Invalid host".to_owned())?,
None => match only_net {
InternetProtocol::IpV6 => "::".parse().unwrap(),
_ => "0.0.0.0".parse().unwrap(),
}
};
let rpc_config = parse_rpc_config(network, matches)?;
let block_notify_command = match matches.value_of("blocknotify") {
@ -157,6 +166,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
services: services,
port: port,
connect: connect,
host: host,
seednodes: seednodes,
inbound_connections: in_connections,
outbound_connections: out_connections,
@ -197,12 +207,12 @@ fn parse_consensus_fork(network: Network, db: &storage::SharedStore, matches: &c
return Err(format!("Cannot select '{}' fork with non-empty database of '{}' fork", new_consensus_fork, old_consensus_fork)),
}
Ok(match new_consensus_fork {
"btc" => ConsensusFork::BitcoinCore,
"bch" => ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(network)),
match new_consensus_fork {
"btc" => Ok(ConsensusFork::BitcoinCore),
"bch" => Ok(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(network))),
"zcash" => ConsensusFork::ZCash(ZCashConsensusParams::new(network)),
_ => unreachable!("hardcoded above"),
})
_ => Err(String::from("Fork mandatory")),
}
}
fn parse_rpc_config(network: Network, matches: &clap::ArgMatches) -> Result<RpcHttpConfig, String> {

View File

@ -30,15 +30,21 @@ pub fn testnet_seednodes() -> Vec<&'static str> {
pub fn bitcoin_cash_seednodes() -> Vec<&'static str> {
vec![
"cash-seed.bitcoin.thomaszander.se:8333",
"seed.bitcoinabc.org:8333",
"seed-abc.bitcoinforks.org:8333",
"seed.bitprim.org:8333",
"seed.deadalnix.me:8333",
"seeder.criptolayer.net:8333"
]
}
pub fn bitcoin_cash_testnet_seednodes() -> Vec<&'static str> {
vec![
"testnet-seed.bitcoinabc.org:18333",
"testnet-seed-abc.bitcoinforks.org:18333",
"testnet-seed.bitprim.org:18333",
"testnet-seed.deadalnix.me:18333",
"testnet-seeder.criptolayer.net:18333"
]
}

View File

@ -5,6 +5,6 @@ authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
heapsize = "0.4"
rustc-serialize = "0.3"
rustc-hex = "2"
byteorder = "1.0"
bigint = "1.0"

View File

@ -84,7 +84,7 @@ impl io::Write for Bytes {
impl fmt::Debug for Bytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0.to_hex())
f.write_str(&self.0.to_hex::<String>())
}
}

View File

@ -85,11 +85,17 @@ impl Compact {
}
pub fn to_f64(&self) -> f64 {
let max_body = f64::from(0x00ffff).ln();
let scaland = f64::from(256).ln();
let ln1 = f64::from(self.0 & 0x00ffffff).ln();
let s1 = scaland * f64::from(0x1d - ((self.0 & 0xff000000) >> 24));
(max_body - ln1 + s1).exp()
let mut shift = (self.0 >> 24) & 0xff;
let mut diff = f64::from(0x0000ffffu32) / f64::from(self.0 & 0x00ffffffu32);
while shift < 29 {
diff *= f64::from(256);
shift += 1;
}
while shift > 29 {
diff /= f64::from(256.0);
shift -= 1;
}
diff
}
}
@ -132,7 +138,19 @@ mod tests {
#[test]
fn difficulty() {
let nbits = Compact::new(0x1b0404cb);
assert_eq!(nbits.to_f64(), 16307.420938523994f64);
fn compare_f64(v1: f64, v2: f64) -> bool {
(v1 - v2).abs() < 0.00001
}
assert!(compare_f64(Compact::new(0x1b0404cb).to_f64(), 16307.42094));
// tests from original bitcoin client:
// https://github.com/bitcoin/bitcoin/blob/1e8f88e071019907785b260477bd359bef6f9a8f/src/test/blockchain_tests.cpp
assert!(compare_f64(Compact::new(0x1f111111).to_f64(), 0.000001));
assert!(compare_f64(Compact::new(0x1ef88f6f).to_f64(), 0.000016));
assert!(compare_f64(Compact::new(0x1df88f6f).to_f64(), 0.004023));
assert!(compare_f64(Compact::new(0x1cf88f6f).to_f64(), 1.029916));
assert!(compare_f64(Compact::new(0x12345678).to_f64(), 5913134931067755359633408.0));
}
}

View File

@ -67,7 +67,7 @@ macro_rules! impl_hash {
type Err = FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let vec = try!(s.from_hex());
let vec: Vec<u8> = try!(s.from_hex());
match vec.len() {
$size => {
let mut result = [0u8; $size];
@ -81,13 +81,13 @@ macro_rules! impl_hash {
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0.to_hex())
f.write_str(&self.0.to_hex::<String>())
}
}
impl fmt::Display for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0.to_hex())
f.write_str(&self.0.to_hex::<String>())
}
}

View File

@ -1,11 +1,9 @@
extern crate byteorder;
#[macro_use]
extern crate heapsize;
extern crate rustc_serialize;
extern crate rustc_hex as hex;
pub extern crate bigint;
pub mod bytes;
pub mod compact;
pub mod hash;
pub use rustc_serialize::hex;


@ -10,7 +10,7 @@ log = "0.4"
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
rustc-serialize = "0.3"
rustc-hex = "2"
tokio-core = "0.1.1"
jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" }
jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git" }


@ -1,5 +1,5 @@
extern crate log;
extern crate rustc_serialize;
extern crate rustc_hex as hex;
extern crate serde;
extern crate serde_json;
#[macro_use]
@ -25,8 +25,6 @@ extern crate keys;
pub mod v1;
pub mod rpc_server;
pub use rustc_serialize::hex;
pub use jsonrpc_core::{MetaIoHandler, Compatibility, Error};
pub use jsonrpc_http_server::tokio_core::reactor::{Remote};


@ -37,7 +37,7 @@ impl Serialize for Bytes {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let mut serialized = String::new();
serialized.push_str(self.0.to_hex().as_ref());
serialized.push_str(self.0.to_hex::<String>().as_ref());
serializer.serialize_str(serialized.as_ref())
}
}


@ -22,7 +22,7 @@ macro_rules! impl_hash {
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", $other::from(self.0.clone()).to_hex())
write!(f, "{}", $other::from(self.0.clone()).to_hex::<String>())
}
}
@ -89,7 +89,7 @@ macro_rules! impl_hash {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
let mut hex = String::new();
hex.push_str(&$other::from(self.0.clone()).to_hex());
hex.push_str(&$other::from(self.0.clone()).to_hex::<String>());
serializer.serialize_str(&hex)
}
}
@ -111,7 +111,7 @@ macro_rules! impl_hash {
return Err(E::invalid_value(Unexpected::Str(value), &self))
}
match value[..].from_hex() {
match value[..].from_hex::<Vec<u8>>() {
Ok(ref v) => {
let mut result = [0u8; $size];
result.copy_from_slice(v);


@ -217,7 +217,7 @@ impl<'a> Deserialize<'a> for TransactionOutputs {
}
}
deserializer.deserialize_identifier(TransactionOutputsVisitor)
deserializer.deserialize_any(TransactionOutputsVisitor)
}
}


@ -19,11 +19,13 @@ pub enum Error {
NumberNotMinimallyEncoded,
SigCount,
PubkeyCount,
InvalidOperandSize,
// Failed verify operations
Verify,
EqualVerify,
CheckSigVerify,
CheckDataSigVerify,
NumEqualVerify,
// Logical/Format/Canonical errors.
@ -33,7 +35,6 @@ pub enum Error {
InvalidAltstackOperation,
UnbalancedConditional,
InvalidSplitRange,
InvalidBitwiseOperation,
DivisionByZero,
ImpossibleEncoding,
@ -78,6 +79,7 @@ impl fmt::Display for Error {
Error::Verify => "Failed verify operation".fmt(f),
Error::EqualVerify => "Failed equal verify operation".fmt(f),
Error::CheckSigVerify => "Failed signature check".fmt(f),
Error::CheckDataSigVerify => "Failed data signature check".fmt(f),
Error::NumEqualVerify => "Failed num equal verify operation".fmt(f),
Error::SigCount => "Maximum number of signatures exceeded".fmt(f),
Error::PubkeyCount => "Maximum number of pubkeys per multisig exceeded".fmt(f),
@ -97,7 +99,7 @@ impl fmt::Display for Error {
Error::InvalidAltstackOperation => "Invalid altstack operation".fmt(f),
Error::UnbalancedConditional => "Unbalanced conditional".fmt(f),
Error::InvalidSplitRange => "Invalid OP_SPLIT range".fmt(f),
Error::InvalidBitwiseOperation => "Invalid bitwise operation (check length of inputs)".fmt(f),
Error::InvalidOperandSize => "Invalid operand size".fmt(f),
Error::DivisionByZero => "Invalid division operation".fmt(f),
Error::ImpossibleEncoding => "The requested encoding is impossible to satisfy".fmt(f),


@ -98,6 +98,9 @@ pub struct VerificationFlags {
///
/// This opcode replaces OP_LEFT => enabling both OP_NUM2BIN && OP_LEFT would be an error
pub verify_num2bin: bool,
/// Support OP_CHECKDATASIG and OP_CHECKDATASIGVERIFY opcodes.
pub verify_checkdatasig: bool,
}
impl VerificationFlags {
@ -136,6 +139,16 @@ impl VerificationFlags {
self
}
pub fn verify_sigpushonly(mut self, value: bool) -> Self {
self.verify_sigpushonly = value;
self
}
pub fn verify_cleanstack(mut self, value: bool) -> Self {
self.verify_cleanstack = value;
self
}
pub fn verify_discourage_upgradable_witness_program(mut self, value: bool) -> Self {
self.verify_discourage_upgradable_witness_program = value;
self
@ -185,4 +198,9 @@ impl VerificationFlags {
self.verify_num2bin = value;
self
}
pub fn verify_checkdatasig(mut self, value: bool) -> Self {
self.verify_checkdatasig = value;
self
}
}


@ -1,6 +1,6 @@
use std::{cmp, mem};
use bytes::Bytes;
use keys::{Signature, Public};
use keys::{Message, Signature, Public};
use chain::constants::SEQUENCE_LOCKTIME_DISABLE_FLAG;
use crypto::{sha1, sha256, dhash160, dhash256, ripemd160};
use sign::{SignatureVersion, Sighash};
@ -32,6 +32,25 @@ fn check_signature(
checker.check_signature(&signature, &public, script_code, hash_type, version)
}
/// Helper function.
fn verify_signature(
checker: &SignatureChecker,
signature: Vec<u8>,
public: Vec<u8>,
message: Message,
) -> bool {
let public = match Public::from_slice(&public) {
Ok(public) => public,
_ => return false,
};
if signature.is_empty() {
return false;
}
checker.verify_signature(&signature.into(), &public, &message.into())
}
fn is_public_key(v: &[u8]) -> bool {
match v.len() {
33 if v[0] == 2 || v[0] == 3 => true,
@ -327,7 +346,6 @@ pub fn verify_script(
// Disallow CLEANSTACK without P2SH, as otherwise a switch CLEANSTACK->P2SH+CLEANSTACK
// would be possible, which is not a softfork (and P2SH should be one).
assert!(flags.verify_p2sh);
assert!(flags.verify_witness);
if stack.len() != 1 {
return Err(Error::Cleanstack);
}
@ -603,7 +621,7 @@ pub fn eval_script(
let mask_len = mask.len();
let value_to_update = stack.last_mut()?;
if mask_len != value_to_update.len() {
return Err(Error::InvalidBitwiseOperation);
return Err(Error::InvalidOperandSize);
}
for (byte_to_update, byte_mask) in (*value_to_update).iter_mut().zip(mask.iter()) {
*byte_to_update = *byte_to_update & byte_mask;
@ -614,7 +632,7 @@ pub fn eval_script(
let mask_len = mask.len();
let value_to_update = stack.last_mut()?;
if mask_len != value_to_update.len() {
return Err(Error::InvalidBitwiseOperation);
return Err(Error::InvalidOperandSize);
}
for (byte_to_update, byte_mask) in (*value_to_update).iter_mut().zip(mask.iter()) {
*byte_to_update = *byte_to_update | byte_mask;
@ -625,7 +643,7 @@ pub fn eval_script(
let mask_len = mask.len();
let value_to_update = stack.last_mut()?;
if mask_len != value_to_update.len() {
return Err(Error::InvalidBitwiseOperation);
return Err(Error::InvalidOperandSize);
}
for (byte_to_update, byte_mask) in (*value_to_update).iter_mut().zip(mask.iter()) {
*byte_to_update = *byte_to_update ^ byte_mask;
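All three arms share the renamed check: the BCH bitwise opcodes require operands of equal byte length and fail with InvalidOperandSize otherwise. The OP_AND case in isolation (toy stack values, not the crate's types):

fn op_and(value: &mut Vec<u8>, mask: &[u8]) -> Result<(), &'static str> {
    if mask.len() != value.len() {
        return Err("InvalidOperandSize");
    }
    for (byte, m) in value.iter_mut().zip(mask) {
        *byte &= m;
    }
    Ok(())
}

fn main() {
    let mut value = vec![0b1010_1010, 0xff];
    op_and(&mut value, &[0b1100_1100, 0x0f]).unwrap();
    assert_eq!(value, vec![0b1000_1000, 0x0f]);
    // operands of different lengths are rejected
    assert!(op_and(&mut value, &[0x11]).is_err());
    println!("ok");
}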
@ -1116,6 +1134,34 @@ pub fn eval_script(
Opcode::OP_VERNOTIF => {
return Err(Error::DisabledOpcode(opcode));
},
Opcode::OP_CHECKDATASIG | Opcode::OP_CHECKDATASIGVERIFY if flags.verify_checkdatasig => {
let pubkey = stack.pop()?;
let message = stack.pop()?;
let signature = stack.pop()?;
check_signature_encoding(&signature, flags, version)?;
check_pubkey_encoding(&pubkey, flags)?;
let signature: Vec<u8> = signature.into();
let message_hash = sha256(&message);
let success = verify_signature(checker, signature.into(), pubkey.into(), message_hash);
match opcode {
Opcode::OP_CHECKDATASIG => {
if success {
stack.push(vec![1].into());
} else {
stack.push(vec![0].into());
}
},
Opcode::OP_CHECKDATASIGVERIFY if !success => {
return Err(Error::CheckDataSigVerify);
},
_ => {},
}
},
Opcode::OP_CHECKDATASIG | Opcode::OP_CHECKDATASIGVERIFY => {
return Err(Error::DisabledOpcode(opcode));
},
}
if stack.len() + altstack.len() > 1000 {
@ -1139,6 +1185,8 @@ pub fn eval_script(
mod tests {
use bytes::Bytes;
use chain::Transaction;
use crypto::sha256;
use keys::{KeyPair, Private, Message, Network};
use sign::SignatureVersion;
use script::MAX_SCRIPT_ELEMENT_SIZE;
use {
@ -2245,7 +2293,6 @@ mod tests {
#[test]
fn test_script_with_forkid_signature() {
use keys::{KeyPair, Private, Network};
use sign::UnsignedTransactionInput;
use chain::{OutPoint, TransactionOutput};
@ -3425,7 +3472,7 @@ mod tests {
.push_data(&[0x22])
.push_opcode(Opcode::OP_AND)
.into_script();
let result = Err(Error::InvalidBitwiseOperation);
let result = Err(Error::InvalidOperandSize);
basic_test_with_flags(&script, &VerificationFlags::default().verify_and(true), result,
vec![].into());
}
@ -3461,7 +3508,7 @@ mod tests {
.push_data(&[0x22])
.push_opcode(Opcode::OP_OR)
.into_script();
let result = Err(Error::InvalidBitwiseOperation);
let result = Err(Error::InvalidOperandSize);
basic_test_with_flags(&script, &VerificationFlags::default().verify_or(true), result,
vec![].into());
}
@ -3497,7 +3544,7 @@ mod tests {
.push_data(&[0x22])
.push_opcode(Opcode::OP_XOR)
.into_script();
let result = Err(Error::InvalidBitwiseOperation);
let result = Err(Error::InvalidOperandSize);
basic_test_with_flags(&script, &VerificationFlags::default().verify_xor(true), result,
vec![].into());
}
@ -3845,4 +3892,160 @@ mod tests {
.verify_split(true);
basic_test_with_flags(&script, &flags, Ok(true), vec![vec![0x01].into()].into());
}
#[test]
fn checkdatasig_spec_tests() {
// official tests from:
// https://github.com/bitcoincashorg/bitcoincash.org/blob/0c6f91b0b713aae3bc6c9834b46e80e247ff5fab/spec/op_checkdatasig.md
let kp = KeyPair::from_private(Private { network: Network::Mainnet, secret: 1.into(), compressed: false, }).unwrap();
let pubkey = kp.public().clone();
let message = vec![42u8; 32];
let correct_signature = kp.private().sign(&Message::from(sha256(&message))).unwrap();
let correct_signature_for_other_message = kp.private().sign(&[43u8; 32].into()).unwrap();
let mut correct_signature = correct_signature.to_vec();
let mut correct_signature_for_other_message = correct_signature_for_other_message.to_vec();
correct_signature.push(0x81);
correct_signature_for_other_message.push(0x81);
let correct_flags = VerificationFlags::default()
.verify_checkdatasig(true)
.verify_dersig(true)
.verify_strictenc(true);
let incorrect_flags = VerificationFlags::default().verify_checkdatasig(false);
let correct_signature_script = Builder::default()
.push_data(&*correct_signature)
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIG)
.into_script();
// <sig> <msg> <pubKey> OP_CHECKDATASIG fails if 15 November 2018 protocol upgrade is not yet activated.
basic_test_with_flags(&correct_signature_script, &incorrect_flags, Err(Error::DisabledOpcode(Opcode::OP_CHECKDATASIG)), vec![].into());
// <sig> <msg> OP_CHECKDATASIG fails if there are fewer than 3 items on stack.
let too_few_args_sig_script = Builder::default()
.push_data(&[1u8; 32])
.push_data(&*message)
.push_opcode(Opcode::OP_CHECKDATASIG)
.into_script();
basic_test_with_flags(&too_few_args_sig_script, &correct_flags, Err(Error::InvalidStackOperation), vec![].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIG fails if <pubKey> is not a validly encoded public key.
let incorrect_pubkey_script = Builder::default()
.push_data(&*correct_signature)
.push_data(&*message)
.push_data(&[77u8; 15])
.push_opcode(Opcode::OP_CHECKDATASIG)
.into_script();
basic_test_with_flags(&incorrect_pubkey_script, &correct_flags, Err(Error::PubkeyType), vec![].into());
// assuming that check_signature_encoding correctness is proved by other tests:
// <sig> <msg> <pubKey> OP_CHECKDATASIG fails if <sig> is not a validly encoded signature with strict DER encoding.
// <sig> <msg> <pubKey> OP_CHECKDATASIG fails if signature <sig> is not empty and does not pass the Low S check.
let incorrectly_encoded_signature_script = Builder::default()
.push_data(&[0u8; 65])
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIG)
.into_script();
basic_test_with_flags(&incorrectly_encoded_signature_script, &correct_flags, Err(Error::SignatureDer), vec![].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIG fails if signature <sig> is not empty and does not pass signature validation of <msg> and <pubKey>.
let incorrect_signature_script = Builder::default()
.push_data(&*correct_signature_for_other_message)
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIG)
.into_script();
basic_test_with_flags(&incorrect_signature_script, &correct_flags, Ok(false), vec![vec![0].into()].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIG pops three elements and pushes false onto the stack if <sig> is an empty byte array.
let empty_signature_script = Builder::default()
.push_data(&[])
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIG)
.into_script();
basic_test_with_flags(&empty_signature_script, &correct_flags, Ok(false), vec![vec![0].into()].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIG pops three elements and pushes true onto the stack if <sig> is a valid signature of <msg> with respect to <pubKey>.
basic_test_with_flags(&correct_signature_script, &correct_flags, Ok(true), vec![vec![1].into()].into());
}
#[test]
fn checkdatasigverify_spec_tests() {
// official tests from:
// https://github.com/bitcoincashorg/bitcoincash.org/blob/0c6f91b0b713aae3bc6c9834b46e80e247ff5fab/spec/op_checkdatasig.md
let kp = KeyPair::from_private(Private { network: Network::Mainnet, secret: 1.into(), compressed: false, }).unwrap();
let pubkey = kp.public().clone();
let message = vec![42u8; 32];
let correct_signature = kp.private().sign(&Message::from(sha256(&message))).unwrap();
let correct_signature_for_other_message = kp.private().sign(&[43u8; 32].into()).unwrap();
let mut correct_signature = correct_signature.to_vec();
let mut correct_signature_for_other_message = correct_signature_for_other_message.to_vec();
correct_signature.push(0x81);
correct_signature_for_other_message.push(0x81);
let correct_flags = VerificationFlags::default()
.verify_checkdatasig(true)
.verify_dersig(true)
.verify_strictenc(true);
let incorrect_flags = VerificationFlags::default().verify_checkdatasig(false);
let correct_signature_script = Builder::default()
.push_data(&*correct_signature)
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIGVERIFY)
.into_script();
// <sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY fails if 15 November 2018 protocol upgrade is not yet activated.
basic_test_with_flags(&correct_signature_script, &incorrect_flags, Err(Error::DisabledOpcode(Opcode::OP_CHECKDATASIGVERIFY)), vec![].into());
// <sig> <msg> OP_CHECKDATASIGVERIFY fails if there are fewer than 3 items on stack.
let too_few_args_sig_script = Builder::default()
.push_data(&[1u8; 32])
.push_data(&*message)
.push_opcode(Opcode::OP_CHECKDATASIGVERIFY)
.into_script();
basic_test_with_flags(&too_few_args_sig_script, &correct_flags, Err(Error::InvalidStackOperation), vec![].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY fails if <pubKey> is not a validly encoded public key.
let incorrect_pubkey_script = Builder::default()
.push_data(&*correct_signature)
.push_data(&*message)
.push_data(&[77u8; 15])
.push_opcode(Opcode::OP_CHECKDATASIGVERIFY)
.into_script();
basic_test_with_flags(&incorrect_pubkey_script, &correct_flags, Err(Error::PubkeyType), vec![].into());
// assuming that check_signature_encoding correctness is proved by other tests:
// <sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY fails if <sig> is not a validly encoded signature with strict DER encoding.
// <sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY fails if signature <sig> is not empty and does not pass the Low S check.
let incorrectly_encoded_signature_script = Builder::default()
.push_data(&[0u8; 65])
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIGVERIFY)
.into_script();
basic_test_with_flags(&incorrectly_encoded_signature_script, &correct_flags, Err(Error::SignatureDer), vec![].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY fails if <sig> is not a valid signature of <msg> with respect to <pubKey>.
let incorrect_signature_script = Builder::default()
.push_data(&*correct_signature_for_other_message)
.push_data(&*message)
.push_data(&*pubkey)
.push_opcode(Opcode::OP_CHECKDATASIGVERIFY)
.into_script();
basic_test_with_flags(&incorrect_signature_script, &correct_flags, Err(Error::CheckDataSigVerify), vec![].into());
// <sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY pops the top three stack elements if <sig> is a valid signature of <msg> with respect to <pubKey>.
// Ok(false) means success here, because OP_CHECKDATASIGVERIFY leaves an empty stack
basic_test_with_flags(&correct_signature_script, &correct_flags, Ok(false), vec![].into());
}
}
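For reference, the stack discipline of the new arm in isolation: OP_CHECKDATASIG pops <pubKey>, <msg>, <sig>, hashes the arbitrary message once with SHA-256 (unlike OP_CHECKSIG, which signs a transaction digest), and pushes 1 or 0; the VERIFY variant fails instead of pushing 0. A hedged sketch assuming the third-party sha2 crate and a caller-supplied verifier in place of the crate's SignatureChecker:

extern crate sha2;
use sha2::{Digest, Sha256};

fn checkdatasig(
    stack: &mut Vec<Vec<u8>>,
    verify: impl Fn(&[u8], &[u8], &[u8]) -> bool, // (sig, digest, pubkey) -> valid?
    is_verify_variant: bool,                      // true for OP_CHECKDATASIGVERIFY
) -> Result<(), &'static str> {
    if stack.len() < 3 {
        return Err("InvalidStackOperation");
    }
    let pubkey = stack.pop().unwrap();
    let message = stack.pop().unwrap();
    let signature = stack.pop().unwrap();
    // the message is arbitrary data, hashed exactly once before verification
    let digest = Sha256::digest(&message);
    // an empty signature yields a false result rather than an error
    let success = !signature.is_empty() && verify(&signature, digest.as_slice(), &pubkey);
    if is_verify_variant {
        if success { Ok(()) } else { Err("CheckDataSigVerify") }
    } else {
        stack.push(vec![success as u8]); // 1 or 0, as in the arm above
        Ok(())
    }
}

fn main() {
    let mut stack = vec![b"sig".to_vec(), b"msg".to_vec(), b"key".to_vec()];
    checkdatasig(&mut stack, |_, _, _| true, false).unwrap();
    assert_eq!(stack, vec![vec![1]]);
    println!("ok");
}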


@ -213,6 +213,10 @@ pub enum Opcode {
OP_NOP8 = 0xb7,
OP_NOP9 = 0xb8,
OP_NOP10 = 0xb9,
// BCH crypto
OP_CHECKDATASIG = 0xba,
OP_CHECKDATASIGVERIFY = 0xbb,
}
impl fmt::Display for Opcode {
@ -430,6 +434,11 @@ impl Opcode {
0xb7 => Some(OP_NOP8),
0xb8 => Some(OP_NOP9),
0xb9 => Some(OP_NOP10),
// BCH crypto
0xba => Some(OP_CHECKDATASIG),
0xbb => Some(OP_CHECKDATASIGVERIFY),
_ => None,
}
}
@ -688,5 +697,9 @@ mod tests {
assert_eq!(Opcode::OP_NOP8, Opcode::from_u8(Opcode::OP_NOP8 as u8).unwrap());
assert_eq!(Opcode::OP_NOP9, Opcode::from_u8(Opcode::OP_NOP9 as u8).unwrap());
assert_eq!(Opcode::OP_NOP10, Opcode::from_u8(Opcode::OP_NOP10 as u8).unwrap());
// BCH crypto
assert_eq!(Opcode::OP_CHECKDATASIG, Opcode::from_u8(Opcode::OP_CHECKDATASIG as u8).unwrap());
assert_eq!(Opcode::OP_CHECKDATASIGVERIFY, Opcode::from_u8(Opcode::OP_CHECKDATASIGVERIFY as u8).unwrap());
}
}


@ -367,7 +367,7 @@ impl Script {
Opcodes { position: 0, script: self }
}
pub fn sigops_count(&self, serialized_script: bool) -> usize {
pub fn sigops_count(&self, checkdatasig_active: bool, serialized_script: bool) -> usize {
let mut last_opcode = Opcode::OP_0;
let mut total = 0;
for opcode in self.opcodes() {
@ -381,6 +381,9 @@ impl Script {
Opcode::OP_CHECKSIG | Opcode::OP_CHECKSIGVERIFY => {
total += 1;
},
Opcode::OP_CHECKDATASIG | Opcode::OP_CHECKDATASIGVERIFY if checkdatasig_active => {
total += 1;
},
Opcode::OP_CHECKMULTISIG | Opcode::OP_CHECKMULTISIGVERIFY => {
if serialized_script && last_opcode.is_within_op_n() {
total += last_opcode.decode_op_n() as usize;
@ -454,7 +457,7 @@ impl Script {
}
}
pub fn pay_to_script_hash_sigops(&self, prev_out: &Script) -> usize {
pub fn pay_to_script_hash_sigops(&self, checkdatasig_active: bool, prev_out: &Script) -> usize {
if !prev_out.is_pay_to_script_hash() {
return 0;
}
@ -470,7 +473,7 @@ impl Script {
.to_vec()
.into();
script.sigops_count(true)
script.sigops_count(checkdatasig_active, true)
}
}
@ -669,11 +672,11 @@ OP_ADD
#[test]
fn test_sigops_count() {
assert_eq!(1usize, Script::from("76a914aab76ba4877d696590d94ea3e02948b55294815188ac").sigops_count(false));
assert_eq!(2usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigops_count(true));
assert_eq!(20usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigops_count(false));
assert_eq!(0usize, Script::from("a9146262b64aec1f4a4c1d21b32e9c2811dd2171fd7587").sigops_count(false));
assert_eq!(1usize, Script::from("4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1baded5c72a704f7e6cd84cac").sigops_count(false));
assert_eq!(1usize, Script::from("76a914aab76ba4877d696590d94ea3e02948b55294815188ac").sigops_count(false, false));
assert_eq!(2usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigops_count(false, true));
assert_eq!(20usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigops_count(false, false));
assert_eq!(0usize, Script::from("a9146262b64aec1f4a4c1d21b32e9c2811dd2171fd7587").sigops_count(false, false));
assert_eq!(1usize, Script::from("4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1baded5c72a704f7e6cd84cac").sigops_count(false, false));
}
#[test]
@ -688,7 +691,7 @@ OP_ADD
script[max_block_sigops - block_sigops + 3] = (overmax >> 16) as u8;
script[max_block_sigops - block_sigops + 4] = (overmax >> 24) as u8;
let script: Script = script.into();
assert_eq!(script.sigops_count(false), 20001);
assert_eq!(script.sigops_count(false, false), 20001);
}
#[test]
@ -702,7 +705,7 @@ OP_ADD
script[max_block_sigops - block_sigops + 4] = 0xff;
script[max_block_sigops - block_sigops + 5] = 0xff;
let script: Script = script.into();
assert_eq!(script.sigops_count(false), 20001);
assert_eq!(script.sigops_count(false, false), 20001);
}
#[test]
@ -802,4 +805,14 @@ OP_ADD
assert_eq!(script.script_type(), ScriptType::ScriptHash);
assert_eq!(script.num_signatures_required(), 1);
}
#[test]
fn test_num_signatures_with_checkdatasig() {
let script = Builder::default().push_opcode(Opcode::OP_CHECKDATASIG).into_script();
assert_eq!(script.sigops_count(false, false), 0);
assert_eq!(script.sigops_count(true, false), 1);
let script = Builder::default().push_opcode(Opcode::OP_CHECKDATASIGVERIFY).into_script();
assert_eq!(script.sigops_count(false, false), 0);
assert_eq!(script.sigops_count(true, false), 1);
}
}


@ -1,4 +1,4 @@
use keys::{Public, Signature};
use keys::{Public, Signature, Message};
use chain::constants::{
SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG,
SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG, LOCKTIME_THRESHOLD
@ -8,6 +8,13 @@ use {Script, TransactionInputSigner, Num};
/// Checks transaction signature
pub trait SignatureChecker {
fn verify_signature(
&self,
signature: &Signature,
public: &Public,
hash: &Message,
) -> bool;
fn check_signature(
&self,
signature: &Signature,
@ -25,6 +32,10 @@ pub trait SignatureChecker {
pub struct NoopSignatureChecker;
impl SignatureChecker for NoopSignatureChecker {
fn verify_signature(&self, signature: &Signature, public: &Public, hash: &Message) -> bool {
public.verify(hash, signature).unwrap_or(false)
}
fn check_signature(&self, _: &Signature, _: &Public, _: &Script, _: u32, _: SignatureVersion) -> bool {
false
}
@ -46,6 +57,15 @@ pub struct TransactionSignatureChecker {
}
impl SignatureChecker for TransactionSignatureChecker {
fn verify_signature(
&self,
signature: &Signature,
public: &Public,
hash: &Message,
) -> bool {
public.verify(hash, signature).unwrap_or(false)
}
fn check_signature(
&self,
signature: &Signature,
@ -55,7 +75,7 @@ impl SignatureChecker for TransactionSignatureChecker {
version: SignatureVersion
) -> bool {
let hash = self.signer.signature_hash(self.input_index, self.input_amount, script_code, version, sighashtype);
public.verify(&hash, signature).unwrap_or(false)
self.verify_signature(signature, public, &hash)
}
fn check_lock_time(&self, lock_time: Num) -> bool {
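The shape of this refactor, in isolation: verify_signature checks against an already-computed 32-byte digest (exactly what OP_CHECKDATASIG needs), while check_signature derives the transaction sighash first and then delegates. A toy sketch with local stand-ins for the crate's keys types:

type Digest32 = [u8; 32];

trait SignatureChecker {
    // raw path: the caller supplies the digest (used by OP_CHECKDATASIG)
    fn verify_signature(&self, signature: &[u8], public: &[u8], hash: &Digest32) -> bool;

    // transaction path: derive the sighash, then reuse the raw path
    fn check_signature(&self, signature: &[u8], public: &[u8], sighash: Digest32) -> bool {
        self.verify_signature(signature, public, &sighash)
    }
}

struct AcceptAll; // toy checker; the real impls call Public::verify

impl SignatureChecker for AcceptAll {
    fn verify_signature(&self, _sig: &[u8], _public: &[u8], _hash: &Digest32) -> bool {
        true
    }
}

fn main() {
    assert!(AcceptAll.check_signature(&[1], &[2], [0u8; 32]));
    println!("ok");
}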


@ -6,3 +6,4 @@ authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
byteorder = "1.0"
primitives = { path = "../primitives" }
rustc-hex = "2"


@ -1,6 +1,6 @@
use std::{fmt, io};
use hex::ToHex;
use primitives::hash::H256;
use primitives::hex::ToHex;
use {
Serializable, Stream,
Deserializable, Reader, Error as ReaderError
@ -60,7 +60,7 @@ macro_rules! impl_fixed_array_u8 {
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.to_hex().fmt(f)
self.0.to_hex::<String>().fmt(f)
}
}


@ -1,5 +1,6 @@
extern crate byteorder;
extern crate primitives;
extern crate rustc_hex as hex;
mod compact_integer;
mod fixed_array;


@ -110,7 +110,7 @@ pub fn create_local_sync_node(consensus: ConsensusParams, db: storage::SharedSto
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone()));
let sync_chain = SyncChain::new(db.clone(), consensus.clone(), memory_pool.clone());
if sync_chain.is_segwit_active() {
if sync_chain.is_segwit_possible() {
peers.require_peer_services(Services::default().with_witness(true));
}


@ -286,7 +286,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
max_block_sigops: self.consensus.fork.max_block_sigops(new_block_height, max_block_size) as u32,
};
let memory_pool = &*self.memory_pool.read();
block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, &self.consensus)
block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, median_timestamp, &self.consensus)
}
/// Install synchronization events listener


@ -9,7 +9,6 @@ use primitives::bytes::Bytes;
use primitives::hash::H256;
use utils::{BestHeadersChain, BestHeadersChainInformation, HashQueueChain, HashPosition};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use verification::Deployments;
/// Index of 'verifying' queue
const VERIFYING_QUEUE: usize = 0;
@ -106,8 +105,6 @@ pub struct Chain {
best_storage_block: storage::BestBlock,
/// Local blocks storage
storage: StorageRef,
/// Consensus params.
consensus: ConsensusParams,
/// In-memory queue of blocks hashes
hash_chain: HashQueueChain,
/// In-memory queue of blocks headers
@ -118,10 +115,9 @@ pub struct Chain {
memory_pool: MemoryPoolRef,
/// Blocks that have been marked as dead-ends
dead_end_blocks: HashSet<H256>,
/// Deployments cache
deployments: Deployments,
/// Is SegWit active?
is_segwit_active: bool,
/// Is SegWit possible on this chain? SegWit inventory types are used when blocks/txes are
/// requested and this flag is true.
is_segwit_possible: bool,
}
impl BlockState {
@ -152,21 +148,18 @@ impl Chain {
.expect("storage with genesis block is required");
let best_storage_block = storage.best_block();
let best_storage_block_hash = best_storage_block.hash.clone();
let deployments = Deployments::new();
let is_segwit_active = deployments.segwit(best_storage_block.number, storage.as_block_header_provider(), &consensus);
let is_segwit_possible = consensus.is_segwit_possible();
Chain {
genesis_block_hash: genesis_block_hash,
best_storage_block: best_storage_block,
storage: storage,
consensus: consensus,
hash_chain: HashQueueChain::with_number_of_queues(NUMBER_OF_QUEUES),
headers_chain: BestHeadersChain::new(best_storage_block_hash),
verifying_transactions: LinkedHashMap::new(),
memory_pool: memory_pool,
dead_end_blocks: HashSet::new(),
deployments: deployments,
is_segwit_active: is_segwit_active,
is_segwit_possible,
}
}
@ -193,8 +186,8 @@ impl Chain {
}
/// Is segwit active
pub fn is_segwit_active(&self) -> bool {
self.is_segwit_active
pub fn is_segwit_possible(&self) -> bool {
self.is_segwit_possible
}
/// Get number of blocks in given state
@ -367,7 +360,6 @@ impl Chain {
// remember new best block hash
self.best_storage_block = self.storage.as_store().best_block();
self.is_segwit_active = self.deployments.segwit(self.best_storage_block.number, self.storage.as_block_header_provider(), &self.consensus);
// remove inserted block + handle possible reorganization in headers chain
// TODO: mk, not sure if we need both of those params
@ -403,7 +395,6 @@ impl Chain {
// remember new best block hash
self.best_storage_block = self.storage.best_block();
self.is_segwit_active = self.deployments.segwit(self.best_storage_block.number, self.storage.as_block_header_provider(), &self.consensus);
// remove inserted block + handle possible reorganization in headers chain
// TODO: mk, not sure if we need both of those params


@ -6,7 +6,7 @@ use futures::Future;
use parking_lot::Mutex;
use time::precise_time_s;
use chain::{IndexedBlockHeader, IndexedTransaction, Transaction, IndexedBlock};
use message::{types, Services};
use message::types;
use message::common::{InventoryType, InventoryVector};
use miner::transaction_fee_rate;
use primitives::hash::H256;
@ -226,16 +226,16 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
// else ask for all unknown transactions and blocks
let is_segwit_active = self.chain.is_segwit_active();
let ask_for_witness = is_segwit_active && self.peers.is_segwit_enabled(peer_index);
let is_segwit_possible = self.chain.is_segwit_possible();
let unknown_inventory: Vec<_> = message.inventory.into_iter()
.filter(|item| {
match item.inv_type {
// check that transaction is unknown to us
InventoryType::MessageTx => self.chain.transaction_state(&item.hash) == TransactionState::Unknown
&& !self.orphaned_transactions_pool.contains(&item.hash),
InventoryType::MessageTx | InventoryType::MessageWitnessTx =>
self.chain.transaction_state(&item.hash) == TransactionState::Unknown
&& !self.orphaned_transactions_pool.contains(&item.hash),
// check that block is unknown to us
InventoryType::MessageBlock => match self.chain.block_state(&item.hash) {
InventoryType::MessageBlock | InventoryType::MessageWitnessBlock => match self.chain.block_state(&item.hash) {
BlockState::Unknown => !self.orphaned_blocks_pool.contains_unknown_block(&item.hash),
BlockState::DeadEnd if !self.config.close_connection_on_bad_block => true,
BlockState::DeadEnd if self.config.close_connection_on_bad_block => {
@ -246,8 +246,8 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
},
// we never ask for merkle blocks && we never ask for compact blocks
InventoryType::MessageCompactBlock | InventoryType::MessageFilteredBlock
| InventoryType::MessageWitnessBlock | InventoryType::MessageWitnessFilteredBlock
| InventoryType::MessageWitnessTx => false,
| InventoryType::MessageWitnessFilteredBlock
=> false,
// unknown inventory type
InventoryType::Error => {
self.peers.misbehaving(peer_index, &format!("Provided unknown inventory type {:?}", item.hash.to_reversed_str()));
@ -258,7 +258,7 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
// we are not synchronizing =>
// 1) either segwit is active and we are connected to segwit-enabled nodes => we could ask for witness
// 2) or segwit is inactive => we shall not ask for witness
.map(|item| if !ask_for_witness {
.map(|item| if !is_segwit_possible {
item
} else {
match item.inv_type {
@ -973,8 +973,8 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
let chunk_size = min(limits.max_blocks_in_request, max(hashes.len() as BlockHeight, limits.min_blocks_in_request));
let last_peer_index = peers.len() - 1;
let mut tasks: Vec<Task> = Vec::new();
let is_segwit_active = self.chain.is_segwit_active();
let inv_type = if is_segwit_active { InventoryType::MessageWitnessBlock } else { InventoryType::MessageBlock };
let is_segwit_possible = self.chain.is_segwit_possible();
let inv_type = if is_segwit_possible { InventoryType::MessageWitnessBlock } else { InventoryType::MessageBlock };
for (peer_index, peer) in peers.into_iter().enumerate() {
// we have to request all blocks => we will request last peer for all remaining blocks
let peer_chunk_size = if peer_index == last_peer_index { hashes.len() } else { min(hashes.len(), chunk_size as usize) };
@ -1073,9 +1073,6 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// update block processing speed
self.block_speed_meter.checkpoint();
// remember if SegWit was active before this block
let segwit_was_active = self.chain.is_segwit_active();
// remove flags
let needs_relay = !self.do_not_relay.remove(block.hash());
@ -1096,13 +1093,6 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// update shared state
self.shared_state.update_best_storage_block_height(self.chain.best_storage_block().number);
// if SegWit activated after this block insertion:
// 1) no more connections to !NODE_WITNESS nodes
// 2) disconnect from all nodes without NODE_WITNESS support
if !segwit_was_active && self.chain.is_segwit_active() {
self.peers.require_peer_services(Services::default().with_witness(true));
}
// notify listener
if let Some(best_block_hash) = insert_result.canonized_blocks_hashes.last() {
if let Some(ref listener) = self.listener {
@ -1349,7 +1339,7 @@ pub mod tests {
fn request_blocks(peer_index: PeerIndex, hashes: Vec<H256>) -> Task {
Task::GetData(peer_index, types::GetData {
inventory: hashes.into_iter().map(InventoryVector::block).collect(),
inventory: hashes.into_iter().map(InventoryVector::witness_block).collect(),
})
}
@ -1749,13 +1739,13 @@ pub mod tests {
sync.on_block(1, test_data::block_h2().into());
sync.on_inventory(1, types::Inv::with_inventory(vec![
InventoryVector::block(test_data::block_h1().hash()),
InventoryVector::block(test_data::block_h2().hash()),
InventoryVector::witness_block(test_data::block_h1().hash()),
InventoryVector::witness_block(test_data::block_h2().hash()),
]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(1, types::GetData::with_inventory(vec![
InventoryVector::block(test_data::block_h1().hash())
InventoryVector::witness_block(test_data::block_h1().hash())
]))]);
}
@ -1883,11 +1873,11 @@ pub mod tests {
fn transaction_is_requested_when_not_synchronizing() {
let (executor, core, sync) = create_sync(None, None);
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(0))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]));
{
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::tx(H256::from(0))]))]);
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]))]);
}
let b1 = test_data::block_h1();
@ -1896,28 +1886,28 @@ pub mod tests {
assert!(core.lock().information().state.is_nearly_saturated());
{ executor.take_tasks(); } // forget tasks
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(1))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(1))]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::tx(H256::from(1))]))]);
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::witness_tx(H256::from(1))]))]);
}
#[test]
fn same_transaction_can_be_requested_twice() {
let (executor, _, sync) = create_sync(None, None);
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(0))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![
InventoryVector::tx(H256::from(0))
InventoryVector::witness_tx(H256::from(0))
]))]);
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(0))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![
InventoryVector::tx(H256::from(0))
InventoryVector::witness_tx(H256::from(0))
]))]);
}
@ -1926,11 +1916,11 @@ pub mod tests {
let (executor, _, sync) = create_sync(None, None);
sync.on_inventory(0, types::Inv::with_inventory(vec![
InventoryVector::tx(test_data::genesis().transactions[0].hash()),
InventoryVector::tx(H256::from(0)),
InventoryVector::witness_tx(test_data::genesis().transactions[0].hash()),
InventoryVector::witness_tx(H256::from(0)),
]));
assert_eq!(executor.take_tasks(), vec![Task::GetData(0, types::GetData::with_inventory(vec![
InventoryVector::tx(H256::from(0))
InventoryVector::witness_tx(H256::from(0))
]))]);
}


@ -1,11 +1,11 @@
use network::{ConsensusParams, ConsensusFork};
use network::{ConsensusParams, ConsensusFork, TransactionOrdering};
use crypto::dhash256;
use storage::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use ser::Stream;
use sigops::{transaction_sigops, transaction_sigops_cost};
use work::block_reward_satoshi;
use duplex_store::DuplexTransactionOutputProvider;
use duplex_store::{transaction_index_for_output_check, DuplexTransactionOutputProvider};
use deployments::BlockDeployments;
use canon::CanonBlock;
use error::{Error, TransactionError};
@ -19,6 +19,7 @@ pub struct BlockAcceptor<'a> {
pub coinbase_claim: BlockCoinbaseClaim<'a>,
pub coinbase_script: BlockCoinbaseScript<'a>,
pub witness: BlockWitness<'a>,
pub ordering: BlockTransactionOrdering<'a>,
}
impl<'a> BlockAcceptor<'a> {
@ -35,9 +36,10 @@ impl<'a> BlockAcceptor<'a> {
finality: BlockFinality::new(block, height, deployments, headers),
serialized_size: BlockSerializedSize::new(block, consensus, deployments, height, median_time_past),
coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, consensus, store, height, median_time_past),
sigops: BlockSigops::new(block, store, consensus, height, median_time_past),
witness: BlockWitness::new(block, deployments),
ordering: BlockTransactionOrdering::new(block, consensus, median_time_past),
}
}
@ -48,6 +50,7 @@ impl<'a> BlockAcceptor<'a> {
self.coinbase_claim.check()?;
self.coinbase_script.check()?;
self.witness.check()?;
self.ordering.check()?;
Ok(())
}
}
@ -137,18 +140,30 @@ pub struct BlockSigops<'a> {
consensus: &'a ConsensusParams,
height: u32,
bip16_active: bool,
checkdatasig_active: bool,
}
impl<'a> BlockSigops<'a> {
fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, consensus: &'a ConsensusParams, height: u32) -> Self {
fn new(
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
consensus: &'a ConsensusParams,
height: u32,
median_time_past: u32,
) -> Self {
let bip16_active = block.header.raw.time >= consensus.bip16_time;
let checkdatasig_active = match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) => median_time_past >= fork.magnetic_anomaly_time,
_ => false,
};
BlockSigops {
block: block,
store: store,
consensus: consensus,
height: height,
bip16_active: bip16_active,
bip16_active,
checkdatasig_active,
}
}
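Every Magnetic Anomaly activation check in this diff compares median_time_past, not the raw block timestamp, against fork.magnetic_anomaly_time: MTP is the median of the previous 11 block times, which advances monotonically and is harder for a single miner to game. A hypothetical helper showing the quantity being compared (the crate computes it elsewhere):

// hypothetical helper, not the crate's API
fn median_time_past(mut last_11_block_times: Vec<u32>) -> u32 {
    last_11_block_times.sort_unstable();
    last_11_block_times[last_11_block_times.len() / 2]
}

fn main() {
    let times = vec![10, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19];
    assert_eq!(median_time_past(times), 15);
    println!("ok");
}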
@ -156,7 +171,7 @@ impl<'a> BlockSigops<'a> {
let store = DuplexTransactionOutputProvider::new(self.store, &*self.block);
let (sigops, sigops_cost) = self.block.transactions.iter()
.map(|tx| {
let tx_sigops = transaction_sigops(&tx.raw, &store, self.bip16_active);
let tx_sigops = transaction_sigops(&tx.raw, &store, self.bip16_active, self.checkdatasig_active);
let tx_sigops_cost = transaction_sigops_cost(&tx.raw, &store, tx_sigops);
(tx_sigops, tx_sigops_cost)
})
@ -187,14 +202,22 @@ pub struct BlockCoinbaseClaim<'a> {
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
height: u32,
transaction_ordering: TransactionOrdering,
}
impl<'a> BlockCoinbaseClaim<'a> {
fn new(block: CanonBlock<'a>, store: &'a TransactionOutputProvider, height: u32) -> Self {
fn new(
block: CanonBlock<'a>,
consensus_params: &ConsensusParams,
store: &'a TransactionOutputProvider,
height: u32,
median_time_past: u32
) -> Self {
BlockCoinbaseClaim {
block: block,
store: store,
height: height,
transaction_ordering: consensus_params.fork.transaction_ordering(median_time_past),
}
}
@ -207,8 +230,9 @@ impl<'a> BlockCoinbaseClaim<'a> {
// (1) Total sum of all referenced outputs
let mut incoming: u64 = 0;
for input in tx.raw.inputs.iter() {
let (sum, overflow) = incoming.overflowing_add(
store.transaction_output(&input.previous_output, tx_idx).map(|o| o.value).unwrap_or(0));
let prevout_tx_idx = transaction_index_for_output_check(self.transaction_ordering, tx_idx);
let prevout = store.transaction_output(&input.previous_output, prevout_tx_idx);
let (sum, overflow) = incoming.overflowing_add(prevout.map(|o| o.value).unwrap_or(0));
if overflow {
return Err(Error::ReferencedInputsSumOverflow);
}
@ -339,12 +363,43 @@ impl<'a> BlockWitness<'a> {
}
}
pub struct BlockTransactionOrdering<'a> {
block: CanonBlock<'a>,
transaction_ordering: TransactionOrdering,
}
impl<'a> BlockTransactionOrdering<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, median_time_past: u32) -> Self {
BlockTransactionOrdering {
block,
transaction_ordering: consensus.fork.transaction_ordering(median_time_past),
}
}
fn check(&self) -> Result<(), Error> {
match self.transaction_ordering {
// topological transaction ordering is checked in TransactionMissingInputs
TransactionOrdering::Topological => Ok(()),
// canonical transaction ordering means that transactions are ordered by
// their id (i.e. hash) in ascending order
TransactionOrdering::Canonical =>
if self.block.transactions.windows(2).skip(1).all(|w| w[0].hash < w[1].hash) {
Ok(())
} else {
Err(Error::NonCanonicalTransactionOrdering)
},
}
}
}
#[cfg(test)]
mod tests {
extern crate test_data;
use chain::{IndexedBlock, Transaction};
use network::{Network, ConsensusFork, ConsensusParams, BitcoinCashConsensusParams};
use {Error, CanonBlock};
use super::BlockCoinbaseScript;
use super::{BlockCoinbaseScript, BlockTransactionOrdering};
#[test]
fn test_block_coinbase_script() {
@ -374,4 +429,42 @@ mod tests {
assert_eq!(coinbase_script_validator2.check(), Err(Error::CoinbaseScript));
}
#[test]
fn block_transaction_ordering_works() {
let tx1: Transaction = test_data::TransactionBuilder::with_output(1).into();
let tx2: Transaction = test_data::TransactionBuilder::with_output(2).into();
let tx3: Transaction = test_data::TransactionBuilder::with_output(3).into();
let bad_block: IndexedBlock = test_data::block_builder()
.with_transaction(tx1.clone())
.with_transaction(tx2.clone())
.with_transaction(tx3.clone())
.header().build()
.build()
.into();
let good_block: IndexedBlock = test_data::block_builder()
.with_transaction(tx1)
.with_transaction(tx3)
.with_transaction(tx2)
.header().build()
.build()
.into();
let bad_block = CanonBlock::new(&bad_block);
let good_block = CanonBlock::new(&good_block);
// when topological ordering is used => we don't care about tx ordering
let consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let checker = BlockTransactionOrdering::new(bad_block, &consensus, 0);
assert_eq!(checker.check(), Ok(()));
// when canonical ordering is used => we care about tx ordering
let mut bch = BitcoinCashConsensusParams::new(Network::Unitest);
bch.magnetic_anomaly_time = 0;
let consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(bch));
let checker = BlockTransactionOrdering::new(bad_block, &consensus, 0);
assert_eq!(checker.check(), Err(Error::NonCanonicalTransactionOrdering));
let checker = BlockTransactionOrdering::new(good_block, &consensus, 0);
assert_eq!(checker.check(), Ok(()));
}
}
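The check above implements CTOR (canonical transaction ordering): every transaction after the coinbase must sort by txid ascending, which is why the windows(2) iterator skips its first pair. A standalone sketch with plain byte arrays standing in for the crate's H256:

fn is_canonically_ordered(txids: &[[u8; 32]]) -> bool {
    // skip(1) exempts the coinbase, which may sort anywhere
    txids.windows(2).skip(1).all(|w| w[0] < w[1])
}

fn main() {
    let (a, b, c) = ([1u8; 32], [2u8; 32], [3u8; 32]);
    let coinbase = [9u8; 32];
    assert!(is_canonically_ordered(&[coinbase, a, b, c]));
    assert!(!is_canonically_ordered(&[coinbase, b, a, c]));
    println!("ok");
}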


@ -1,9 +1,10 @@
use primitives::hash::H256;
use primitives::bytes::Bytes;
use ser::Serializable;
use storage::{TransactionMetaProvider, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::DuplexTransactionOutputProvider;
use duplex_store::{DuplexTransactionOutputProvider, transaction_index_for_output_check};
use deployments::BlockDeployments;
use script::Builder;
use sigops::transaction_sigops;
@ -13,6 +14,7 @@ use error::TransactionError;
use VerificationLevel;
pub struct TransactionAcceptor<'a> {
pub size: TransactionSize<'a>,
pub premature_witness: TransactionPrematureWitness<'a>,
pub bip30: TransactionBip30<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
@ -41,10 +43,13 @@ impl<'a> TransactionAcceptor<'a> {
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
let tx_ordering = consensus.fork.transaction_ordering(median_time_past);
let missing_input_tx_index = transaction_index_for_output_check(tx_ordering, transaction_index);
TransactionAcceptor {
size: TransactionSize::new(transaction, consensus, median_time_past),
premature_witness: TransactionPrematureWitness::new(transaction, deployments),
bip30: TransactionBip30::new_for_sync(transaction, meta_store, consensus, block_hash, height),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, missing_input_tx_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
@ -54,6 +59,7 @@ impl<'a> TransactionAcceptor<'a> {
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.size.check());
try!(self.premature_witness.check());
try!(self.bip30.check());
try!(self.missing_inputs.check());
@ -67,6 +73,7 @@ impl<'a> TransactionAcceptor<'a> {
}
pub struct MemoryPoolTransactionAcceptor<'a> {
pub size: TransactionSize<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
pub maturity: TransactionMaturity<'a>,
pub overspent: TransactionOverspent<'a>,
@ -93,6 +100,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
let transaction_index = 0;
let max_block_sigops = consensus.fork.max_block_sigops(height, consensus.fork.max_block_size(height, median_time_past));
MemoryPoolTransactionAcceptor {
size: TransactionSize::new(transaction, consensus, median_time_past),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
@ -106,6 +114,7 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
pub fn check(&self) -> Result<(), TransactionError> {
// Bip30 is not checked because we don't need to allow tx pool acceptance of an unspent duplicate.
// Tx pool validation is not strictly a matter of consensus.
try!(self.size.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
try!(self.overspent.check());
@ -273,7 +282,11 @@ impl<'a> TransactionSigops<'a> {
fn check(&self) -> Result<(), TransactionError> {
let bip16_active = self.time >= self.consensus_params.bip16_time;
let sigops = transaction_sigops(&self.transaction.raw, &self.store, bip16_active);
let checkdatasig_active = match self.consensus_params.fork {
ConsensusFork::BitcoinCash(ref fork) => self.time >= fork.magnetic_anomaly_time,
_ => false
};
let sigops = transaction_sigops(&self.transaction.raw, &self.store, bip16_active, checkdatasig_active);
if sigops > self.max_sigops {
Err(TransactionError::MaxSigops)
} else {
@ -294,6 +307,9 @@ pub struct TransactionEval<'a> {
verify_witness: bool,
verify_nulldummy: bool,
verify_monolith_opcodes: bool,
verify_magnetic_anomaly_opcodes: bool,
verify_sigpushonly: bool,
verify_cleanstack: bool,
signature_version: SignatureVersion,
}
@ -316,7 +332,11 @@ impl<'a> TransactionEval<'a> {
let verify_locktime = height >= params.bip65_height;
let verify_dersig = height >= params.bip66_height;
let verify_monolith_opcodes = match params.fork {
ConsensusFork::BitcoinCash(ref fork) if median_timestamp >= fork.monolith_time => true,
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.monolith_time,
_ => false,
};
let verify_magnetic_anomaly_opcodes = match params.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.magnetic_anomaly_time,
_ => false,
};
let signature_version = match params.fork {
@ -327,6 +347,8 @@ impl<'a> TransactionEval<'a> {
let verify_checksequence = deployments.csv();
let verify_witness = deployments.segwit();
let verify_nulldummy = verify_witness;
let verify_sigpushonly = verify_magnetic_anomaly_opcodes;
let verify_cleanstack = verify_magnetic_anomaly_opcodes;
TransactionEval {
transaction: transaction,
@ -340,6 +362,9 @@ impl<'a> TransactionEval<'a> {
verify_witness: verify_witness,
verify_nulldummy: verify_nulldummy,
verify_monolith_opcodes: verify_monolith_opcodes,
verify_magnetic_anomaly_opcodes: verify_magnetic_anomaly_opcodes,
verify_sigpushonly: verify_sigpushonly,
verify_cleanstack: verify_cleanstack,
signature_version: signature_version,
}
}
@ -389,7 +414,10 @@ impl<'a> TransactionEval<'a> {
.verify_div(self.verify_monolith_opcodes)
.verify_mod(self.verify_monolith_opcodes)
.verify_bin2num(self.verify_monolith_opcodes)
.verify_num2bin(self.verify_monolith_opcodes);
.verify_num2bin(self.verify_monolith_opcodes)
.verify_checkdatasig(self.verify_magnetic_anomaly_opcodes)
.verify_sigpushonly(self.verify_sigpushonly)
.verify_cleanstack(self.verify_cleanstack);
try!(verify_script(&input, &output, &script_witness, &flags, &checker, self.signature_version)
.map_err(|e| TransactionError::Signature(index, e)));
@ -485,6 +513,29 @@ impl<'a> TransactionPrematureWitness<'a> {
}
}
pub struct TransactionSize<'a> {
transaction: CanonTransaction<'a>,
min_transaction_size: usize,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: CanonTransaction<'a>, consensus: &'a ConsensusParams, median_time_past: u32) -> Self {
let min_transaction_size = consensus.fork.min_transaction_size(median_time_past);
TransactionSize {
transaction: transaction,
min_transaction_size,
}
}
fn check(&self) -> Result<(), TransactionError> {
if self.min_transaction_size != 0 && self.transaction.raw.serialized_size() < self.min_transaction_size {
Err(TransactionError::MinSize)
} else {
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use chain::{IndexedTransaction, Transaction, TransactionOutput};
@ -492,7 +543,7 @@ mod tests {
use script::Builder;
use canon::CanonTransaction;
use error::TransactionError;
use super::TransactionReturnReplayProtection;
use super::{TransactionReturnReplayProtection, TransactionSize};
#[test]
fn return_replay_protection_works() {
@ -521,4 +572,29 @@ mod tests {
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, 100);
assert_eq!(checker.check(), Ok(()));
}
#[test]
fn transaction_size_works() {
let small_tx = Transaction::default();
let big_tx: Transaction = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000".into();
let small_tx = IndexedTransaction::new(small_tx.hash(), small_tx);
let big_tx = IndexedTransaction::new(big_tx.hash(), big_tx);
let small_tx = CanonTransaction::new(&small_tx);
let big_tx = CanonTransaction::new(&big_tx);
let unrestricted_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let restricted_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Unitest)));
// no restrictions
let checker = TransactionSize::new(small_tx, &unrestricted_consensus, 10000000);
assert_eq!(checker.check(), Ok(()));
// big + restricted
let checker = TransactionSize::new(big_tx, &restricted_consensus, 2000000000);
assert_eq!(checker.check(), Ok(()));
// small + restricted
let checker = TransactionSize::new(small_tx, &restricted_consensus, 2000000000);
assert_eq!(checker.check(), Err(TransactionError::MinSize));
}
}
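TransactionSize above enforces the new Magnetic Anomaly minimum transaction size (100 bytes on BCH, if I read the upgrade spec correctly); a min_transaction_size of 0 means no restriction, as for the other forks. The predicate in isolation:

fn check_min_size(serialized_size: usize, min_transaction_size: usize) -> Result<(), &'static str> {
    if min_transaction_size != 0 && serialized_size < min_transaction_size {
        Err("MinSize")
    } else {
        Ok(())
    }
}

fn main() {
    assert_eq!(check_min_size(60, 0), Ok(()));           // unrestricted fork
    assert_eq!(check_min_size(60, 100), Err("MinSize")); // too small under BCH rules
    assert_eq!(check_min_size(250, 100), Ok(()));
    println!("ok");
}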


@ -154,11 +154,12 @@ mod tests {
extern crate test_data;
use std::sync::Arc;
use chain::IndexedBlock;
use storage::{Error as DBError};
use chain::{IndexedBlock, Transaction, Block};
use storage::Error as DBError;
use db::BlockChainDatabase;
use network::{Network, ConsensusParams, ConsensusFork};
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use script;
use constants::DOUBLE_SPACING_SECONDS;
use super::BackwardsCompatibleChainVerifier as ChainVerifier;
use {Verify, Error, TransactionError, VerificationLevel};
@ -178,7 +179,6 @@ mod tests {
assert!(verifier.verify(VerificationLevel::Full, &b1.into()).is_ok());
}
#[test]
fn first_tx() {
let storage = BlockChainDatabase::init_test_chain(
@ -335,6 +335,70 @@ mod tests {
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
}
#[test]
fn transaction_references_same_block_and_goes_before_previous() {
let mut blocks = vec![test_data::block_builder()
.transaction()
.coinbase()
.output().value(50).build()
.build()
.merkled_header().build()
.build()];
let input_tx = blocks[0].transactions()[0].clone();
let mut parent_hash = blocks[0].hash();
// waiting 100 blocks for genesis coinbase to become valid
for _ in 0..100 {
let block: Block = test_data::block_builder()
.transaction().coinbase().build()
.merkled_header().parent(parent_hash).build()
.build()
.into();
parent_hash = block.hash();
blocks.push(block);
}
let storage = Arc::new(BlockChainDatabase::init_test_chain(blocks.into_iter().map(Into::into).collect()));
let tx1: Transaction = test_data::TransactionBuilder::with_version(4)
.add_input(&input_tx, 0)
.add_output(10).add_output(10).add_output(10)
.add_output(5).add_output(5).add_output(5)
.into();
let tx2: Transaction = test_data::TransactionBuilder::with_version(1)
.add_input(&tx1, 0)
.add_output(1).add_output(1).add_output(1)
.add_output(2).add_output(2).add_output(2)
.into();
assert!(tx1.hash() > tx2.hash());
let block = test_data::block_builder()
.transaction()
.coinbase()
.output().value(2).script_pubkey_with_sigops(100).build()
.build()
.with_transaction(tx2)
.with_transaction(tx1)
.merkled_header()
.time(DOUBLE_SPACING_SECONDS + 101) // to pass BCH work check
.parent(parent_hash)
.build()
.build();
// when topological order is required
let topological_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let verifier = ChainVerifier::new(storage.clone(), topological_consensus);
let expected = Err(Error::Transaction(1, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(VerificationLevel::Header, &block.clone().into()));
// when canonical order is required
let mut canonical_params = BitcoinCashConsensusParams::new(Network::Unitest);
canonical_params.magnetic_anomaly_time = 0;
let canonical_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(canonical_params));
let verifier = ChainVerifier::new(storage, canonical_consensus);
let expected = Ok(());
assert_eq!(expected, verifier.verify(VerificationLevel::Header, &block.into()));
}
#[test]
#[ignore]
fn coinbase_happy() {


@ -2,6 +2,7 @@
//! require sophisticated (in more than one source) previous transaction lookups
use chain::{OutPoint, TransactionOutput};
use network::TransactionOrdering;
use storage::TransactionOutputProvider;
#[derive(Clone, Copy)]
@ -41,3 +42,19 @@ impl TransactionOutputProvider for NoopStore {
false
}
}
/// Converts actual transaction index into transaction index to use in
/// TransactionOutputProvider::transaction_output call.
/// When topological ordering is used, we expect ascendant transaction (TX1)
/// to come BEFORE descendant transaction (TX2) in the block, like this:
/// [ ... TX1 ... TX2 ... ]
/// When canonical ordering is used, transactions order within block is not
/// relevant for this check and ascendant transaction (TX1) can come AFTER
/// descendant, like this:
/// [ ... TX2 ... TX1 ... ]
pub fn transaction_index_for_output_check(ordering: TransactionOrdering, tx_idx: usize) -> usize {
match ordering {
TransactionOrdering::Topological => tx_idx,
TransactionOrdering::Canonical => ::std::usize::MAX,
}
}
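A worked example of the bound: under topological ordering a transaction at index 2 may only spend outputs created at indices 0 and 1, so tx_idx itself is the right search limit; under canonical ordering an ascendant may legitimately sit later in the block, so the limit is lifted to usize::MAX. Sketched with a local enum mirroring network::TransactionOrdering:

#[derive(Clone, Copy)]
enum Ordering { Topological, Canonical } // local stand-in for network::TransactionOrdering

fn output_check_bound(ordering: Ordering, tx_idx: usize) -> usize {
    match ordering {
        Ordering::Topological => tx_idx,          // only earlier txes may be spent
        Ordering::Canonical => ::std::usize::MAX, // position carries no spend information
    }
}

fn main() {
    // block [coinbase, TX2, TX1] where TX2 spends TX1's output is invalid
    // topologically but valid under CTOR, hence the unlimited bound:
    assert_eq!(output_check_bound(Ordering::Topological, 1), 1);
    assert_eq!(output_check_bound(Ordering::Canonical, 1), ::std::usize::MAX);
    println!("ok");
}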


@ -3,7 +3,7 @@
use blake2_rfc::blake2b::Blake2b;
use byteorder::{BigEndian, LittleEndian, ByteOrder, WriteBytesExt};
use chain::BlockHeader;
use primitives::hex::ToHex;
//use hex::ToHex;
pub struct EquihashParams {
pub N: u32,


@ -57,6 +57,8 @@ pub enum Error {
WitnessMerkleCommitmentMismatch,
/// SegWit: unexpected witness
UnexpectedWitness,
/// Non-canonical transaction ordering within block
NonCanonicalTransactionOrdering,
/// Database error
Database(DBError),
InvalidEquihashSolution,
@ -79,6 +81,8 @@ pub enum TransactionError {
CoinbaseSignatureLength(usize),
/// Transaction size exceeds block size limit
MaxSize,
/// Transaction size is below min size limit
MinSize,
/// Transaction has more sigops than it's allowed
MaxSigops,
/// Transaction is a part of memory pool, but is a coinbase


@ -10,11 +10,12 @@ use script::{Script, ScriptWitness};
pub fn transaction_sigops(
transaction: &Transaction,
store: &TransactionOutputProvider,
bip16_active: bool
bip16_active: bool,
checkdatasig_active: bool,
) -> usize {
let output_sigops: usize = transaction.outputs.iter().map(|output| {
let output_script: Script = output.script_pubkey.clone().into();
output_script.sigops_count(false)
output_script.sigops_count(checkdatasig_active, false)
}).sum();
// TODO: bitcoin/bitcoin also includes input_sigops here
@ -27,14 +28,14 @@ pub fn transaction_sigops(
for input in &transaction.inputs {
let input_script: Script = input.script_sig.clone().into();
input_sigops += input_script.sigops_count(false);
input_sigops += input_script.sigops_count(checkdatasig_active, false);
if bip16_active {
let previous_output = match store.transaction_output(&input.previous_output, usize::max_value()) {
Some(output) => output,
None => continue,
};
let prevout_script: Script = previous_output.script_pubkey.into();
bip16_sigops += input_script.pay_to_script_hash_sigops(&prevout_script);
bip16_sigops += input_script.pay_to_script_hash_sigops(checkdatasig_active, &prevout_script);
}
}
@ -86,7 +87,7 @@ fn witness_program_sigops(
match witness_version {
0 if witness_program.len() == 20 => 1,
0 if witness_program.len() == 32 => match script_witness.last() {
Some(subscript) => Script::new(subscript.clone()).sigops_count(true),
Some(subscript) => Script::new(subscript.clone()).sigops_count(false, true),
_ => 0,
},
_ => 0,


@ -163,7 +163,7 @@ impl<'a> BlockSigops<'a> {
fn check(&self) -> Result<(), Error> {
// We cannot know if bip16 is enabled at this point so we disable it.
let sigops = self.block.transactions.iter()
.map(|tx| transaction_sigops(&tx.raw, &NoopStore, false))
.map(|tx| transaction_sigops(&tx.raw, &NoopStore, false, false))
.sum::<usize>();
if sigops > self.max_sigops {


@ -186,7 +186,7 @@ impl<'a> TransactionSigops<'a> {
}
fn check(&self) -> Result<(), TransactionError> {
let sigops = transaction_sigops(&self.transaction.raw, &NoopStore, false);
let sigops = transaction_sigops(&self.transaction.raw, &NoopStore, false, false);
if sigops > self.max_sigops {
Err(TransactionError::MaxSigops)
} else {


@ -141,7 +141,7 @@ fn work_required_bitcoin_cash_adjusted(parent_header: IndexedBlockHeader, time:
// If the new block's timestamp is more than 2 * 10 minutes then allow
// mining of a min-difficulty block.
let max_bits = consensus.network.max_bits(&consensus.fork);
if consensus.network == Network::Testnet {
if consensus.network == Network::Testnet || consensus.network == Network::Unitest {
let max_time_gap = parent_header.raw.time + DOUBLE_SPACING_SECONDS;
if time > max_time_gap {
return max_bits.into();
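The added Network::Unitest arm reuses the existing testnet 20-minute rule: when a block's timestamp exceeds its parent's by more than twice the 10-minute target spacing, it may be mined at minimum difficulty. The predicate in isolation (constant assumed from the rule's definition):

const DOUBLE_SPACING_SECONDS: u32 = 2 * 10 * 60;

fn allows_min_difficulty_block(parent_time: u32, new_block_time: u32) -> bool {
    new_block_time > parent_time + DOUBLE_SPACING_SECONDS
}

fn main() {
    assert!(!allows_min_difficulty_block(1_000_000, 1_000_000 + 1200)); // exactly 20 min: no
    assert!(allows_min_difficulty_block(1_000_000, 1_000_000 + 1201)); // over 20 min: yes
    println!("ok");
}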
@ -222,6 +222,7 @@ pub mod tests {
height: 1000,
difficulty_adjustion_height: 0xffffffff,
monolith_time: 0xffffffff,
magnetic_anomaly_time: 0xffffffff,
}));
let mut header_provider = MemoryBlockHeaderProvider::default();
header_provider.insert(BlockHeader {
@ -277,6 +278,7 @@ pub mod tests {
height: 1000,
difficulty_adjustion_height: 0xffffffff,
monolith_time: 0xffffffff,
magnetic_anomaly_time: 0xffffffff,
}));