Merge pull request #1 from paritytech/zcash_init

Initial PR
Svyatoslav Nikolsky 2018-11-14 18:08:40 +03:00 committed by GitHub
commit 1ff4fbc2ec
99 changed files with 1864 additions and 4534 deletions

Cargo.lock

@ -61,7 +61,7 @@ name = "base64"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -69,7 +69,7 @@ dependencies = [
name = "bencher"
version = "0.1.0"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"network 0.1.0",
@ -85,7 +85,7 @@ name = "bigint"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"heapsize 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
@ -121,9 +121,19 @@ name = "bitflags"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "blake2-rfc"
version = "0.2.18"
source = "git+https://github.com/gtank/blake2-rfc.git?branch=persona#c7c458429c429b81fea845421f5ab859710fa8af"
dependencies = [
"arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byteorder"
version = "1.1.0"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -131,7 +141,7 @@ name = "bytes"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -157,15 +167,6 @@ dependencies = [
"serialization_derive 0.1.0",
]
[[package]]
name = "chrono"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "clap"
version = "2.27.1"
@ -181,6 +182,11 @@ dependencies = [
"yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "constant_time_eq"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "crossbeam-deque"
version = "0.2.0"
@ -262,7 +268,7 @@ name = "domain"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
@ -288,14 +294,14 @@ dependencies = [
[[package]]
name = "env_logger"
version = "0.5.3"
version = "0.5.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -405,6 +411,14 @@ name = "httparse"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "humantime"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hyper"
version = "0.11.7"
@ -590,7 +604,7 @@ name = "logs"
version = "0.1.0"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -629,7 +643,7 @@ name = "message"
version = "0.1.0"
dependencies = [
"bitcrypto 0.1.0",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"network 0.1.0",
"primitives 0.1.0",
@ -650,7 +664,7 @@ name = "miner"
version = "0.1.0"
dependencies = [
"bitcrypto 0.1.0",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -698,7 +712,7 @@ name = "murmur3"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -720,6 +734,8 @@ dependencies = [
"chain 0.1.0",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serialization 0.1.0",
]
[[package]]
@ -738,43 +754,11 @@ dependencies = [
"tokio-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"num-iter 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-integer"
version = "0.1.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-iter"
version = "0.1.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.1.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num-traits"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num_cpus"
version = "1.7.0"
@ -851,7 +835,7 @@ dependencies = [
"chain 0.1.0",
"clap 2.27.1 (registry+https://github.com/rust-lang/crates.io-index)",
"db 0.1.0",
"env_logger 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)",
"import 0.1.0",
"keys 0.1.0",
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
@ -879,7 +863,7 @@ name = "primitives"
version = "0.1.0"
dependencies = [
"bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -976,11 +960,31 @@ dependencies = [
"utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "regex-syntax"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ucd-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "relay"
version = "0.1.0"
@ -1145,8 +1149,9 @@ dependencies = [
name = "serialization"
version = "0.1.0"
dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1241,7 +1246,7 @@ version = "0.1.0"
dependencies = [
"bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitcrypto 0.1.0",
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"futures 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1295,10 +1300,10 @@ dependencies = [
[[package]]
name = "termcolor"
version = "0.3.3"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"wincolor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1400,6 +1405,11 @@ dependencies = [
"futures 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ucd-util"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unicase"
version = "2.1.0"
@ -1441,14 +1451,17 @@ name = "verification"
version = "0.1.0"
dependencies = [
"bitcrypto 0.1.0",
"blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc.git?branch=persona)",
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",
"serialization 0.1.0",
"storage 0.1.0",
@ -1490,6 +1503,14 @@ name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi-util"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
@ -1497,10 +1518,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "wincolor"
version = "0.1.5"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1536,12 +1558,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf"
"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d"
"checksum blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc.git?branch=persona)" = "<none>"
"checksum byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "74c0b906e9446b0a2e4f760cdb3fa4b2c48cdc6db8766a845c54b6ff063fd2e9"
"checksum bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6"
"checksum cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a9b13a57efd6b30ecd6598ebdb302cca617930b5470647570468a65d12ef9719"
"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
"checksum chrono 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c20ebe0b2b08b0aeddba49c609fe7957ba2e33449882cb186a180bc60682fa9"
"checksum clap 2.27.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b8c532887f1a292d17de05ae858a8fe50a301e196f9ef0ddb7ccd0d1d00f180"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
"checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"
"checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150"
"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"
@ -1552,7 +1575,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
"checksum either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "740178ddf48b1a9e878e6d6509a1442a2d42fd2928aae8e7a6f8a36fb01981b3"
"checksum elastic-array 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "058dc1040bbf28853bc48ec5f59190bac41b246c43c30064f0d318e6d1362fd6"
"checksum env_logger 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f15f0b172cb4f52ed5dbf47f774a387cd2315d1bf7894ab5af9b083ae27efa5a"
"checksum env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)" = "f4d7e69c283751083d53d01eac767407343b8b69c4bd70058e08adc2637cb257"
"checksum eth-secp256k1 0.5.7 (git+https://github.com/ethcore/rust-secp256k1)" = "<none>"
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6c0581a4e363262e52b87f59ee2afe3415361c6ec35e665924eb08afe8ff159"
@ -1567,6 +1590,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum heapsize 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "556cd479866cf85c3f671209c85e8a6990211c916d1002c2fcb2e9b7cf60bc36"
"checksum heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "54fab2624374e5137ae4df13bf32b0b269cb804df42d13a51221bbd431d1a237"
"checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07"
"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e"
"checksum hyper 0.11.7 (registry+https://github.com/rust-lang/crates.io-index)" = "4959ca95f55df4265bff2ad63066147255e6fa733682cf6d1cb5eaff6e53324b"
"checksum iovec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6e8b9c2247fcf6c6a1151f1156932be5606c9fd6f55a2d7f9fc1cb29386b2f7"
"checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c"
@ -1596,11 +1620,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"
"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
"checksum ns-dns-tokio 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d13b872a5a65428c1d4628fa04391f5c05ba8a23d5ee3094e22284fdddebed86"
"checksum num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "4703ad64153382334aa8db57c637364c322d3372e097840c72000dabdcf6156e"
"checksum num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f8d26da319fb45674985c78f1d1caf99aa4941f785d384a2ae36d0740bc3e2fe"
"checksum num-iter 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "4b226df12c5a59b63569dd57fafb926d91b385dfce33d8074a412411b689d593"
"checksum num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "99843c856d68d8b4313b03a17e33c4bb42ae8f6610ea81b28abe076ac721b9b0"
"checksum num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e7de20f146db9d920c45ee8ed8f71681fd9ade71909b48c3acbd766aa504cf10"
"checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
"checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c"
"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
@ -1618,7 +1638,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "8dde11f18c108289bef24469638a04dce49da56084f2d50618b226e47eb04509"
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
"checksum regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5bbbea44c5490a1e84357ff28b7d518b4619a159fed5d25f6c1de2d19cc42814"
"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
"checksum regex-syntax 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fbc557aac2b708fe84121caf261346cc2eed71978024337e42eb46b8a252ac6e"
"checksum relay 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f301bafeb60867c85170031bdb2fcf24c8041f33aee09e7b116a58d4e9f781c5"
"checksum rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
"checksum rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
@ -1649,7 +1671,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a761d12e6d8dcb4dcf952a7a89b475e3a9d69e4a69307e01a470977642914bd"
"checksum take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b157868d8ac1f56b64604539990685fa7611d8fa9e5476cf0c02cf34d32917c5"
"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
"checksum termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9065bced9c3e43453aa3d56f1e98590b8455b341d2fa191a1090c0dd0b242c75"
"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693"
"checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963"
@ -1658,6 +1680,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum tokio-io 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "514aae203178929dbf03318ad7c683126672d4d96eccb77b29603d33c9e25743"
"checksum tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389"
"checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162"
"checksum ucd-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d0f8bfa9ff0cadcd210129ad9d2c5f145c13e9ced3d3e5d948a6213487d52444"
"checksum unicase 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284b6d3db520d67fbe88fd778c21510d1b0ba4a551e5d0fbb023d33405f6de8a"
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc"
@ -1670,8 +1693,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "afc5508759c5bf4285e61feb862b6083c8480aec864fa17a81fdec6f69b461ab"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
"checksum wincolor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0878187fa88838d2006c0a76f30d64797098426245b375383f60acb6aed8a203"
"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba"
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
"checksum xdg 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a66b7c2281ebde13cf4391d70d4c7e5946c3c25e72a7b859ca8f677dcd0b0c61"
"checksum yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992"


@ -26,7 +26,7 @@ pub fn fetch(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
.merkled_header().parent(rolling_hash.clone()).nonce((x as u8).into()).build()
.build();
rolling_hash = next_block.hash();
blocks.push(next_block);
@ -69,7 +69,7 @@ pub fn write(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
.merkled_header().parent(rolling_hash.clone()).nonce((x as u8).into()).build()
.build();
rolling_hash = next_block.hash();
blocks.push(next_block.into());
@ -107,7 +107,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32 * 4).build()
.merkled_header().parent(rolling_hash.clone()).nonce(((x * 4) as u8).into()).build()
.build();
rolling_hash = next_block.hash();
blocks.push(next_block);
@ -118,7 +118,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(base).nonce(x as u32 * 4 + 2).build()
.merkled_header().parent(base).nonce(((x * 4 + 2) as u8).into()).build()
.build();
let next_base = next_block_side.hash();
blocks.push(next_block_side);
@ -129,7 +129,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(next_base).nonce(x as u32 * 4 + 3).build()
.merkled_header().parent(next_base).nonce(((x * 4 + 3) as u8).into()).build()
.build();
blocks.push(next_block_side_continue);
@ -139,7 +139,7 @@ pub fn reorg_short(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32 * 4 + 1).build()
.merkled_header().parent(rolling_hash.clone()).nonce(((x * 4 + 1) as u8).into()).build()
.build();
rolling_hash = next_block_continue.hash();
blocks.push(next_block_continue);
@ -208,7 +208,7 @@ pub fn write_heavy(benchmark: &mut Benchmark) {
.lock_time(x as u32)
.output().value(5000000000).build()
.build()
.merkled_header().parent(rolling_hash.clone()).nonce(x as u32).build()
.merkled_header().parent(rolling_hash.clone()).nonce((x as u8).into()).build()
.build();
rolling_hash = next_block.hash();
blocks.push(next_block);


@ -2,7 +2,7 @@ use std::sync::Arc;
use db::BlockChainDatabase;
use chain::IndexedBlock;
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify, VerificationLevel};
use network::{Network, ConsensusParams, ConsensusFork};
use network::{Network, ConsensusParams};
use test_data;
use byteorder::{LittleEndian, ByteOrder};
@ -43,7 +43,7 @@ pub fn main(benchmark: &mut Benchmark) {
.build()
.merkled_header()
.parent(rolling_hash.clone())
.nonce(x as u32)
.nonce((x as u8).into())
.build()
.build();
rolling_hash = next_block.hash();
@ -96,7 +96,7 @@ pub fn main(benchmark: &mut Benchmark) {
assert_eq!(store.best_block().hash, rolling_hash);
let chain_verifier = ChainVerifier::new(store.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let chain_verifier = ChainVerifier::new(store.clone(), ConsensusParams::new(Network::Unitest));
// bench
benchmark.start();

File diff suppressed because one or more lines are too long


@ -4,21 +4,38 @@ use ser::{deserialize, serialize};
use crypto::dhash256;
use compact::Compact;
use hash::H256;
use primitives::bytes::Bytes;
use solution::EquihashSolution;
use ser::Stream;
#[derive(PartialEq, Clone, Serializable, Deserializable)]
pub struct BlockHeader {
pub version: u32,
pub previous_header_hash: H256,
pub merkle_root_hash: H256,
pub reserved_hash: H256,
pub time: u32,
pub bits: Compact,
pub nonce: u32,
pub nonce: H256,
pub solution: EquihashSolution,
}
impl BlockHeader {
pub fn hash(&self) -> H256 {
dhash256(&serialize(self))
}
pub fn equihash_input(&self) -> Bytes {
let mut stream = Stream::new();
stream.append(&self.version)
.append(&self.previous_header_hash)
.append(&self.merkle_root_hash)
.append(&self.reserved_hash)
.append(&self.time)
.append(&self.bits)
.append(&self.nonce);
stream.out()
}
}
impl fmt::Debug for BlockHeader {
@ -30,6 +47,7 @@ impl fmt::Debug for BlockHeader {
.field("time", &self.time)
.field("bits", &self.bits)
.field("nonce", &self.nonce)
.field("equihash_solution", &self.solution)
.finish()
}
}
@ -43,54 +61,57 @@ impl From<&'static str> for BlockHeader {
#[cfg(test)]
mod tests {
use ser::{Reader, Error as ReaderError, Stream};
use solution::SOLUTION_SIZE;
use super::BlockHeader;
fn test_block_buffer() -> Vec<u8> {
let mut buffer = vec![
1, 0, 0, 0,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0,
5, 0, 0, 0,
6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
253, 64, 5,
];
buffer.extend_from_slice(&[0u8; SOLUTION_SIZE]);
buffer
}
#[test]
fn test_block_header_stream() {
let block_header = BlockHeader {
version: 1,
previous_header_hash: [2; 32].into(),
merkle_root_hash: [3; 32].into(),
reserved_hash: Default::default(),
time: 4,
bits: 5.into(),
nonce: 6,
nonce: 6.into(),
solution: Default::default(),
};
let mut stream = Stream::default();
let mut stream = Stream::new();
stream.append(&block_header);
let expected = vec![
1, 0, 0, 0,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 0, 0, 0,
5, 0, 0, 0,
6, 0, 0, 0,
].into();
assert_eq!(stream.out(), expected);
assert_eq!(stream.out(), test_block_buffer().into());
}
#[test]
fn test_block_header_reader() {
let buffer = vec![
1, 0, 0, 0,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 0, 0, 0,
5, 0, 0, 0,
6, 0, 0, 0,
];
let buffer = test_block_buffer();
let mut reader = Reader::new(&buffer);
let expected = BlockHeader {
version: 1,
previous_header_hash: [2; 32].into(),
merkle_root_hash: [3; 32].into(),
reserved_hash: Default::default(),
time: 4,
bits: 5.into(),
nonce: 6,
nonce: 6.into(),
solution: Default::default(),
};
assert_eq!(expected, reader.read().unwrap());
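The `253, 64, 5` bytes appended in `test_block_buffer` are the compact-size prefix of the solution: 1344 does not fit in one byte, so it is written as the 0xFD marker followed by the length as a little-endian u16. A quick standalone check of that arithmetic and of the resulting header sizes (a sketch, not part of the diff):

fn main() {
    // 1344 = 0x0540, so the compact-size encoding is 0xFD 0x40 0x05.
    let len: u16 = 1344;
    assert_eq!([0xFD, (len & 0xFF) as u8, (len >> 8) as u8], [253, 64, 5]);

    // Equihash input: version + prev hash + merkle root + reserved + time + bits + nonce.
    let input_len = 4 + 32 + 32 + 32 + 4 + 4 + 32;
    assert_eq!(input_len, 140);

    // Full serialized header: input + 3-byte length prefix + 1344-byte solution.
    assert_eq!(input_len + 3 + 1344, 1487);
}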


@ -1,7 +1,7 @@
use std::cmp;
use hash::H256;
use hex::FromHex;
use ser::{Serializable, serialized_list_size, serialized_list_size_with_flags, deserialize, SERIALIZE_TRANSACTION_WITNESS};
use ser::{Serializable, serialized_list_size, deserialize};
use block::Block;
use transaction::Transaction;
use merkle_root::merkle_root;
@ -54,29 +54,10 @@ impl IndexedBlock {
header_size + txs_size
}
pub fn size_with_witness(&self) -> usize {
let header_size = self.header.raw.serialized_size();
let transactions = self.transactions.iter().map(|tx| &tx.raw).collect::<Vec<_>>();
let txs_size = serialized_list_size_with_flags::<Transaction, &Transaction>(&transactions, SERIALIZE_TRANSACTION_WITNESS);
header_size + txs_size
}
pub fn merkle_root(&self) -> H256 {
merkle_root(&self.transactions.iter().map(|tx| &tx.hash).collect::<Vec<&H256>>())
}
pub fn witness_merkle_root(&self) -> H256 {
let hashes = match self.transactions.split_first() {
None => vec![],
Some((_, rest)) => {
let mut hashes = vec![H256::from(0)];
hashes.extend(rest.iter().map(|tx| tx.raw.witness_hash()));
hashes
},
};
merkle_root(&hashes)
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|tx| tx.raw.is_final_in_block(height, self.header.raw.time))
}
@ -87,18 +68,3 @@ impl From<&'static str> for IndexedBlock {
deserialize(&s.from_hex::<Vec<u8>>().unwrap() as &[u8]).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::IndexedBlock;
#[test]
fn size_with_witness_not_equal_to_size() {
let block_without_witness: IndexedBlock = "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();
assert_eq!(block_without_witness.size(), block_without_witness.size_with_witness());
// bip143 block
let block_with_witness: IndexedBlock = "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000010100000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000000000".into();
assert!(block_with_witness.size() != block_with_witness.size_with_witness());
}
}

chain/src/join_split.rs (new file)

@ -0,0 +1,49 @@
use std::io;
use hash::{H256, H512};
use ser::{Error, Stream, Reader, FixedArray_H256_2,
FixedArray_u8_296, FixedArray_u8_601_2};
#[derive(Debug, PartialEq, Default, Clone)]
pub struct JointSplit {
pub descriptions: Vec<JointSplitDescription>,
pub pubkey: H256,
pub sig: H512,
}
#[derive(Debug, PartialEq, Default, Clone, Serializable, Deserializable)]
pub struct JointSplitDescription {
pub value_pub_old: u64,
pub value_pub_new: u64,
pub anchor: H256,
pub nullifiers: FixedArray_H256_2,
pub commitments: FixedArray_H256_2,
pub ephemeral_key: H256,
pub random_seed: H256,
pub macs: FixedArray_H256_2,
pub zkproof: FixedArray_u8_296,
pub ciphertexts: FixedArray_u8_601_2,
}
pub fn serialize_joint_split(stream: &mut Stream, joint_split: &Option<JointSplit>) {
if let &Some(ref joint_split) = joint_split {
stream.append_list(&joint_split.descriptions)
.append(&joint_split.pubkey)
.append(&joint_split.sig);
}
}
pub fn deserialize_joint_split<T>(reader: &mut Reader<T>) -> Result<Option<JointSplit>, Error> where T: io::Read {
let descriptions: Vec<JointSplitDescription> = reader.read_list()?;
if descriptions.is_empty() {
return Ok(None);
}
let pubkey = reader.read()?;
let sig = reader.read()?;
Ok(Some(JointSplit {
descriptions,
pubkey,
sig,
}))
}
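Note how presence of a `JointSplit` is signalled entirely by the leading description count that `read_list` consumes: an empty list deserializes to `None`, and the pubkey and signature follow only when the list is non-empty. A standalone sketch of that presence rule (single-byte counts assumed for brevity):

fn joint_split_present(payload: &[u8]) -> Option<bool> {
    // The first byte is the compact-size count of descriptions.
    payload.first().map(|&count| count != 0)
}

fn main() {
    assert_eq!(joint_split_present(&[0x00]), Some(false)); // empty list => None upstream
    assert_eq!(joint_split_present(&[0x02]), Some(true));  // two descriptions follow
}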


@ -10,6 +10,8 @@ pub mod constants;
mod block;
mod block_header;
mod solution;
mod join_split;
mod merkle_root;
mod transaction;
@ -27,6 +29,8 @@ pub use primitives::{hash, bytes, bigint, compact};
pub use block::Block;
pub use block_header::BlockHeader;
pub use solution::EquihashSolution;
pub use join_split::{JointSplit, JointSplitDescription};
pub use merkle_root::{merkle_root, merkle_node_hash};
pub use transaction::{Transaction, TransactionInput, TransactionOutput, OutPoint};

chain/src/solution.rs (new file)

@ -0,0 +1,48 @@
use std::{fmt, io};
use hex::ToHex;
use ser::{Error, Serializable, Deserializable, Stream, Reader};
/// Equihash solution size.
pub const SOLUTION_SIZE: usize = 1344;
#[derive(Clone)]
pub struct EquihashSolution([u8; SOLUTION_SIZE]);
impl AsRef<[u8]> for EquihashSolution {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl Default for EquihashSolution {
fn default() -> Self {
EquihashSolution([0; SOLUTION_SIZE])
}
}
impl PartialEq<EquihashSolution> for EquihashSolution {
fn eq(&self, other: &EquihashSolution) -> bool {
self.0.as_ref() == other.0.as_ref()
}
}
impl fmt::Debug for EquihashSolution {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0.to_hex::<String>())
}
}
impl Serializable for EquihashSolution {
fn serialize(&self, stream: &mut Stream) {
stream.append_list(&self.0);
}
}
impl Deserializable for EquihashSolution {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where Self: Sized, T: io::Read {
let v = reader.read_list_exact(SOLUTION_SIZE)?;
let mut sol = [0; SOLUTION_SIZE];
sol.copy_from_slice(&v);
Ok(EquihashSolution(sol))
}
}
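Because `Serializable` writes the solution with `append_list`, the wire form is the 3-byte compact-size prefix for 1344 followed by the raw bytes. A usage sketch under that assumption, using the crate's `serialize`/`deserialize` helpers seen elsewhere in this diff:

use ser::{deserialize, serialize};

fn solution_roundtrip() {
    let solution = EquihashSolution::default();
    let bytes = serialize(&solution);
    assert_eq!(bytes.len(), 3 + SOLUTION_SIZE); // 1347 bytes on the wire
    let back: EquihashSolution = deserialize(bytes.as_ref()).unwrap();
    assert_eq!(back, solution);
}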


@ -5,17 +5,13 @@ use std::io;
use heapsize::HeapSizeOf;
use hex::FromHex;
use bytes::Bytes;
use ser::{deserialize, serialize, serialize_with_flags, SERIALIZE_TRANSACTION_WITNESS};
use ser::{deserialize, serialize};
use crypto::dhash256;
use hash::H256;
use constants::{SEQUENCE_FINAL, LOCKTIME_THRESHOLD};
use join_split::{JointSplit, deserialize_joint_split, serialize_joint_split};
use ser::{Error, Serializable, Deserializable, Stream, Reader};
/// Must be zero.
const WITNESS_MARKER: u8 = 0;
/// Must be nonzero.
const WITNESS_FLAG: u8 = 1;
#[derive(Debug, PartialEq, Eq, Clone, Default, Serializable, Deserializable)]
pub struct OutPoint {
pub hash: H256,
@ -40,7 +36,6 @@ pub struct TransactionInput {
pub previous_output: OutPoint,
pub script_sig: Bytes,
pub sequence: u32,
pub script_witness: Vec<Bytes>,
}
impl TransactionInput {
@ -49,23 +44,17 @@ impl TransactionInput {
previous_output: OutPoint::null(),
script_sig: script_sig,
sequence: SEQUENCE_FINAL,
script_witness: vec![],
}
}
pub fn is_final(&self) -> bool {
self.sequence == SEQUENCE_FINAL
}
pub fn has_witness(&self) -> bool {
!self.script_witness.is_empty()
}
}
impl HeapSizeOf for TransactionInput {
fn heap_size_of_children(&self) -> usize {
self.script_sig.heap_size_of_children() +
self.script_witness.heap_size_of_children()
self.script_sig.heap_size_of_children()
}
}
@ -96,6 +85,7 @@ pub struct Transaction {
pub inputs: Vec<TransactionInput>,
pub outputs: Vec<TransactionOutput>,
pub lock_time: u32,
pub joint_split: Option<JointSplit>,
}
impl From<&'static str> for Transaction {
@ -115,10 +105,6 @@ impl Transaction {
dhash256(&serialize(self))
}
pub fn witness_hash(&self) -> H256 {
dhash256(&serialize_with_flags(self, SERIALIZE_TRANSACTION_WITNESS))
}
pub fn inputs(&self) -> &[TransactionInput] {
&self.inputs
}
@ -167,10 +153,6 @@ impl Transaction {
self.inputs.iter().all(TransactionInput::is_final)
}
pub fn has_witness(&self) -> bool {
self.inputs.iter().any(TransactionInput::has_witness)
}
pub fn total_spends(&self) -> u64 {
let mut result = 0u64;
for output in self.outputs.iter() {
@ -198,63 +180,39 @@ impl Deserializable for TransactionInput {
previous_output: reader.read()?,
script_sig: reader.read()?,
sequence: reader.read()?,
script_witness: vec![],
})
}
}
impl Serializable for Transaction {
fn serialize(&self, stream: &mut Stream) {
let include_transaction_witness = stream.include_transaction_witness() && self.has_witness();
match include_transaction_witness {
false => stream
.append(&self.version)
.append_list(&self.inputs)
.append_list(&self.outputs)
.append(&self.lock_time),
true => {
stream
.append(&self.version)
.append(&WITNESS_MARKER)
.append(&WITNESS_FLAG)
.append_list(&self.inputs)
.append_list(&self.outputs);
for input in &self.inputs {
stream.append_list(&input.script_witness);
}
stream.append(&self.lock_time)
}
};
stream
.append(&self.version)
.append_list(&self.inputs)
.append_list(&self.outputs)
.append(&self.lock_time);
serialize_joint_split(stream, &self.joint_split);
}
}
impl Deserializable for Transaction {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, Error> where Self: Sized, T: io::Read {
let version = reader.read()?;
let mut inputs: Vec<TransactionInput> = reader.read_list()?;
let read_witness = if inputs.is_empty() {
let witness_flag: u8 = reader.read()?;
if witness_flag != WITNESS_FLAG {
return Err(Error::MalformedData);
}
inputs = reader.read_list()?;
true
} else {
false
};
let inputs: Vec<TransactionInput> = reader.read_list()?;
let outputs = reader.read_list()?;
if read_witness {
for input in inputs.iter_mut() {
input.script_witness = reader.read_list()?;
}
}
let lock_time = reader.read()?;
let joint_split = if version >= 2 {
deserialize_joint_split(reader)?
} else {
None
};
Ok(Transaction {
version: version,
inputs: inputs,
outputs: outputs,
lock_time: reader.read()?,
lock_time: lock_time,
joint_split: joint_split,
})
}
}
@ -262,8 +220,8 @@ impl Deserializable for Transaction {
#[cfg(test)]
mod tests {
use hash::H256;
use ser::{Serializable, serialize_with_flags, SERIALIZE_TRANSACTION_WITNESS};
use super::{Transaction, TransactionInput, OutPoint, TransactionOutput};
use ser::{Serializable};
use super::Transaction;
// real transaction from block 80000
// https://blockchain.info/rawtx/5a4ebf66822b0b2d56bd9dc64ece0bc38ee7844a23ff1d7320a88c5fdb2ad3e2
@ -281,7 +239,6 @@ mod tests {
let tx_output = &t.outputs[0];
assert_eq!(tx_output.value, 5000000000);
assert_eq!(tx_output.script_pubkey, "76a914404371705fa9bd789a2fcd52d2c580b65d35549d88ac".into());
assert!(!t.has_witness());
}
#[test]
@ -297,60 +254,4 @@ mod tests {
let tx: Transaction = raw_tx.into();
assert_eq!(tx.serialized_size(), raw_tx.len() / 2);
}
#[test]
fn test_transaction_reader_with_witness() {
// test case from https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
let actual: Transaction = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000".into();
let expected = Transaction {
version: 1,
inputs: vec![TransactionInput {
previous_output: OutPoint {
hash: "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f".into(),
index: 0,
},
script_sig: "4830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01".into(),
sequence: 0xffffffee,
script_witness: vec![],
}, TransactionInput {
previous_output: OutPoint {
hash: "ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a".into(),
index: 1,
},
script_sig: "".into(),
sequence: 0xffffffff,
script_witness: vec![
"304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01".into(),
"025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357".into(),
],
}],
outputs: vec![TransactionOutput {
value: 0x0000000006b22c20,
script_pubkey: "76a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac".into(),
}, TransactionOutput {
value: 0x000000000d519390,
script_pubkey: "76a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac".into(),
}],
lock_time: 0x00000011,
};
assert_eq!(actual, expected);
}
#[test]
fn test_serialization_with_flags() {
let transaction_without_witness: Transaction = "000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();
assert_eq!(serialize_with_flags(&transaction_without_witness, 0), serialize_with_flags(&transaction_without_witness, SERIALIZE_TRANSACTION_WITNESS));
let transaction_with_witness: Transaction = "0000000000010100000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000000000".into();
assert!(serialize_with_flags(&transaction_with_witness, 0) != serialize_with_flags(&transaction_with_witness, SERIALIZE_TRANSACTION_WITNESS));
}
#[test]
fn test_witness_hash_differs() {
let transaction_without_witness: Transaction = "000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();
assert_eq!(transaction_without_witness.hash(), transaction_without_witness.witness_hash());
let transaction_with_witness: Transaction = "0000000000010100000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000000000".into();
assert!(transaction_with_witness.hash() != transaction_with_witness.witness_hash());
}
}
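After this change the transaction encoding drops the SegWit marker/flag machinery entirely; what remains is the classic layout plus a version-gated joinsplit tail. A sketch of the resulting wire order (sizes elided; the gate mirrors the `version >= 2` check above):

// version   (u32 LE)
// inputs    (compact-size count, then each input)
// outputs   (compact-size count, then each output)
// lock_time (u32 LE)
// joinsplit tail, read only for version >= 2:
//   description count (0 => no JointSplit), descriptions, pubkey, sig

fn reads_joint_split_tail(version: u32) -> bool {
    version >= 2 // Sprout joinsplits appeared with transaction version 2
}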


@ -4,6 +4,7 @@ use std::path::Path;
use parking_lot::RwLock;
use hash::H256;
use bytes::Bytes;
use primitives::compact::Compact;
use chain::{
IndexedBlock, IndexedBlockHeader, IndexedTransaction, BlockHeader, Block, Transaction,
OutPoint, TransactionOutput
@ -562,8 +563,8 @@ impl<T> Store for BlockChainDatabase<T> where T: KeyValueDatabase {
}
/// get blockchain difficulty
fn difficulty(&self) -> f64 {
self.best_header().bits.to_f64()
fn difficulty(&self, max_bits: Compact) -> f64 {
self.best_header().bits.to_f64(max_bits)
}
}
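`difficulty` now takes the network's `max_bits` instead of hard-coding Bitcoin's maximum target, since Zcash uses a different proof-of-work limit. Conceptually (an illustrative reduction, not the `Compact::to_f64` implementation): difficulty is the ratio of the easiest allowed target to the current target.

fn difficulty(max_target: f64, current_target: f64) -> f64 {
    // Both arguments stand in for the 256-bit targets a Compact expands to.
    max_target / current_target
}

fn main() {
    // A target twice as hard to hit as the maximum gives difficulty 2.0.
    assert_eq!(difficulty(2.0, 1.0), 2.0);
}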


@ -9,10 +9,6 @@ pub enum InventoryType {
MessageTx = 1,
MessageBlock = 2,
MessageFilteredBlock = 3,
MessageCompactBlock = 4,
MessageWitnessTx = 0x40000001,
MessageWitnessBlock = 0x40000002,
MessageWitnessFilteredBlock = 0x40000003,
}
impl InventoryType {
@ -22,10 +18,6 @@ impl InventoryType {
1 => Some(InventoryType::MessageTx),
2 => Some(InventoryType::MessageBlock),
3 => Some(InventoryType::MessageFilteredBlock),
4 => Some(InventoryType::MessageCompactBlock),
0x40000001 => Some(InventoryType::MessageWitnessTx),
0x40000002 => Some(InventoryType::MessageWitnessBlock),
0x40000003 => Some(InventoryType::MessageWitnessFilteredBlock),
_ => None
}
}
@ -64,26 +56,12 @@ impl InventoryVector {
}
}
pub fn witness_tx(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageWitnessTx,
hash: hash,
}
}
pub fn block(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageBlock,
hash: hash,
}
}
pub fn witness_block(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageWitnessBlock,
hash: hash,
}
}
}
impl Serializable for InventoryVector {
@ -141,18 +119,10 @@ mod tests {
assert_eq!(1u32, InventoryType::MessageTx.into());
assert_eq!(2u32, InventoryType::MessageBlock.into());
assert_eq!(3u32, InventoryType::MessageFilteredBlock.into());
assert_eq!(4u32, InventoryType::MessageCompactBlock.into());
assert_eq!(0x40000001u32, InventoryType::MessageWitnessTx.into());
assert_eq!(0x40000002u32, InventoryType::MessageWitnessBlock.into());
assert_eq!(0x40000003u32, InventoryType::MessageWitnessFilteredBlock.into());
assert_eq!(InventoryType::from_u32(0).unwrap(), InventoryType::Error);
assert_eq!(InventoryType::from_u32(1).unwrap(), InventoryType::MessageTx);
assert_eq!(InventoryType::from_u32(2).unwrap(), InventoryType::MessageBlock);
assert_eq!(InventoryType::from_u32(3).unwrap(), InventoryType::MessageFilteredBlock);
assert_eq!(InventoryType::from_u32(4).unwrap(), InventoryType::MessageCompactBlock);
assert_eq!(InventoryType::from_u32(0x40000001).unwrap(), InventoryType::MessageWitnessTx);
assert_eq!(InventoryType::from_u32(0x40000002).unwrap(), InventoryType::MessageWitnessBlock);
assert_eq!(InventoryType::from_u32(0x40000003).unwrap(), InventoryType::MessageWitnessFilteredBlock);
}
}
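With the witness and compact-block variants gone, only inventory values 0 through 3 remain meaningful. A standalone mirror of the reduced `from_u32` mapping:

fn inv_type_name(v: u32) -> Option<&'static str> {
    match v {
        0 => Some("Error"),
        1 => Some("MessageTx"),
        2 => Some("MessageBlock"),
        3 => Some("MessageFilteredBlock"),
        _ => None, // 4 and the 0x4000000x witness values are now rejected
    }
}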


@ -41,15 +41,6 @@ impl Services {
self
}
pub fn witness(&self) -> bool {
self.bit_at(3)
}
pub fn with_witness(mut self, v: bool) -> Self {
self.set_bit(3, v);
self
}
pub fn xthin(&self) -> bool {
self.bit_at(4)
}
@ -59,15 +50,6 @@ impl Services {
self
}
pub fn bitcoin_cash(&self) -> bool {
self.bit_at(5)
}
pub fn with_bitcoin_cash(mut self, v: bool) -> Self {
self.set_bit(5, v);
self
}
pub fn includes(&self, other: &Self) -> bool {
self.0 & other.0 == other.0
}
@ -92,14 +74,10 @@ mod test {
#[test]
fn test_serivces_includes() {
let s1 = Services::default()
.with_witness(true)
.with_xthin(true);
let s2 = Services::default()
.with_witness(true);
let s2 = Services::default();
assert!(s1.witness());
assert!(s1.xthin());
assert!(s2.witness());
assert!(!s2.xthin());
assert!(s1.includes(&s2));
assert!(!s2.includes(&s1));


@ -2,7 +2,7 @@ use ser::Stream;
use bytes::{TaggedBytes, Bytes};
use network::Magic;
use common::Command;
use serialization::serialize_payload_with_flags;
use serialization::serialize_payload;
use {Payload, MessageResult, MessageHeader};
pub fn to_raw_message(magic: Magic, command: Command, payload: &Bytes) -> Bytes {
@ -19,11 +19,7 @@ pub struct Message<T> {
impl<T> Message<T> where T: Payload {
pub fn new(magic: Magic, version: u32, payload: &T) -> MessageResult<Self> {
Self::with_flags(magic, version, payload, 0)
}
pub fn with_flags(magic: Magic, version: u32, payload: &T, serialization_flags: u32) -> MessageResult<Self> {
let serialized = try!(serialize_payload_with_flags(payload, version, serialization_flags));
let serialized = try!(serialize_payload(payload, version));
let message = Message {
bytes: TaggedBytes::new(to_raw_message(magic, T::command().into(), &serialized)),


@ -62,14 +62,14 @@ impl Serializable for MessageHeader {
mod tests {
use bytes::Bytes;
use ser::serialize;
use network::{Network, ConsensusFork};
use network::Network;
use super::MessageHeader;
#[test]
fn test_message_header_serialization() {
let expected = "f9beb4d96164647200000000000000001f000000ed52399b".into();
let expected = "24e927646164647200000000000000001f000000ed52399b".into();
let header = MessageHeader {
magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore),
magic: Network::Mainnet.magic(),
command: "addr".into(),
len: 0x1f,
checksum: "ed52399b".into(),
@ -80,14 +80,14 @@ mod tests {
#[test]
fn test_message_header_deserialization() {
let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed52399b".into();
let raw: Bytes = "24e927646164647200000000000000001f000000ed52399b".into();
let expected = MessageHeader {
magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore),
magic: Network::Mainnet.magic(),
command: "addr".into(),
len: 0x1f,
checksum: "ed52399b".into(),
};
assert_eq!(expected, MessageHeader::deserialize(&raw, Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).unwrap());
assert_eq!(expected, MessageHeader::deserialize(&raw, Network::Mainnet.magic()).unwrap());
}
}
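The raw-header hex change in these tests is the network magic swap: Bitcoin mainnet's 0xD9B4BEF9 serializes little-endian as f9beb4d9, while Zcash mainnet's 0x6427E924 serializes as 24e92764. A quick check:

fn main() {
    assert_eq!(0xD9B4BEF9u32.to_le_bytes(), [0xf9, 0xbe, 0xb4, 0xd9]);
    assert_eq!(0x6427E924u32.to_le_bytes(), [0x24, 0xe9, 0x27, 0x64]);
}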


@ -1,5 +1,5 @@
mod stream;
mod reader;
pub use self::stream::{serialize_payload, serialize_payload_with_flags};
pub use self::stream::serialize_payload;
pub use self::reader::deserialize_payload;


@ -3,11 +3,7 @@ use ser::Stream;
use {Payload, Error, MessageResult};
pub fn serialize_payload<T>(t: &T, version: u32) -> MessageResult<Bytes> where T: Payload {
serialize_payload_with_flags(t, version, 0)
}
pub fn serialize_payload_with_flags<T>(t: &T, version: u32, serialization_flags: u32) -> MessageResult<Bytes> where T: Payload {
let mut stream = PayloadStream::new(version, serialization_flags);
let mut stream = PayloadStream::new(version);
try!(stream.append(t));
Ok(stream.out())
}
@ -18,9 +14,9 @@ pub struct PayloadStream {
}
impl PayloadStream {
pub fn new(version: u32, serialization_flags: u32) -> Self {
pub fn new(version: u32) -> Self {
PayloadStream {
stream: Stream::with_flags(serialization_flags),
stream: Stream::new(),
version: version,
}
}


@ -1,32 +0,0 @@
use std::io;
use ser::{Stream, Reader};
use common::BlockHeaderAndIDs;
use {Payload, MessageResult};
#[derive(Debug, PartialEq)]
pub struct CompactBlock {
pub header: BlockHeaderAndIDs,
}
impl Payload for CompactBlock {
fn version() -> u32 {
70014
}
fn command() -> &'static str {
"cmpctblock"
}
fn deserialize_payload<T>(reader: &mut Reader<T>, _version: u32) -> MessageResult<Self> where T: io::Read {
let block = CompactBlock {
header: try!(reader.read()),
};
Ok(block)
}
fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> {
stream.append(&self.header);
Ok(())
}
}


@ -1,7 +1,6 @@
pub mod addr;
mod block;
mod blocktxn;
mod compactblock;
mod feefilter;
mod filteradd;
mod filterclear;
@ -19,7 +18,6 @@ mod notfound;
mod ping;
mod pong;
pub mod reject;
mod sendcompact;
mod sendheaders;
mod tx;
mod verack;
@ -28,7 +26,6 @@ pub mod version;
pub use self::addr::Addr;
pub use self::block::Block;
pub use self::blocktxn::BlockTxn;
pub use self::compactblock::CompactBlock;
pub use self::feefilter::FeeFilter;
pub use self::filterload::{FilterLoad, FILTERLOAD_MAX_FILTER_LEN, FILTERLOAD_MAX_HASH_FUNCS};
pub use self::filterload::FilterFlags;
@ -47,7 +44,6 @@ pub use self::notfound::NotFound;
pub use self::ping::Ping;
pub use self::pong::Pong;
pub use self::reject::Reject;
pub use self::sendcompact::SendCompact;
pub use self::sendheaders::SendHeaders;
pub use self::tx::Tx;
pub use self::verack::Verack;


@ -1,35 +0,0 @@
use std::io;
use ser::{Stream, Reader};
use {Payload, MessageResult};
#[derive(Debug, PartialEq)]
pub struct SendCompact {
pub first: bool,
pub second: u64,
}
impl Payload for SendCompact {
fn version() -> u32 {
70014
}
fn command() -> &'static str {
"sendcmpct"
}
fn deserialize_payload<T>(reader: &mut Reader<T>, _version: u32) -> MessageResult<Self> where T: io::Read {
let send_compact = SendCompact {
first: try!(reader.read()),
second: try!(reader.read()),
};
Ok(send_compact)
}
fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> {
stream
.append(&self.first)
.append(&self.second);
Ok(())
}
}


@ -3,9 +3,9 @@ use primitives::hash::H256;
use primitives::compact::Compact;
use chain::{OutPoint, TransactionOutput, IndexedTransaction};
use storage::{SharedStore, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork, TransactionOrdering};
use network::ConsensusParams;
use memory_pool::{MemoryPool, OrderingStrategy, Entry};
use verification::{work_required, block_reward_satoshi, transaction_sigops, median_timestamp_inclusive};
use verification::{work_required, block_reward_satoshi, transaction_sigops};
const BLOCK_VERSION: u32 = 0x20000000;
const BLOCK_HEADER_SIZE: u32 = 4 + 32 + 32 + 4 + 4 + 4;
@ -246,20 +246,15 @@ impl<'a, T> Iterator for FittingTransactionsIterator<'a, T> where T: Iterator<It
}
impl BlockAssembler {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, median_timestamp: u32, consensus: &ConsensusParams) -> BlockTemplate {
pub fn create_new_block(&self, store: &SharedStore, mempool: &MemoryPool, time: u32, consensus: &ConsensusParams) -> BlockTemplate {
// get best block
// take its hash && height
let best_block = store.best_block();
let previous_header_hash = best_block.hash;
let height = best_block.number + 1;
let bits = work_required(previous_header_hash.clone(), time, height, store.as_block_header_provider(), consensus);
let bits = work_required(previous_header_hash.clone(), height, store.as_block_header_provider(), consensus);
let version = BLOCK_VERSION;
let checkdatasig_active = match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.magnetic_anomaly_time,
_ => false
};
let mut coinbase_value = block_reward_satoshi(height);
let mut transactions = Vec::new();
@ -271,7 +266,7 @@ impl BlockAssembler {
self.max_block_sigops,
height,
time,
checkdatasig_active);
false);
for entry in tx_iter {
// miner_fee is i64, but we can safely cast it to u64
// memory pool should restrict miner fee to be positive
@ -280,15 +275,6 @@ impl BlockAssembler {
transactions.push(tx);
}
// sort block transactions
let median_time_past = median_timestamp_inclusive(previous_header_hash.clone(), store.as_block_header_provider());
match consensus.fork.transaction_ordering(median_time_past) {
TransactionOrdering::Canonical => transactions.sort_unstable_by(|tx1, tx2|
tx1.hash.cmp(&tx2.hash)),
// memory pool iter returns transactions in topological order
TransactionOrdering::Topological => (),
}
BlockTemplate {
version: version,
previous_header_hash: previous_header_hash,
@ -311,7 +297,7 @@ mod tests {
use db::BlockChainDatabase;
use primitives::hash::H256;
use storage::SharedStore;
use network::{ConsensusParams, ConsensusFork, Network, BitcoinCashConsensusParams};
use network::{ConsensusParams, Network};
use memory_pool::MemoryPool;
use self::test_data::{ChainBuilder, TransactionBuilder};
use super::{BlockAssembler, SizePolicy, NextStep, BlockTemplate};
@ -378,23 +364,14 @@ mod tests {
(BlockAssembler {
max_block_size: 0xffffffff,
max_block_sigops: 0xffffffff,
}.create_new_block(&storage, &pool, 0, 0, &consensus), hash0, hash1)
}.create_new_block(&storage, &pool, 0, &consensus), hash0, hash1)
}
// when topological consensus is used
let topological_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let topological_consensus = ConsensusParams::new(Network::Mainnet);
let (block, hash0, hash1) = construct_block(topological_consensus);
assert!(hash1 < hash0);
assert_eq!(block.transactions[0].hash, hash0);
assert_eq!(block.transactions[1].hash, hash1);
// when canonical consensus is used
let mut canonical_fork = BitcoinCashConsensusParams::new(Network::Mainnet);
canonical_fork.magnetic_anomaly_time = 0;
let canonical_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(canonical_fork));
let (block, hash0, hash1) = construct_block(canonical_consensus);
assert!(hash1 < hash0);
assert_eq!(block.transactions[0].hash, hash1);
assert_eq!(block.transactions[1].hash, hash0);
}
}

View File

@ -161,6 +161,7 @@ mod tests {
script_pubkey: script_pubkey,
}],
lock_time: 0,
joint_split: None,
};
P2shCoinbaseTransactionBuilder {

View File

@ -3,12 +3,20 @@ use ser::Serializable;
use storage::TransactionProvider;
pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) -> u64 {
let inputs_sum = transaction.inputs.iter().map(|input| {
let mut inputs_sum = transaction.inputs.iter().map(|input| {
let input_transaction = store.transaction(&input.previous_output.hash)
.expect("transaction must be verified by caller");
input_transaction.outputs[input.previous_output.index as usize].value
}).sum::<u64>();
let outputs_sum = transaction.outputs.iter().map(|output| output.value).sum();
inputs_sum += transaction.joint_split.as_ref().map(|js| js.descriptions.iter()
.map(|jsd| jsd.value_pub_new)
.sum::<u64>()).unwrap_or_default();
let mut outputs_sum = transaction.outputs.iter().map(|output| output.value).sum();
outputs_sum += transaction.joint_split.as_ref().map(|js| js.descriptions.iter()
.map(|jsd| jsd.value_pub_old)
.sum::<u64>()).unwrap_or_default();
inputs_sum.saturating_sub(outputs_sum)
}
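As a worked example of the accounting above (hypothetical values, not from the codebase): a JoinSplit's value_pub_new moves value out of the shielded pool, so it counts with the transparent inputs, while value_pub_old moves value into it, so it counts with the outputs.
// Hypothetical transaction: one 3_000_000 transparent input,
// one 2_500_000 transparent output, one JoinSplit unshielding 100_000.
let inputs_sum: u64 = 3_000_000 + 100_000; // transparent inputs + value_pub_new
let outputs_sum: u64 = 2_500_000 + 0;      // transparent outputs + value_pub_old
assert_eq!(inputs_sum.saturating_sub(outputs_sum), 600_000); // the fee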
@ -27,7 +35,7 @@ mod tests {
#[test]
fn test_transaction_fee() {
let b0 = test_data::block_builder().header().nonce(1).build()
let b0 = test_data::block_builder().header().nonce(1.into()).build()
.transaction()
.output().value(1_000_000).build()
.output().value(2_000_000).build()
@ -35,7 +43,7 @@ mod tests {
.build();
let tx0 = b0.transactions[0].clone();
let tx0_hash = tx0.hash();
let b1 = test_data::block_builder().header().parent(b0.hash().clone()).nonce(2).build()
let b1 = test_data::block_builder().header().parent(b0.hash().clone()).nonce(2.into()).build()
.transaction()
.input().hash(tx0_hash.clone()).index(0).build()
.input().hash(tx0_hash).index(1).build()

View File

@ -7,3 +7,5 @@ authors = ["debris <marek.kotewicz@gmail.com>"]
lazy_static = "1.0"
chain = { path = "../chain" }
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
rustc-hex = "2"

View File

@ -1,4 +1,3 @@
use hash::H256;
use {Network, Magic, Deployment};
#[derive(Debug, Clone)]
@ -18,346 +17,127 @@ pub struct ConsensusParams {
/// Block height at which BIP65 becomes active.
/// See https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki
pub bip66_height: u32,
/// Selected consensus fork.
pub fork: ConsensusFork,
/// Version bits activation
pub rule_change_activation_threshold: u32,
/// Number of blocks with the same set of rules
pub miner_confirmation_window: u32,
/// BIP68, BIP112, BIP113 deployment
pub csv_deployment: Option<Deployment>,
/// BIP141, BIP143, BIP147 deployment
pub segwit_deployment: Option<Deployment>,
}
#[derive(Debug, Clone)]
/// Bitcoin Cash consensus parameters.
pub struct BitcoinCashConsensusParams {
/// Initial BCH hard fork height.
pub height: u32,
/// Height of difficulty adjustment hardfork.
/// https://reviews.bitcoinabc.org/D601
pub difficulty_adjustion_height: u32,
/// Time of monolith (aka May 2018) hardfork.
/// https://github.com/bitcoincashorg/spec/blob/4fbb0face661e293bcfafe1a2a4744dcca62e50d/may-2018-hardfork.md
pub monolith_time: u32,
/// Time of magnetic anomaly (aka Nov 2018) hardfork.
/// https://github.com/bitcoincashorg/bitcoincash.org/blob/f92f5412f2ed60273c229f68dd8703b6d5d09617/spec/2018-nov-upgrade.md
pub magnetic_anomaly_time: u32,
}
/// Height of Overwinter activation.
/// Details: https://zcash.readthedocs.io/en/latest/rtd_pages/nu_dev_guide.html#overwinter
pub overwinter_height: u32,
/// Height of Sapling activation.
/// Details: https://zcash.readthedocs.io/en/latest/rtd_pages/nu_dev_guide.html#sapling
pub sapling_height: u32,
#[derive(Debug, Clone)]
/// Concurrent consensus rule forks.
pub enum ConsensusFork {
/// No fork.
BitcoinCore,
/// Bitcoin Cash (aka UAHF).
/// `u32` is the height of the first block for which the new consensus rules are applied.
/// Briefly: no SegWit + blocks up to 8MB + replay protection.
/// Technical specification:
/// UAHF Technical Specification - https://github.com/Bitcoin-UAHF/spec/blob/master/uahf-technical-spec.md
/// BUIP-HF Digest for replay protected signature verification across hard forks - https://github.com/Bitcoin-UAHF/spec/blob/master/replay-protected-sighash.md
BitcoinCash(BitcoinCashConsensusParams),
}
#[derive(Debug, Clone, Copy)]
/// Describes the ordering of transactions within a single block.
pub enum TransactionOrdering {
/// Topological transaction ordering: if tx TX2 depends on tx TX1,
/// it should come AFTER TX1 (not necessarily **right** after it).
Topological,
/// Canonical transaction ordering: transactions are ordered by their
/// hash (in ascending order).
Canonical,
/// Interval (in blocks) to calculate average work.
pub pow_averaging_window: u32,
/// % of possible down adjustment of work.
pub pow_max_adjust_down: u32,
/// % of possible up adjustment of work.
pub pow_max_adjust_up: u32,
/// Optimal blocks interval (in seconds).
pub pow_target_spacing: u32,
}
impl ConsensusParams {
pub fn new(network: Network, fork: ConsensusFork) -> Self {
pub fn new(network: Network) -> Self {
match network {
Network::Mainnet | Network::Other(_) => ConsensusParams {
network: network,
bip16_time: 1333238400, // Apr 1 2012
bip34_height: 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8
bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
bip66_height: 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931
segwit_deployment: match fork {
ConsensusFork::BitcoinCore => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1479168000,
timeout: 1510704000,
activation: Some(481824),
}),
ConsensusFork::BitcoinCash(_) => None,
},
fork: fork,
bip16_time: 0,
bip34_height: 1,
bip65_height: 0,
bip66_height: 0,
rule_change_activation_threshold: 1916, // 95%
miner_confirmation_window: 2016,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 1462060800,
timeout: 1493596800,
activation: Some(419328),
}),
csv_deployment: None,
overwinter_height: 347500,
sapling_height: 419200,
pow_averaging_window: 17,
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
},
Network::Testnet => ConsensusParams {
network: network,
bip16_time: 1333238400, // Apr 1 2012
bip34_height: 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8
bip65_height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
bip66_height: 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
segwit_deployment: match fork {
ConsensusFork::BitcoinCore => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 1462060800,
timeout: 1493596800,
activation: Some(834624),
}),
ConsensusFork::BitcoinCash(_) => None,
},
fork: fork,
bip16_time: 0,
bip34_height: 1,
bip65_height: 0,
bip66_height: 0,
rule_change_activation_threshold: 1512, // 75%
miner_confirmation_window: 2016,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 1456790400,
timeout: 1493596800,
activation: Some(770112),
}),
csv_deployment: None,
overwinter_height: 207500,
sapling_height: 280000,
pow_averaging_window: 17,
pow_max_adjust_down: 32,
pow_max_adjust_up: 16,
pow_target_spacing: (2.5 * 60.0) as u32,
},
Network::Regtest | Network::Unitest => ConsensusParams {
network: network,
bip16_time: 1333238400, // Apr 1 2012
bip34_height: 100000000, // not activated on regtest
bip65_height: 1351,
bip66_height: 1251, // used only in rpc tests
segwit_deployment: match fork {
ConsensusFork::BitcoinCore => Some(Deployment {
name: "segwit",
bit: 1,
start_time: 0,
timeout: ::std::u32::MAX,
activation: None,
}),
ConsensusFork::BitcoinCash(_) => None,
},
fork: fork,
bip16_time: 0,
bip34_height: 100000000,
bip65_height: 0,
bip66_height: 0,
rule_change_activation_threshold: 108, // 75%
miner_confirmation_window: 144,
csv_deployment: Some(Deployment {
name: "csv",
bit: 0,
start_time: 0,
timeout: 0,
activation: Some(0),
}),
csv_deployment: None,
overwinter_height: ::std::u32::MAX,
sapling_height: ::std::u32::MAX,
pow_averaging_window: 17,
pow_max_adjust_down: 0,
pow_max_adjust_up: 0,
pow_target_spacing: (2.5 * 60.0) as u32,
},
}
}
pub fn magic(&self) -> Magic {
self.network.magic(&self.fork)
self.network.magic()
}
pub fn is_bip30_exception(&self, hash: &H256, height: u32) -> bool {
(height == 91842 && hash == &H256::from_reversed_str("00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(height == 91880 && hash == &H256::from_reversed_str("00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"))
pub fn averaging_window_timespan(&self) -> u32 {
self.pow_averaging_window * self.pow_target_spacing
}
/// Returns true if SegWit is possible on this chain.
pub fn is_segwit_possible(&self) -> bool {
match self.fork {
// SegWit is not supported in (our?) regtests
ConsensusFork::BitcoinCore if self.network != Network::Regtest => true,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => false,
pub fn min_actual_timespan(&self) -> u32 {
(self.averaging_window_timespan() * (100 - self.pow_max_adjust_up)) / 100
}
pub fn max_actual_timespan(&self) -> u32 {
(self.averaging_window_timespan() * (100 + self.pow_max_adjust_down)) / 100
}
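// For the mainnet parameters above (window 17, spacing 150 s, up 16%, down 32%):
// averaging_window_timespan = 17 * 150 = 2550 s,
// min_actual_timespan = 2550 * (100 - 16) / 100 = 2142 s,
// max_actual_timespan = 2550 * (100 + 32) / 100 = 3366 s.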
pub fn max_block_size(&self) -> usize {
2_000_000
}
pub fn max_block_sigops(&self) -> usize {
20_000
}
pub fn absolute_max_transaction_size(&self) -> usize {
2_000_000
}
pub fn max_transaction_size(&self, height: u32) -> usize {
if height >= self.sapling_height {
2_000_000
} else {
100_000
}
}
}
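A minimal illustration of the height-dependent transaction size limit, assuming the mainnet parameters above (Sapling at height 419_200) and `network::{ConsensusParams, Network}` in scope:

let consensus = ConsensusParams::new(Network::Mainnet);
assert_eq!(consensus.max_transaction_size(419_199), 100_000);   // pre-Sapling
assert_eq!(consensus.max_transaction_size(419_200), 2_000_000); // Sapling active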
impl ConsensusFork {
/// Absolute (across all forks) maximum block size. Currently 32MB for post-monolith BitcoinCash
pub fn absolute_maximum_block_size() -> usize {
32_000_000
}
/// Absolute (across all forks) maximum number of sigops in a single block. Currently max(sigops) for an 8MB post-HF BitcoinCash block
pub fn absolute_maximum_block_sigops() -> usize {
160_000
}
/// Witness scale factor (equal among all forks)
pub fn witness_scale_factor() -> usize {
4
}
pub fn activation_height(&self) -> u32 {
match *self {
ConsensusFork::BitcoinCore => 0,
ConsensusFork::BitcoinCash(ref fork) => fork.height,
}
}
pub fn min_transaction_size(&self, median_time_past: u32) -> usize {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.magnetic_anomaly_time => 100,
_ => 0,
}
}
pub fn max_transaction_size(&self) -> usize {
// BitcoinCash: according to REQ-5: max size of tx is still 1_000_000
// SegWit: size * 4 <= 4_000_000 ===> max size of tx is still 1_000_000
1_000_000
}
pub fn min_block_size(&self, height: u32) -> usize {
match *self {
// size of first fork block must be larger than 1MB
ConsensusFork::BitcoinCash(ref fork) if height == fork.height => 1_000_001,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => 0,
}
}
pub fn max_block_size(&self, height: u32, median_time_past: u32) -> usize {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.monolith_time => 32_000_000,
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => 8_000_000,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => 1_000_000,
}
}
pub fn max_block_sigops(&self, height: u32, block_size: usize) -> usize {
match *self {
// according to REQ-5: max_block_sigops = 20000 * ceil((max(blocksize_bytes, 1000000) / 1000000))
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height =>
20_000 * (1 + (block_size - 1) / 1_000_000),
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => 20_000,
}
}
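// Worked REQ-5 example: a 2_000_000-byte post-activation block allows
// 20_000 * (1 + (2_000_000 - 1) / 1_000_000) = 40_000 sigops (integer division),
// as the fork tests below assert.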
pub fn max_block_sigops_cost(&self, height: u32, block_size: usize) -> usize {
match *self {
ConsensusFork::BitcoinCash(_) =>
self.max_block_sigops(height, block_size) * Self::witness_scale_factor(),
ConsensusFork::BitcoinCore =>
80_000,
}
}
pub fn max_block_weight(&self, _height: u32) -> usize {
match *self {
ConsensusFork::BitcoinCore =>
4_000_000,
ConsensusFork::BitcoinCash(_) =>
unreachable!("BitcoinCash has no SegWit; weight is only checked with SegWit activated; qed"),
}
}
pub fn transaction_ordering(&self, median_time_past: u32) -> TransactionOrdering {
match *self {
ConsensusFork::BitcoinCash(ref fork) if median_time_past >= fork.magnetic_anomaly_time
=> TransactionOrdering::Canonical,
_ => TransactionOrdering::Topological,
}
}
}
impl BitcoinCashConsensusParams {
pub fn new(network: Network) -> Self {
match network {
Network::Mainnet | Network::Other(_) => BitcoinCashConsensusParams {
height: 478559,
difficulty_adjustion_height: 504031,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
Network::Testnet => BitcoinCashConsensusParams {
height: 1155876,
difficulty_adjustion_height: 1188697,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
Network::Regtest | Network::Unitest => BitcoinCashConsensusParams {
height: 0,
difficulty_adjustion_height: 0,
monolith_time: 1526400000,
magnetic_anomaly_time: 1542300000,
},
}
}
}
#[cfg(test)]
mod tests {
use super::super::Network;
use super::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
#[test]
fn test_consensus_params_bip34_height() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).bip34_height, 227931);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).bip34_height, 21111);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).bip34_height, 100000000);
}
#[test]
fn test_consensus_params_bip65_height() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).bip65_height, 388381);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).bip65_height, 581885);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).bip65_height, 1351);
}
#[test]
fn test_consensus_params_bip66_height() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).bip66_height, 363725);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).bip66_height, 330776);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).bip66_height, 1251);
}
#[test]
fn test_consensus_activation_threshold() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).rule_change_activation_threshold, 1916);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).rule_change_activation_threshold, 1512);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).rule_change_activation_threshold, 108);
}
#[test]
fn test_consensus_miner_confirmation_window() {
assert_eq!(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore).miner_confirmation_window, 2016);
assert_eq!(ConsensusParams::new(Network::Regtest, ConsensusFork::BitcoinCore).miner_confirmation_window, 144);
}
#[test]
fn test_consensus_fork_min_block_size() {
assert_eq!(ConsensusFork::BitcoinCore.min_block_size(0), 0);
let fork = ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet));
assert_eq!(fork.min_block_size(0), 0);
assert_eq!(fork.min_block_size(fork.activation_height()), 1_000_001);
}
#[test]
fn test_consensus_fork_max_transaction_size() {
assert_eq!(ConsensusFork::BitcoinCore.max_transaction_size(), 1_000_000);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).max_transaction_size(), 1_000_000);
}
#[test]
fn test_consensus_fork_min_transaction_size() {
assert_eq!(ConsensusFork::BitcoinCore.min_transaction_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCore.min_transaction_size(2000000000), 0);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).min_transaction_size(0), 0);
assert_eq!(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)).min_transaction_size(2000000000), 100);
}
#[test]
fn test_consensus_fork_max_block_sigops() {
assert_eq!(ConsensusFork::BitcoinCore.max_block_sigops(0, 1_000_000), 20_000);
let fork = ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet));
assert_eq!(fork.max_block_sigops(0, 1_000_000), 20_000);
assert_eq!(fork.max_block_sigops(fork.activation_height(), 2_000_000), 40_000);
assert_eq!(fork.max_block_sigops(fork.activation_height() + 100, 3_000_000), 60_000);
}
pub fn max_transaction_value(&self) -> u64 {
21_000_000 * 100_000_000 // No amount larger than this (in satoshi) is valid
}
}
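For reference, that cap works out to 21_000_000 * 100_000_000 = 2_100_000_000_000_000 base units, i.e. the full 21-million-coin supply expressed in its smallest denomination; no single amount above it is treated as valid.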

View File

@ -3,6 +3,8 @@ extern crate lazy_static;
extern crate chain;
extern crate primitives;
extern crate serialization;
extern crate rustc_hex as hex;
mod consensus;
mod deployments;
@ -10,6 +12,6 @@ mod network;
pub use primitives::{hash, compact};
pub use consensus::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams, TransactionOrdering};
pub use consensus::ConsensusParams;
pub use deployments::Deployment;
pub use network::{Magic, Network};

View File

@ -5,23 +5,17 @@ use compact::Compact;
use chain::Block;
use primitives::hash::H256;
use primitives::bigint::U256;
use {ConsensusFork};
const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B;
const MAGIC_REGTEST: u32 = 0xDAB5BFFA;
const MAGIC_UNITEST: u32 = 0x00000000;
const BITCOIN_CASH_MAGIC_MAINNET: u32 = 0xE8F3E1E3;
const BITCOIN_CASH_MAGIC_TESTNET: u32 = 0xF4F3E5F4;
const BITCOIN_CASH_MAGIC_REGTEST: u32 = 0xFABFB5DA;
const ZCASH_MAGIC_MAINNET: u32 = 0x6427e924;
const ZCASH_MAGIC_TESTNET: u32 = 0xbff91afa;
const ZCASH_MAGIC_REGTEST: u32 = 0x5f3fe8aa;
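These magics are plain u32 values serialized little-endian on the wire, which is why mainnet's 0x6427e924 appears as the byte prefix 24e92764 in the message test vectors further down. A small sketch using the workspace's byteorder crate:

use byteorder::{LittleEndian, WriteBytesExt};

let mut buf = Vec::new();
buf.write_u32::<LittleEndian>(0x6427e924).unwrap();
assert_eq!(buf, vec![0x24, 0xe9, 0x27, 0x64]); // "24e92764" on the wire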
lazy_static! {
static ref MAX_BITS_MAINNET: U256 = "00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
static ref ZCASH_MAX_BITS_MAINNET: U256 = "0007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
.expect("hardcoded value should parse without errors");
static ref MAX_BITS_TESTNET: U256 = "00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
static ref ZCASH_MAX_BITS_TESTNET: U256 = "07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
.expect("hardcoded value should parse without errors");
static ref MAX_BITS_REGTEST: U256 = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".parse()
static ref ZCASH_MAX_BITS_REGTEST: U256 = "0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f".parse()
.expect("hardcoded value should parse without errors");
}
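These are the proof-of-work limits: the expanded target encoded by a header's bits field may never exceed the network's maximum, so each constant fixes the easiest difficulty the network accepts. Note that the Zcash mainnet limit (leading 0x0007...) is far below the Bitcoin one it replaces (leading 0x00000000ffff...).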
@ -44,33 +38,29 @@ pub enum Network {
}
impl Network {
pub fn magic(&self, fork: &ConsensusFork) -> Magic {
match (fork, *self) {
(&ConsensusFork::BitcoinCash(_), Network::Mainnet) => BITCOIN_CASH_MAGIC_MAINNET,
(&ConsensusFork::BitcoinCash(_), Network::Testnet) => BITCOIN_CASH_MAGIC_TESTNET,
(&ConsensusFork::BitcoinCash(_), Network::Regtest) => BITCOIN_CASH_MAGIC_REGTEST,
(_, Network::Mainnet) => MAGIC_MAINNET,
(_, Network::Testnet) => MAGIC_TESTNET,
(_, Network::Regtest) => MAGIC_REGTEST,
(_, Network::Unitest) => MAGIC_UNITEST,
(_, Network::Other(value)) => value,
pub fn magic(&self) -> Magic {
match *self {
Network::Mainnet => ZCASH_MAGIC_MAINNET,
Network::Testnet => ZCASH_MAGIC_TESTNET,
Network::Regtest | Network::Unitest => ZCASH_MAGIC_REGTEST,
Network::Other(value) => value,
}
}
pub fn max_bits(&self) -> U256 {
match *self {
Network::Mainnet | Network::Other(_) => MAX_BITS_MAINNET.clone(),
Network::Testnet => MAX_BITS_TESTNET.clone(),
Network::Regtest => MAX_BITS_REGTEST.clone(),
Network::Mainnet => ZCASH_MAX_BITS_MAINNET.clone(),
Network::Testnet | Network::Regtest => ZCASH_MAX_BITS_TESTNET.clone(),
Network::Other(_) => Compact::max_value().into(),
Network::Unitest => Compact::max_value().into(),
}
}
pub fn port(&self) -> u16 {
match *self {
Network::Mainnet | Network::Other(_) => 8333,
Network::Testnet => 18333,
Network::Regtest | Network::Unitest => 18444,
Network::Mainnet | Network::Other(_) => 8233,
Network::Testnet => 18233,
Network::Regtest | Network::Unitest => 18344,
}
}
@ -84,59 +74,18 @@ impl Network {
pub fn genesis_block(&self) -> Block {
match *self {
Network::Mainnet | Network::Other(_) => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Testnet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff001d1aa4ae180101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Regtest | Network::Unitest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Mainnet | Network::Other(_) => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Testnet => "040000000000000000000000000000000000000000000000000000000000000000000000db4d7a85b768123f1dff1d4c4cece70083b2d27e117b4ac2e31d087988a5eac4000000000000000000000000000000000000000000000000000000000000000090041358ffff071f5712000000000000000000000000000000000000000000000000000000000000fd4005000a889f00854b8665cd555f4656f68179d31ccadc1b1f7fb0952726313b16941da348284d67add4686121d4e3d930160c1348d8191c25f12b267a6a9c131b5031cbf8af1f79c9d513076a216ec87ed045fa966e01214ed83ca02dc1797270a454720d3206ac7d931a0a680c5c5e099057592570ca9bdf6058343958b31901fce1a15a4f38fd347750912e14004c73dfe588b903b6c03166582eeaf30529b14072a7b3079e3a684601b9b3024054201f7440b0ee9eb1a7120ff43f713735494aa27b1f8bab60d7f398bca14f6abb2adbf29b04099121438a7974b078a11635b594e9170f1086140b4173822dd697894483e1c6b4e8b8dcd5cb12ca4903bc61e108871d4d915a9093c18ac9b02b6716ce1013ca2c1174e319c1a570215bc9ab5f7564765f7be20524dc3fdf8aa356fd94d445e05ab165ad8bb4a0db096c097618c81098f91443c719416d39837af6de85015dca0de89462b1d8386758b2cf8a99e00953b308032ae44c35e05eb71842922eb69797f68813b59caf266cb6c213569ae3280505421a7e3a0a37fdf8e2ea354fc5422816655394a9454bac542a9298f176e211020d63dee6852c40de02267e2fc9d5e1ff2ad9309506f02a1a71a0501b16d0d36f70cdfd8de78116c0c506ee0b8ddfdeb561acadf31746b5a9dd32c21930884397fb1682164cb565cc14e089d66635a32618f7eb05fe05082b8a3fae620571660a6b89886eac53dec109d7cbb6930ca698a168f301a950be152da1be2b9e07516995e20baceebecb5579d7cdbc16d09f3a50cb3c7dffe33f26686d4ff3f8946ee6475e98cf7b3cf9062b6966e838f865ff3de5fb064a37a21da7bb8dfd2501a29e184f207caaba364f36f2329a77515dcb710e29ffbf73e2bbd773fab1f9a6b005567affff605c132e4e4dd69f36bd201005458cfbd2c658701eb2a700251cefd886b1e674ae816d3f719bac64be649c172ba27a4fd55947d95d53ba4cbc73de97b8af5ed4840b659370c556e7376457f51e5ebb66018849923db82c1c9a819f173cccdb8f3324b239609a300018d0fb094adf5bd7cbb3834c69e6d0b3798065c525b20f040e965e1a161af78ff7561cd874f5f1b75aa0bc77f720589e1b810f831eac5073e6dd46d00a2793f70f7427f0f798f2f53a67e615e65d356e66fe40609a958a05edb4c175bcc383ea0530e67ddbe479a898943c6e3074c6fcc252d6014de3a3d292b03f0d88d312fe221be7be7e3c59d07fa0f2f4029e364f1f355c5d01fa53770d0cd76d82bf7e60f6903bc1beb772e6fde4a70be51d9c7e03c8d6d8dfb361a234ba47c470fe630820bbd920715621b9fbedb49fcee165ead0875e6c2b1af16f50b5d6140cc981122fcbcf7c5a4e3772b3661b628e08380abc545957e59f634705b1bbde2f0b4e055a5ec5676d859be77e20962b645e051a880fddb0180b4555789e1f9344a436a84dc5579e2553f1e5fb0a599c137be36cabbed0319831fea3fddf94ddc7971e4bcf02cdc93294a9aab3e3b13e3b058235b4f4ec06ba4ceaa49d675b4ba80716f3bc6976b1fbf9c8bf1f3e3a4dc1cd83ef9cf816667fb94f1e923ff63fef072e6a19321e4812f96cb0ffa864da50ad74deb76917a336f31dce03ed5f0303aad5e6a83634f9fcc371096f8288b8f02ddded5ff1bb9d49331e4a84dbe1543164438fde9ad71dab024779dcdde0b6602b5ae0a6265c14b94edd83b37403f4b78fcd2ed555b596402c28ee81d87a909c4e8722b30c71ecdd861b05f61f8b1231795c76adba2fdefa451b283a5d527955b9f3de1b9828e7b2e74123dd47062ddcc09b05e7fa13cb2212a6fdbc65d7e852cec463ec6fd929f5b8483cf3052113b13dac91b69f49d1b7d1aec01c4a68e41ce1570101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff071f0104455a6361736830623963346565663862376363343137656535303031653335303039383462366665613335363833613763616331343161303433633432303634383335643334ffffffff010000000000000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Network::Regtest | Network::Unitest => "TODO".into(),
}
}
pub fn default_verification_edge(&self) -> H256 {
match *self {
Network::Mainnet => H256::from_reversed_str("0000000000000000030abc968e1bd635736e880b946085c93152969b9a81a6e2"),
Network::Testnet => H256::from_reversed_str("000000000871ee6842d3648317ccc8a435eb8cc3c2429aee94faff9ba26b05a0"),
_ => self.genesis_block().hash(),
}
self.genesis_block().hash()
}
}
#[cfg(test)]
mod tests {
use compact::Compact;
use {ConsensusFork};
use super::{
Network, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST, MAGIC_UNITEST,
MAX_BITS_MAINNET, MAX_BITS_TESTNET, MAX_BITS_REGTEST,
};
#[test]
fn test_network_magic_number() {
assert_eq!(MAGIC_MAINNET, Network::Mainnet.magic(&ConsensusFork::BitcoinCore));
assert_eq!(MAGIC_TESTNET, Network::Testnet.magic(&ConsensusFork::BitcoinCore));
assert_eq!(MAGIC_REGTEST, Network::Regtest.magic(&ConsensusFork::BitcoinCore));
assert_eq!(MAGIC_UNITEST, Network::Unitest.magic(&ConsensusFork::BitcoinCore));
}
#[test]
fn test_network_max_bits() {
assert_eq!(Network::Mainnet.max_bits(), *MAX_BITS_MAINNET);
assert_eq!(Network::Testnet.max_bits(), *MAX_BITS_TESTNET);
assert_eq!(Network::Regtest.max_bits(), *MAX_BITS_REGTEST);
assert_eq!(Network::Unitest.max_bits(), Compact::max_value().into());
}
#[test]
fn test_network_port() {
assert_eq!(Network::Mainnet.port(), 8333);
assert_eq!(Network::Testnet.port(), 18333);
assert_eq!(Network::Regtest.port(), 18444);
assert_eq!(Network::Unitest.port(), 18444);
}
#[test]
fn test_network_rpc_port() {
assert_eq!(Network::Mainnet.rpc_port(), 8332);
assert_eq!(Network::Testnet.rpc_port(), 18332);
assert_eq!(Network::Regtest.rpc_port(), 18443);
assert_eq!(Network::Unitest.rpc_port(), 18443);
}
// TODO: tests
}

View File

@ -125,7 +125,6 @@ impl<A> Future for Handshake<A> where A: AsyncRead + AsyncWrite {
},
HandshakeState::SendVerack { ref mut version, ref mut future } => {
let (stream, _) = try_ready!(future.poll());
let version = version.take().expect("verack must be preceded by version");
HandshakeState::ReceiveVerack {
@ -211,7 +210,7 @@ mod tests {
use tokio_io::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use ser::Stream;
use network::{Network, ConsensusFork, BitcoinCashConsensusParams};
use network::Network;
use message::{Message, Error};
use message::types::Verack;
use message::types::version::{Version, V0, V106, V70001};
@ -287,7 +286,7 @@ mod tests {
#[test]
fn test_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let local_version = local_version();
let remote_version = remote_version();
@ -317,7 +316,7 @@ mod tests {
#[test]
fn test_accept_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let local_version = local_version();
let remote_version = remote_version();
@ -346,7 +345,7 @@ mod tests {
#[test]
fn test_self_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let remote_version = local_version();
let local_version = local_version();
@ -367,7 +366,7 @@ mod tests {
#[test]
fn test_accept_self_handshake() {
let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic = Network::Mainnet.magic();
let version = 70012;
let remote_version = local_version();
let local_version = local_version();
@ -385,26 +384,4 @@ mod tests {
let hs = accept_handshake(test_io, magic, local_version, 0).wait().unwrap();
assert_eq!(hs.1.unwrap_err(), expected);
}
#[test]
fn test_fails_to_accept_other_fork_node() {
let magic1 = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
let magic2 = Network::Mainnet.magic(&ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)));
let version = 70012;
let local_version = local_version();
let remote_version = remote_version();
let mut remote_stream = Stream::new();
remote_stream.append_slice(Message::new(magic2, version, &remote_version).unwrap().as_ref());
let test_io = TestIo {
read: io::Cursor::new(remote_stream.out()),
write: Bytes::default(),
};
let expected = Error::InvalidMagic;
let hs = accept_handshake(test_io, magic1, local_version, 0).wait().unwrap();
assert_eq!(hs.1.unwrap_err(), expected);
}
}

View File

@ -63,31 +63,31 @@ impl<A> Future for ReadAnyMessage<A> where A: AsyncRead {
mod tests {
use futures::Future;
use bytes::Bytes;
use network::{Network, ConsensusFork};
use network::Network;
use message::Error;
use super::read_any_message;
#[test]
fn test_read_any_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da97786".into();
let raw: Bytes = "24e9276470696e6700000000000000000800000083c00c765845303b6da97786".into();
let name = "ping".into();
let nonce = "5845303b6da97786".into();
let expected = (name, nonce);
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap(), Ok(expected));
assert_eq!(read_any_message(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap(), Err(Error::InvalidMagic));
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic()).wait().unwrap(), Ok(expected));
assert_eq!(read_any_message(raw.as_ref(), Network::Testnet.magic()).wait().unwrap(), Err(Error::InvalidMagic));
}
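// Layout of the raw vector above: 24e92764 = magic (0x6427e924, little-endian),
// 70696e67 plus eight zero bytes = 12-byte null-padded command "ping",
// 08000000 = payload length (8, LE), 83c00c76 = checksum,
// 5845303b6da97786 = 8-byte nonce payload (0x8677a96d3b304558 as LE u64).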
#[test]
fn test_read_too_short_any_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da977".into();
assert!(read_any_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().is_err());
let raw: Bytes = "24e9276470696e6700000000000000000800000083c00c765845303b6da977".into();
assert!(read_any_message(raw.as_ref(), Network::Mainnet.magic()).wait().is_err());
}
#[test]
fn test_read_any_message_with_invalid_checksum() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c01c765845303b6da97786".into();
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap(), Err(Error::InvalidChecksum));
let raw: Bytes = "24e9276470696e6700000000000000000800000083c01c765845303b6da97786".into();
assert_eq!(read_any_message(raw.as_ref(), Network::Mainnet.magic()).wait().unwrap(), Err(Error::InvalidChecksum));
}
}

View File

@ -32,33 +32,33 @@ impl<A> Future for ReadHeader<A> where A: AsyncRead {
mod tests {
use futures::Future;
use bytes::Bytes;
use network::{Network, ConsensusFork};
use network::Network;
use message::{MessageHeader, Error};
use super::read_header;
#[test]
fn test_read_header() {
let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed52399b".into();
let raw: Bytes = "24e927646164647200000000000000001f000000ed52399b".into();
let expected = MessageHeader {
magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore),
magic: Network::Mainnet.magic(),
command: "addr".into(),
len: 0x1f,
checksum: "ed52399b".into(),
};
assert_eq!(read_header(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Ok(expected));
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_header(raw.as_ref(), Network::Mainnet.magic()).wait().unwrap().1, Ok(expected));
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic()).wait().unwrap().1, Err(Error::InvalidMagic));
}
#[test]
fn test_read_header_with_invalid_magic() {
let raw: Bytes = "f9beb4d86164647200000000000000001f000000ed52399b".into();
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic()).wait().unwrap().1, Err(Error::InvalidMagic));
}
#[test]
fn test_read_too_short_header() {
let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed5239".into();
assert!(read_header(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().is_err());
let raw: Bytes = "24e927646164647200000000000000001f000000ed5239".into();
assert!(read_header(raw.as_ref(), Network::Mainnet.magic()).wait().is_err());
}
}

View File

@ -13,7 +13,7 @@ pub fn read_message<M, A>(a: A, magic: Magic, version: u32) -> ReadMessage<M, A>
version: version,
future: read_header(a, magic),
},
message_type: PhantomData
message_type: PhantomData,
}
}
@ -45,6 +45,7 @@ impl<M, A> Future for ReadMessage<M, A> where A: AsyncRead, M: Payload {
Ok(header) => header,
Err(err) => return Ok((read, Err(err)).into()),
};
if header.command != M::command() {
return Ok((read, Err(Error::InvalidCommand)).into());
}
@ -69,30 +70,30 @@ impl<M, A> Future for ReadMessage<M, A> where A: AsyncRead, M: Payload {
mod tests {
use futures::Future;
use bytes::Bytes;
use network::{Network, ConsensusFork};
use network::Network;
use message::Error;
use message::types::{Ping, Pong};
use super::read_message;
#[test]
fn test_read_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da97786".into();
let raw: Bytes = "24e9276470696e6700000000000000000800000083c00c765845303b6da97786".into();
let ping = Ping::new(u64::from_str_radix("8677a96d3b304558", 16).unwrap());
assert_eq!(read_message(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Ok(ping));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_message::<Pong, _>(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Err(Error::InvalidCommand));
assert_eq!(read_message(raw.as_ref(), Network::Mainnet.magic(), 0).wait().unwrap().1, Ok(ping));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Testnet.magic(), 0).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_message::<Pong, _>(raw.as_ref(), Network::Mainnet.magic(), 0).wait().unwrap().1, Err(Error::InvalidCommand));
}
#[test]
fn test_read_too_short_message() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da977".into();
assert!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().is_err());
let raw: Bytes = "24e9276470696e6700000000000000000800000083c00c765845303b6da977".into();
assert!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(), 0).wait().is_err());
}
#[test]
fn test_read_message_with_invalid_checksum() {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c01c765845303b6da97786".into();
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 0).wait().unwrap().1, Err(Error::InvalidChecksum));
let raw: Bytes = "24e9276470696e6700000000000000000800000083c01c765845303b6da97786".into();
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Network::Mainnet.magic(), 0).wait().unwrap().1, Err(Error::InvalidChecksum));
}
}

View File

@ -61,12 +61,7 @@ impl PeerContext {
/// Request is always automatically sent.
pub fn send_request<T>(&self, payload: &T) where T: Payload {
self.send_request_with_flags(payload, 0)
}
/// Request is always automatically sent.
pub fn send_request_with_flags<T>(&self, payload: &T, serialization_flags: u32) where T: Payload {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, serialization_flags);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.context.spawn(send);
}
@ -99,14 +94,14 @@ impl PeerContext {
let mut queue = self.response_queue.lock();
if is_final {
if sync.permission_for_response(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, 0);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.context.spawn(send);
self.send_awaiting(&mut sync, &mut queue, id);
} else {
queue.push_finished_response(id, self.to_message(payload).into());
}
} else if sync.is_permitted(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload, 0);
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.context.spawn(send);
} else {
queue.push_unfinished_response(id, self.to_message(payload).into());

View File

@ -127,7 +127,6 @@ impl Context {
let needed = context.connection_counter.outbound_connections_needed() as usize;
if needed != 0 {
// TODO: pass Services::with_bitcoin_cash(true) after HF block
let used_addresses = context.connections.addresses();
let peers = context.node_table.read().nodes_with_services(&Services::default(), context.config.internet_protocol, &used_addresses, needed);
let addresses = peers.into_iter()
@ -167,9 +166,9 @@ impl Context {
channel.session().initialize();
Context::on_message(context, channel)
},
Ok(DeadlineStatus::Meet(Err(_))) => {
Ok(DeadlineStatus::Meet(Err(err))) => {
// protocol error
trace!("Handshake with {} failed", socket);
trace!("Handshake with {} failed with: {}", socket, err);
// TODO: close socket
context.node_table.write().note_failure(&socket);
context.connection_counter.note_close_outbound_connection();
@ -316,11 +315,11 @@ impl Context {
}
/// Send message to a channel with given peer id.
pub fn send_to_peer<T>(context: Arc<Context>, peer: PeerId, payload: &T, serialization_flags: u32) -> IoFuture<()> where T: Payload {
pub fn send_to_peer<T>(context: Arc<Context>, peer: PeerId, payload: &T) -> IoFuture<()> where T: Payload {
match context.connections.channel(peer) {
Some(channel) => {
let info = channel.peer_info();
let message = Message::with_flags(info.magic, info.version, payload, serialization_flags).expect("failed to create outgoing message");
let message = Message::new(info.magic, info.version, payload).expect("failed to create outgoing message");
channel.session().stats().lock().report_send(T::command().into(), message.len());
Context::send(context, channel, message)
},

View File

@ -3,7 +3,6 @@ use bytes::Bytes;
use message::{Command, Error, Payload, Services, types, deserialize_payload};
use protocol::Protocol;
use net::PeerContext;
use ser::SERIALIZE_TRANSACTION_WITNESS;
pub type InboundSyncConnectionRef = Box<InboundSyncConnection>;
pub type OutboundSyncConnectionRef = Arc<OutboundSyncConnection>;
@ -30,10 +29,6 @@ pub trait InboundSyncConnection : Send + Sync {
fn on_merkleblock(&self, message: types::MerkleBlock);
fn on_sendheaders(&self, message: types::SendHeaders);
fn on_feefilter(&self, message: types::FeeFilter);
fn on_send_compact(&self, message: types::SendCompact);
fn on_compact_block(&self, message: types::CompactBlock);
fn on_get_block_txn(&self, message: types::GetBlockTxn);
fn on_block_txn(&self, message: types::BlockTxn);
fn on_notfound(&self, message: types::NotFound);
}
@ -44,8 +39,6 @@ pub trait OutboundSyncConnection : Send + Sync {
fn send_getheaders(&self, message: &types::GetHeaders);
fn send_transaction(&self, message: &types::Tx);
fn send_block(&self, message: &types::Block);
fn send_witness_transaction(&self, message: &types::Tx);
fn send_witness_block(&self, message: &types::Block);
fn send_headers(&self, message: &types::Headers);
fn respond_headers(&self, message: &types::Headers, id: u32);
fn send_mempool(&self, message: &types::MemPool);
@ -55,10 +48,6 @@ pub trait OutboundSyncConnection : Send + Sync {
fn send_merkleblock(&self, message: &types::MerkleBlock);
fn send_sendheaders(&self, message: &types::SendHeaders);
fn send_feefilter(&self, message: &types::FeeFilter);
fn send_send_compact(&self, message: &types::SendCompact);
fn send_compact_block(&self, message: &types::CompactBlock);
fn send_get_block_txn(&self, message: &types::GetBlockTxn);
fn send_block_txn(&self, message: &types::BlockTxn);
fn send_notfound(&self, message: &types::NotFound);
fn ignored(&self, id: u32);
fn close(&self);
@ -101,14 +90,6 @@ impl OutboundSyncConnection for OutboundSync {
self.context.send_request(message);
}
fn send_witness_transaction(&self, message: &types::Tx) {
self.context.send_request_with_flags(message, SERIALIZE_TRANSACTION_WITNESS);
}
fn send_witness_block(&self, message: &types::Block) {
self.context.send_request_with_flags(message, SERIALIZE_TRANSACTION_WITNESS);
}
fn send_headers(&self, message: &types::Headers) {
self.context.send_request(message);
}
@ -145,22 +126,6 @@ impl OutboundSyncConnection for OutboundSync {
self.context.send_request(message);
}
fn send_send_compact(&self, message: &types::SendCompact) {
self.context.send_request(message);
}
fn send_compact_block(&self, message: &types::CompactBlock) {
self.context.send_request(message);
}
fn send_get_block_txn(&self, message: &types::GetBlockTxn) {
self.context.send_request(message);
}
fn send_block_txn(&self, message: &types::BlockTxn) {
self.context.send_request(message);
}
fn send_notfound(&self, message: &types::NotFound) {
self.context.send_request(message);
}
@ -260,22 +225,6 @@ impl Protocol for SyncProtocol {
let message: types::FeeFilter = try!(deserialize_payload(payload, version));
self.inbound_connection.on_feefilter(message);
}
else if command == &types::SendCompact::command() {
let message: types::SendCompact = try!(deserialize_payload(payload, version));
self.inbound_connection.on_send_compact(message);
}
else if command == &types::CompactBlock::command() {
let message: types::CompactBlock = try!(deserialize_payload(payload, version));
self.inbound_connection.on_compact_block(message);
}
else if command == &types::GetBlockTxn::command() {
let message: types::GetBlockTxn = try!(deserialize_payload(payload, version));
self.inbound_connection.on_get_block_txn(message);
}
else if command == &types::BlockTxn::command() {
let message: types::BlockTxn = try!(deserialize_payload(payload, version));
self.inbound_connection.on_block_txn(message);
}
else if command == &types::NotFound::command() {
let message: types::NotFound = try!(deserialize_payload(payload, version));
self.inbound_connection.on_notfound(message);

View File

@ -607,9 +607,9 @@ mod tests {
let s0: SocketAddr = "127.0.0.1:8000".parse().unwrap();
let s1: SocketAddr = "127.0.0.1:8001".parse().unwrap();
let mut table = NodeTable::new(Services::default().with_network(true).with_bitcoin_cash(true));
let mut table = NodeTable::new(Services::default().with_network(true).with_xthin(true));
table.insert(s0, Services::default().with_network(true));
table.insert(s1, Services::default().with_network(true).with_bitcoin_cash(true));
table.insert(s1, Services::default().with_network(true).with_xthin(true));
assert_eq!(table.nodes_with_services(&Services::default(), InternetProtocol::default(), &HashSet::new(), 1)[0].address(), s1);
table.note_failure(&s1);

View File

@ -9,12 +9,6 @@ args:
- regtest:
long: regtest
help: Use a private network for regression tests.
- btc:
long: btc
help: Use Bitcoin Core verification rules (BTC).
- bch:
long: bch
help: Use Bitcoin Cash verification rules (BCH).
- connect:
short: c
long: connect

View File

@ -6,7 +6,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use sync::{create_sync_peers, create_local_sync_node, create_sync_connection_factory, SyncListener};
use primitives::hash::H256;
use util::{init_db, node_table_path};
use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM};
use {config, p2p, ZCASH_PROTOCOL_VERSION, ZCASH_PROTOCOL_MINIMUM};
use super::super::rpc;
enum BlockNotifierTask {
@ -93,8 +93,8 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
inbound_connections: cfg.inbound_connections,
outbound_connections: cfg.outbound_connections,
connection: p2p::NetConfig {
protocol_version: PROTOCOL_VERSION,
protocol_minimum: PROTOCOL_MINIMUM,
protocol_version: ZCASH_PROTOCOL_VERSION,
protocol_minimum: ZCASH_PROTOCOL_MINIMUM,
magic: cfg.consensus.magic(),
local_address: SocketAddr::new(cfg.host, cfg.port),
services: cfg.services,

View File

@ -2,9 +2,9 @@ use std::net;
use clap;
use storage;
use message::Services;
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use network::{Network, ConsensusParams};
use p2p::InternetProtocol;
use seednodes::{mainnet_seednodes, testnet_seednodes, bitcoin_cash_seednodes, bitcoin_cash_testnet_seednodes};
use seednodes::{zcash_seednodes, zcash_testnet_seednodes};
use rpc_apis::ApiSet;
use {USER_AGENT, REGTEST_USER_AGENT};
use primitives::hash::H256;
@ -58,8 +58,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
(true, true) => return Err("Only one testnet option can be used".into()),
};
let consensus_fork = parse_consensus_fork(network, &db, &matches)?;
let consensus = ConsensusParams::new(network, consensus_fork);
let consensus = ConsensusParams::new(network);
let (in_connections, out_connections) = match network {
Network::Testnet | Network::Mainnet | Network::Other(_) => (10, 10),
@ -72,10 +71,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
};
// to skip the idiotic 30-second delay in test-scripts
let user_agent_suffix = match consensus.fork {
ConsensusFork::BitcoinCore => "",
ConsensusFork::BitcoinCash(_) => "/UAHF",
};
let user_agent_suffix = "";
let user_agent = match network {
Network::Testnet | Network::Mainnet | Network::Unitest | Network::Other(_) => format!("{}{}", USER_AGENT, user_agent_suffix),
Network::Regtest => REGTEST_USER_AGENT.into(),
@ -98,12 +94,10 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let seednodes: Vec<String> = match matches.value_of("seednode") {
Some(s) => vec![s.parse().map_err(|_| "Invalid seednode".to_owned())?],
None => match (network, &consensus.fork) {
(Network::Mainnet, &ConsensusFork::BitcoinCash(_)) => bitcoin_cash_seednodes().into_iter().map(Into::into).collect(),
(Network::Testnet, &ConsensusFork::BitcoinCash(_)) => bitcoin_cash_testnet_seednodes().into_iter().map(Into::into).collect(),
(Network::Mainnet, _) => mainnet_seednodes().into_iter().map(Into::into).collect(),
(Network::Testnet, _) => testnet_seednodes().into_iter().map(Into::into).collect(),
(Network::Other(_), _) | (Network::Regtest, _) | (Network::Unitest, _) => Vec::new(),
None => match network {
Network::Mainnet => zcash_seednodes().into_iter().map(Into::into).collect(),
Network::Testnet => zcash_testnet_seednodes().into_iter().map(Into::into).collect(),
Network::Other(_) | Network::Regtest | Network::Unitest => Vec::new(),
},
};
@ -128,10 +122,6 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
};
let services = Services::default().with_network(true);
let services = match &consensus.fork {
&ConsensusFork::BitcoinCash(_) => services.with_bitcoin_cash(true),
&ConsensusFork::BitcoinCore => services.with_witness(true),
};
let verification_level = match matches.value_of("verification-level") {
Some(s) if s == "full" => VerificationLevel::Full,
@ -177,32 +167,6 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
Ok(config)
}
fn parse_consensus_fork(network: Network, db: &storage::SharedStore, matches: &clap::ArgMatches) -> Result<ConsensusFork, String> {
let old_consensus_fork = db.consensus_fork()?;
let new_consensus_fork = match (matches.is_present("btc"), matches.is_present("bch")) {
(false, false) => match &old_consensus_fork {
&Some(ref old_consensus_fork) => old_consensus_fork,
&None => return Err("You must select fork on first run: --btc, --bch".into()),
},
(true, false) => "btc",
(false, true) => "bch",
_ => return Err("You can only pass single fork argument: --btc, --bch".into()),
};
match &old_consensus_fork {
&None => db.set_consensus_fork(new_consensus_fork)?,
&Some(ref old_consensus_fork) if old_consensus_fork == new_consensus_fork => (),
&Some(ref old_consensus_fork) =>
return Err(format!("Cannot select '{}' fork with non-empty database of '{}' fork", new_consensus_fork, old_consensus_fork)),
}
return match new_consensus_fork {
"btc" => Ok(ConsensusFork::BitcoinCore),
"bch" => Ok(ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(network))),
_ => Err(String::from("Fork mandatory")),
};
}
fn parse_rpc_config(network: Network, matches: &clap::ArgMatches) -> Result<RpcHttpConfig, String> {
let mut config = RpcHttpConfig::with_port(network.rpc_port());
config.enabled = !matches.is_present("no-jsonrpc");

View File

@ -35,7 +35,9 @@ use app_dirs::AppInfo;
pub const APP_INFO: AppInfo = AppInfo { name: "pbtc", author: "Parity" };
pub const PROTOCOL_VERSION: u32 = 70_014;
pub const PROTOCOL_MINIMUM: u32 = 70_001;
pub const USER_AGENT: &'static str = "pbtc";
pub const ZCASH_PROTOCOL_VERSION: u32 = 170_007;
pub const ZCASH_PROTOCOL_MINIMUM: u32 = 170_007;
pub const USER_AGENT: &'static str = "bcore";
pub const REGTEST_USER_AGENT: &'static str = "/Satoshi:0.12.1/";
pub const LOG_INFO: &'static str = "sync=info";

View File

@ -1,49 +1,13 @@
pub fn mainnet_seednodes() -> Vec<&'static str> {
pub fn zcash_seednodes() -> Vec<&'static str> {
vec![
// Pieter Wuille
"seed.bitcoin.sipa.be:8333",
// Matt Corallo
"dnsseed.bluematt.me:8333",
// Luke Dashjr
"dnsseed.bitcoin.dashjr.org:8333",
// Christian Decker
"seed.bitcoinstats.com:8333",
// Jonas Schnelli
"seed.bitcoin.jonasschnelli.ch:8333",
// Peter Todd
"seed.btc.petertodd.org:8333",
//
"seed.voskuil.org:8333",
"dnsseed.z.cash:8233",
"dnsseed.str4d.xyz:8233",
"dnsseed.znodes.org:8233",
]
}
pub fn testnet_seednodes() -> Vec<&'static str> {
pub fn zcash_testnet_seednodes() -> Vec<&'static str> {
vec![
"testnet-seed.bitcoin.jonasschnelli.ch:18333",
"seed.tbtc.petertodd.org:18333",
"testnet-seed.bluematt.me:18333",
"testnet-seed.bitcoin.schildbach.de:18333",
"testnet-seed.voskuil.org:18333",
]
}
pub fn bitcoin_cash_seednodes() -> Vec<&'static str> {
vec![
"seed.bitcoinabc.org:8333",
"seed-abc.bitcoinforks.org:8333",
"seed.bitprim.org:8333",
"seed.deadalnix.me:8333",
"seeder.criptolayer.net:8333"
]
}
pub fn bitcoin_cash_testnet_seednodes() -> Vec<&'static str> {
vec![
"testnet-seed.bitcoinabc.org:18333",
"testnet-seed-abc.bitcoinforks.org:18333",
"testnet-seed.bitprim.org:18333",
"testnet-seed.deadalnix.me:18333",
"testnet-seeder.criptolayer.net:18333"
"dnsseed.testnet.z.cash:18233",
]
}

View File

@ -84,14 +84,15 @@ impl Compact {
Compact(compact | (size << 24) as u32)
}
pub fn to_f64(&self) -> f64 {
pub fn to_f64(&self, limit: Compact) -> f64 {
let shift_amount = (limit.0 >> 24) & 0xff;
let mut shift = (self.0 >> 24) & 0xff;
let mut diff = f64::from(0x0000ffffu32) / f64::from(self.0 & 0x00ffffffu32);
while shift < 29 {
let mut diff = f64::from(limit.0 & 0x00ffffffu32) / f64::from(self.0 & 0x00ffffffu32);
while shift < shift_amount {
diff *= f64::from(256);
shift += 1;
}
while shift > 29 {
while shift > shift_amount {
diff /= f64::from(256.0);
shift -= 1;
}
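In effect this computes difficulty = (limit_mantissa / self_mantissa) * 256^(limit_exponent - self_exponent). For example, bits 0x1b0404cb measured against the Bitcoin limit 0x1d00ffff (486604799, as in the updated tests below): (0xffff / 0x0404cb) * 256^(29 - 27) = 65535 * 65536 / 263371 ≈ 16307.42.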
@ -142,15 +143,17 @@ mod tests {
(v1 - v2).abs() < 0.00001
}
assert!(compare_f64(Compact::new(0x1b0404cb).to_f64(), 16307.42094));
let limit = Compact::new(486604799);
assert!(compare_f64(Compact::new(0x1b0404cb).to_f64(limit), 16307.42094));
// tests from original bitcoin client:
// https://github.com/bitcoin/bitcoin/blob/1e8f88e071019907785b260477bd359bef6f9a8f/src/test/blockchain_tests.cpp
assert!(compare_f64(Compact::new(0x1f111111).to_f64(), 0.000001));
assert!(compare_f64(Compact::new(0x1ef88f6f).to_f64(), 0.000016));
assert!(compare_f64(Compact::new(0x1df88f6f).to_f64(), 0.004023));
assert!(compare_f64(Compact::new(0x1cf88f6f).to_f64(), 1.029916));
assert!(compare_f64(Compact::new(0x12345678).to_f64(), 5913134931067755359633408.0));
assert!(compare_f64(Compact::new(0x1f111111).to_f64(limit), 0.000001));
assert!(compare_f64(Compact::new(0x1ef88f6f).to_f64(limit), 0.000016));
assert!(compare_f64(Compact::new(0x1df88f6f).to_f64(limit), 0.004023));
assert!(compare_f64(Compact::new(0x1cf88f6f).to_f64(limit), 1.029916));
assert!(compare_f64(Compact::new(0x12345678).to_f64(limit), 5913134931067755359633408.0));
}
}

View File

@ -14,7 +14,7 @@ use global_script::Script;
use chain::OutPoint;
use verification;
use ser::serialize;
use network::Network;
use network::Network;
use primitives::hash::H256 as GlobalH256;
pub struct BlockChainClient<T: BlockChainClientCoreApi> {
@ -38,7 +38,6 @@ pub struct BlockChainClientCore {
impl BlockChainClientCore {
pub fn new(network: Network, storage: storage::SharedStore) -> Self {
BlockChainClientCore {
network: network,
storage: storage,
@ -60,7 +59,7 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
}
fn difficulty(&self) -> f64 {
self.storage.difficulty()
self.storage.difficulty(self.network.max_bits().into())
}
fn raw_block(&self, hash: GlobalH256) -> Option<RawBlock> {
@ -88,18 +87,16 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
VerboseBlock {
confirmations: confirmations,
size: block_size as u32,
strippedsize: block_size as u32, // TODO: segwit
weight: block_size as u32, // TODO: segwit
height: height,
mediantime: Some(median_time),
difficulty: block.header.raw.bits.to_f64(),
difficulty: block.header.raw.bits.to_f64(self.network.max_bits().into()),
chainwork: U256::default(), // TODO: read from storage
previousblockhash: Some(block.header.raw.previous_header_hash.clone().into()),
nextblockhash: height.and_then(|h| self.storage.block_hash(h + 1).map(|h| h.into())),
bits: block.header.raw.bits.into(),
hash: block.hash().clone().into(),
merkleroot: block.header.raw.merkle_root_hash.clone().into(),
nonce: block.header.raw.nonce,
nonce: block.header.raw.nonce.clone().into(),
time: block.header.raw.time,
tx: block.transactions.into_iter().map(|t| t.hash.into()).collect(),
version: block.header.raw.version,
@ -287,8 +284,6 @@ pub mod tests {
hash: "bddd99ccfda39da1b108ce1a5d70038d0a967bacb68b6b63065f626a00000000".into(),
confirmations: 1, // h2
size: 215,
strippedsize: 215,
weight: 215,
height: Some(2),
version: 1,
version_hex: "1".to_owned(),
@ -296,7 +291,7 @@ pub mod tests {
tx: vec!["d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into()],
time: 1231469744,
mediantime: None,
nonce: 1639830024,
nonce: 42.into(),
bits: 486604799,
difficulty: 1.0,
chainwork: 0.into(),
@ -367,9 +362,7 @@ pub mod tests {
"id": 1
}"#)).unwrap();
// direct hash is 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000
// but client expects reverse hash
assert_eq!(&sample, r#"{"jsonrpc":"2.0","result":"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f","id":1}"#);
assert_eq!(&sample, r#"{"jsonrpc":"2.0","result":"00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08","id":1}"#);
}
#[test]
@ -403,9 +396,7 @@ pub mod tests {
"id": 1
}"#)).unwrap();
// direct hash is 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000
// but client expects reverse hash
assert_eq!(&sample, r#"{"jsonrpc":"2.0","result":"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f","id":1}"#);
assert_eq!(&sample, r#"{"jsonrpc":"2.0","result":"00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08","id":1}"#);
}
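
Both expected strings follow the usual RPC convention: hashes are little-endian in storage and shown byte-reversed to clients. A short sketch of the conversion, assuming the primitives crate's reversed() helper on H256:

// Stored (little-endian) form of the zcash mainnet genesis hash:
let stored: GlobalH256 = "08ce3d9731b000c08338455c8a4a6bd05da16e26b11daa1b917184ece80f0400".into();
// Byte-reversed for display: "00040fe8ec8471911baa1db1266ea15dd06b4a8a5c453883c000b031973dce08"
let displayed = stored.reversed();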
#[test]
@ -458,51 +449,47 @@ pub mod tests {
// https://blockexplorer.com/block/00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048
// https://blockchain.info/block/00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048
// https://webbtc.com/block/00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048.json
let verbose_block = core.verbose_block("4860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000".into());
let verbose_block = core.verbose_block("8392336da29773c56b1649ab555156ceb7e700ad7c230ea7a4571c7e22bc0700".into());
assert_eq!(verbose_block, Some(VerboseBlock {
hash: "4860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000".into(),
hash: "8392336da29773c56b1649ab555156ceb7e700ad7c230ea7a4571c7e22bc0700".into(),
confirmations: 2, // h1 + h2
size: 215,
strippedsize: 215,
weight: 215,
size: 1617,
height: Some(1),
version: 1,
version_hex: "1".to_owned(),
merkleroot: "982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e".into(),
tx: vec!["982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e".into()],
time: 1231469665,
mediantime: Some(1231006505),
nonce: 2573394689,
bits: 486604799,
version: 4,
version_hex: "4".to_owned(),
merkleroot: "0946edb9c083c9942d92305444527765fad789c438c717783276a9f7fbf61b85".into(),
tx: vec!["0946edb9c083c9942d92305444527765fad789c438c717783276a9f7fbf61b85".into()],
time: 1477671596,
mediantime: Some(1477641360),
nonce: "7534e8cf161ff2e49d54bdb3bfbcde8cdbf2fc5963c9ec7d86aed4a67e975790".into(),
bits: 520617983,
difficulty: 1.0,
chainwork: 0.into(),
previousblockhash: Some("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000".into()),
nextblockhash: Some("bddd99ccfda39da1b108ce1a5d70038d0a967bacb68b6b63065f626a00000000".into()),
previousblockhash: Some("08ce3d9731b000c08338455c8a4a6bd05da16e26b11daa1b917184ece80f0400".into()),
nextblockhash: Some("ed73e297d7c51cb8dc53fc2213d7e2e3f116eb4f26434496fc1926906ca20200".into()),
}));
// get info on block #2:
// https://blockexplorer.com/block/000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd
// https://blockchain.info/ru/block/000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd
// https://webbtc.com/block/000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd.json
let verbose_block = core.verbose_block("bddd99ccfda39da1b108ce1a5d70038d0a967bacb68b6b63065f626a00000000".into());
let verbose_block = core.verbose_block("ed73e297d7c51cb8dc53fc2213d7e2e3f116eb4f26434496fc1926906ca20200".into());
assert_eq!(verbose_block, Some(VerboseBlock {
hash: "bddd99ccfda39da1b108ce1a5d70038d0a967bacb68b6b63065f626a00000000".into(),
hash: "ed73e297d7c51cb8dc53fc2213d7e2e3f116eb4f26434496fc1926906ca20200".into(),
confirmations: 1, // h2
size: 215,
strippedsize: 215,
weight: 215,
size: 1617,
height: Some(2),
version: 1,
version_hex: "1".to_owned(),
merkleroot: "d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into(),
tx: vec!["d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b".into()],
time: 1231469744,
mediantime: Some(1231469665),
nonce: 1639830024,
bits: 486604799,
version: 4,
version_hex: "4".to_owned(),
merkleroot: "f4b084a7c2fc5a5aa2985f2bcb1d4a9a65562a589d628b0d869c5f1c8dd07489".into(),
tx: vec!["f4b084a7c2fc5a5aa2985f2bcb1d4a9a65562a589d628b0d869c5f1c8dd07489".into()],
time: 1477671626,
mediantime: Some(1477671596),
nonce: "a5556cd346010000000000000000000000000000000000000000000000000002".into(),
bits: 520617983,
difficulty: 1.0,
chainwork: 0.into(),
previousblockhash: Some("4860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000".into()),
previousblockhash: Some("8392336da29773c56b1649ab555156ceb7e700ad7c230ea7a4571c7e22bc0700".into()),
nextblockhash: None,
}));
}
@ -566,7 +553,7 @@ pub mod tests {
"id": 1
}"#)).unwrap();
assert_eq!(&sample, r#"{"jsonrpc":"2.0","result":{"bits":486604799,"chainwork":"0","confirmations":1,"difficulty":1.0,"hash":"000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd","height":2,"mediantime":null,"merkleroot":"9b0fc92260312ce44e74ef369f5c66bbb85848f2eddd5a7a1cde251e54ccfdd5","nextblockhash":null,"nonce":1639830024,"previousblockhash":"00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048","size":215,"strippedsize":215,"time":1231469744,"tx":["9b0fc92260312ce44e74ef369f5c66bbb85848f2eddd5a7a1cde251e54ccfdd5"],"version":1,"versionHex":"1","weight":215},"id":1}"#);
assert_eq!(&sample, r#"{"jsonrpc":"2.0","result":{"bits":486604799,"chainwork":"0","confirmations":1,"difficulty":1.0,"hash":"000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd","height":2,"mediantime":null,"merkleroot":"9b0fc92260312ce44e74ef369f5c66bbb85848f2eddd5a7a1cde251e54ccfdd5","nextblockhash":null,"nonce":"2a00000000000000000000000000000000000000000000000000000000000000","previousblockhash":"00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048","size":215,"time":1231469744,"tx":["9b0fc92260312ce44e74ef369f5c66bbb85848f2eddd5a7a1cde251e54ccfdd5"],"version":1,"versionHex":"1"},"id":1}"#);
}
#[test]
@ -586,23 +573,24 @@ pub mod tests {
assert_eq!(&sample, r#"{"jsonrpc":"2.0","error":{"code":-32099,"message":"Block with given hash is not found","data":"000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"},"id":1}"#);
}
#[ignore("TODO: Needs ZCash address")]
#[test]
fn verbose_transaction_out_contents() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into(), test_data::block_h1().into()]));
let core = BlockChainClientCore::new(Network::Mainnet, storage);
// get info on tx from genesis block:
// https://blockchain.info/ru/tx/4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
// get info on tx from block#1:
// https://zcash.blockexplorer.com/tx/851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609
let verbose_transaction_out = core.verbose_transaction_out(OutPoint {
hash: "3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a".into(),
hash: "0946edb9c083c9942d92305444527765fad789c438c717783276a9f7fbf61b85".into(),
index: 0,
});
assert_eq!(verbose_transaction_out, Ok(GetTxOutResponse {
bestblock: "6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000".into(),
bestblock: "8392336da29773c56b1649ab555156ceb7e700ad7c230ea7a4571c7e22bc0700".into(),
confirmations: 1,
value: 50.0,
value: 0.0005,
script: TransactionOutputScript {
asm: "OP_PUSHBYTES_65 0x04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f\nOP_CHECKSIG\n".to_owned(),
asm: "OP_PUSHBYTES_33 0x027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875\nOP_CHECKSIG\n".to_owned(),
hex: Bytes::from("4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac"),
req_sigs: 1,
script_type: ScriptType::PubKey,

View File

@ -48,7 +48,6 @@ impl RawClientCore {
},
script_sig: GlobalBytes::new(), // default script
sequence: input.sequence.unwrap_or(default_sequence),
script_witness: vec![],
}).collect();
// prepare outputs
@ -84,6 +83,7 @@ impl RawClientCore {
inputs: inputs,
outputs: outputs,
lock_time: lock_time,
joint_split: None,
};
Ok(transaction)

View File

@ -62,7 +62,7 @@ pub struct BlockTemplateTransaction {
pub data: RawTransaction,
/// Transaction id encoded in little-endian hexadecimal
pub txid: Option<H256>,
/// Hash encoded in little-endian hexadecimal (including witness data)
/// Hash encoded in little-endian hexadecimal
pub hash: Option<H256>,
/// Transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is
pub depends: Option<Vec<u64>>,

View File

@ -21,10 +21,6 @@ pub struct VerboseBlock {
pub confirmations: i64,
/// Block size
pub size: u32,
/// Block size, excluding witness data
pub strippedsize: u32,
/// Block weight
pub weight: u32,
/// Block height
/// TODO: bitcoind always returns value, but we hold this value for main chain blocks only
pub height: Option<u32>,
@ -43,7 +39,7 @@ pub struct VerboseBlock {
/// TODO: bitcoind always returns value, but we can calculate this only if height(block) > 2
pub mediantime: Option<u32>,
/// Block nonce
pub nonce: u32,
pub nonce: H256,
/// Block nbits
pub bits: u32,
/// Block difficulty
@ -76,14 +72,12 @@ mod tests {
#[test]
fn verbose_block_serialize() {
let block = VerboseBlock::default();
assert_eq!(serde_json::to_string(&block).unwrap(), r#"{"hash":"0000000000000000000000000000000000000000000000000000000000000000","confirmations":0,"size":0,"strippedsize":0,"weight":0,"height":null,"version":0,"versionHex":"","merkleroot":"0000000000000000000000000000000000000000000000000000000000000000","tx":[],"time":0,"mediantime":null,"nonce":0,"bits":0,"difficulty":0.0,"chainwork":"0","previousblockhash":null,"nextblockhash":null}"#);
assert_eq!(serde_json::to_string(&block).unwrap(), r#"{"hash":"0000000000000000000000000000000000000000000000000000000000000000","confirmations":0,"size":0,"height":null,"version":0,"versionHex":"","merkleroot":"0000000000000000000000000000000000000000000000000000000000000000","tx":[],"time":0,"mediantime":null,"nonce":"0000000000000000000000000000000000000000000000000000000000000000","bits":0,"difficulty":0.0,"chainwork":"0","previousblockhash":null,"nextblockhash":null}"#);
let block = VerboseBlock {
hash: H256::from(1),
confirmations: -1,
size: 500000,
strippedsize: 444444,
weight: 5236235,
height: Some(3513513),
version: 1,
version_hex: "01".to_owned(),
@ -91,29 +85,27 @@ mod tests {
tx: vec![H256::from(3), H256::from(4)],
time: 111,
mediantime: Some(100),
nonce: 124,
nonce: 124.into(),
bits: 13513,
difficulty: 555.555,
chainwork: U256::from(3),
previousblockhash: Some(H256::from(4)),
nextblockhash: Some(H256::from(5)),
};
assert_eq!(serde_json::to_string(&block).unwrap(), r#"{"hash":"0100000000000000000000000000000000000000000000000000000000000000","confirmations":-1,"size":500000,"strippedsize":444444,"weight":5236235,"height":3513513,"version":1,"versionHex":"01","merkleroot":"0200000000000000000000000000000000000000000000000000000000000000","tx":["0300000000000000000000000000000000000000000000000000000000000000","0400000000000000000000000000000000000000000000000000000000000000"],"time":111,"mediantime":100,"nonce":124,"bits":13513,"difficulty":555.555,"chainwork":"3","previousblockhash":"0400000000000000000000000000000000000000000000000000000000000000","nextblockhash":"0500000000000000000000000000000000000000000000000000000000000000"}"#);
assert_eq!(serde_json::to_string(&block).unwrap(), r#"{"hash":"0100000000000000000000000000000000000000000000000000000000000000","confirmations":-1,"size":500000,"height":3513513,"version":1,"versionHex":"01","merkleroot":"0200000000000000000000000000000000000000000000000000000000000000","tx":["0300000000000000000000000000000000000000000000000000000000000000","0400000000000000000000000000000000000000000000000000000000000000"],"time":111,"mediantime":100,"nonce":"7c00000000000000000000000000000000000000000000000000000000000000","bits":13513,"difficulty":555.555,"chainwork":"3","previousblockhash":"0400000000000000000000000000000000000000000000000000000000000000","nextblockhash":"0500000000000000000000000000000000000000000000000000000000000000"}"#);
}
#[test]
fn verbose_block_deserialize() {
let block = VerboseBlock::default();
assert_eq!(
serde_json::from_str::<VerboseBlock>(r#"{"hash":"0000000000000000000000000000000000000000000000000000000000000000","confirmations":0,"size":0,"strippedsize":0,"weight":0,"height":null,"version":0,"versionHex":"","merkleroot":"0000000000000000000000000000000000000000000000000000000000000000","tx":[],"time":0,"mediantime":null,"nonce":0,"bits":0,"difficulty":0.0,"chainwork":"0","previousblockhash":null,"nextblockhash":null}"#).unwrap(),
serde_json::from_str::<VerboseBlock>(r#"{"hash":"0000000000000000000000000000000000000000000000000000000000000000","confirmations":0,"size":0,"height":null,"version":0,"versionHex":"","merkleroot":"0000000000000000000000000000000000000000000000000000000000000000","tx":[],"time":0,"mediantime":null,"nonce":"0000000000000000000000000000000000000000000000000000000000000000","bits":0,"difficulty":0.0,"chainwork":"0","previousblockhash":null,"nextblockhash":null}"#).unwrap(),
block);
let block = VerboseBlock {
hash: H256::from(1),
confirmations: -1,
size: 500000,
strippedsize: 444444,
weight: 5236235,
height: Some(3513513),
version: 1,
version_hex: "01".to_owned(),
@ -121,7 +113,7 @@ mod tests {
tx: vec![H256::from(3), H256::from(4)],
time: 111,
mediantime: Some(100),
nonce: 124,
nonce: 124.into(),
bits: 13513,
difficulty: 555.555,
chainwork: U256::from(3),
@ -129,7 +121,7 @@ mod tests {
nextblockhash: Some(H256::from(5)),
};
assert_eq!(
serde_json::from_str::<VerboseBlock>(r#"{"hash":"0100000000000000000000000000000000000000000000000000000000000000","confirmations":-1,"size":500000,"strippedsize":444444,"weight":5236235,"height":3513513,"version":1,"versionHex":"01","merkleroot":"0200000000000000000000000000000000000000000000000000000000000000","tx":["0300000000000000000000000000000000000000000000000000000000000000","0400000000000000000000000000000000000000000000000000000000000000"],"time":111,"mediantime":100,"nonce":124,"bits":13513,"difficulty":555.555,"chainwork":"3","previousblockhash":"0400000000000000000000000000000000000000000000000000000000000000","nextblockhash":"0500000000000000000000000000000000000000000000000000000000000000"}"#).unwrap(),
serde_json::from_str::<VerboseBlock>(r#"{"hash":"0100000000000000000000000000000000000000000000000000000000000000","confirmations":-1,"size":500000,"height":3513513,"version":1,"versionHex":"01","merkleroot":"0200000000000000000000000000000000000000000000000000000000000000","tx":["0300000000000000000000000000000000000000000000000000000000000000","0400000000000000000000000000000000000000000000000000000000000000"],"time":111,"mediantime":100,"nonce":"7c00000000000000000000000000000000000000000000000000000000000000","bits":13513,"difficulty":555.555,"chainwork":"3","previousblockhash":"0400000000000000000000000000000000000000000000000000000000000000","nextblockhash":"0500000000000000000000000000000000000000000000000000000000000000"}"#).unwrap(),
block);
}
@ -143,6 +135,6 @@ mod tests {
fn get_block_response_verbose_serialize() {
let block = VerboseBlock::default();
let verbose_response = GetBlockResponse::Verbose(block);
assert_eq!(serde_json::to_string(&verbose_response).unwrap(), r#"{"hash":"0000000000000000000000000000000000000000000000000000000000000000","confirmations":0,"size":0,"strippedsize":0,"weight":0,"height":null,"version":0,"versionHex":"","merkleroot":"0000000000000000000000000000000000000000000000000000000000000000","tx":[],"time":0,"mediantime":null,"nonce":0,"bits":0,"difficulty":0.0,"chainwork":"0","previousblockhash":null,"nextblockhash":null}"#);
assert_eq!(serde_json::to_string(&verbose_response).unwrap(), r#"{"hash":"0000000000000000000000000000000000000000000000000000000000000000","confirmations":0,"size":0,"height":null,"version":0,"versionHex":"","merkleroot":"0000000000000000000000000000000000000000000000000000000000000000","tx":[],"time":0,"mediantime":null,"nonce":"0000000000000000000000000000000000000000000000000000000000000000","bits":0,"difficulty":0.0,"chainwork":"0","previousblockhash":null,"nextblockhash":null}"#);
}
}

View File

@ -11,8 +11,6 @@ pub enum ScriptType {
ScriptHash,
Multisig,
NullData,
WitnessScript,
WitnessKey,
}
impl From<GlobalScriptType> for ScriptType {
@ -24,8 +22,6 @@ impl From<GlobalScriptType> for ScriptType {
GlobalScriptType::ScriptHash => ScriptType::ScriptHash,
GlobalScriptType::Multisig => ScriptType::Multisig,
GlobalScriptType::NullData => ScriptType::NullData,
GlobalScriptType::WitnessScript => ScriptType::WitnessScript,
GlobalScriptType::WitnessKey => ScriptType::WitnessKey,
}
}
}
@ -39,8 +35,6 @@ impl Serialize for ScriptType {
ScriptType::ScriptHash => "scripthash".serialize(serializer),
ScriptType::Multisig => "multisig".serialize(serializer),
ScriptType::NullData => "nulldata".serialize(serializer),
ScriptType::WitnessScript => "witness_v0_scripthash".serialize(serializer),
ScriptType::WitnessKey => "witness_v0_keyhash".serialize(serializer),
}
}
}
@ -66,8 +60,6 @@ impl<'a> Deserialize<'a> for ScriptType {
"scripthash" => Ok(ScriptType::ScriptHash),
"multisig" => Ok(ScriptType::Multisig),
"nulldata" => Ok(ScriptType::NullData),
"witness_v0_scripthash" => Ok(ScriptType::WitnessScript),
"witness_v0_keyhash" => Ok(ScriptType::WitnessKey),
_ => Err(E::invalid_value(Unexpected::Str(value), &self)),
}
}
@ -90,8 +82,6 @@ mod tests {
assert_eq!(serde_json::to_string(&ScriptType::ScriptHash).unwrap(), r#""scripthash""#);
assert_eq!(serde_json::to_string(&ScriptType::Multisig).unwrap(), r#""multisig""#);
assert_eq!(serde_json::to_string(&ScriptType::NullData).unwrap(), r#""nulldata""#);
assert_eq!(serde_json::to_string(&ScriptType::WitnessScript).unwrap(), r#""witness_v0_scripthash""#);
assert_eq!(serde_json::to_string(&ScriptType::WitnessKey).unwrap(), r#""witness_v0_keyhash""#);
}
#[test]
@ -102,7 +92,5 @@ mod tests {
assert_eq!(serde_json::from_str::<ScriptType>(r#""scripthash""#).unwrap(), ScriptType::ScriptHash);
assert_eq!(serde_json::from_str::<ScriptType>(r#""multisig""#).unwrap(), ScriptType::Multisig);
assert_eq!(serde_json::from_str::<ScriptType>(r#""nulldata""#).unwrap(), ScriptType::NullData);
assert_eq!(serde_json::from_str::<ScriptType>(r#""witness_v0_scripthash""#).unwrap(), ScriptType::WitnessScript);
assert_eq!(serde_json::from_str::<ScriptType>(r#""witness_v0_keyhash""#).unwrap(), ScriptType::WitnessKey);
}
}

View File

@ -91,8 +91,6 @@ pub struct SignedTransactionInput {
pub script_sig: TransactionInputScript,
/// Sequence number
pub sequence: u32,
/// Hex-encoded witness data (if any)
pub txinwitness: Vec<String>,
}
/// Signed transaction output
@ -114,12 +112,10 @@ pub struct Transaction {
pub hex: RawTransaction,
/// The transaction id (same as provided)
pub txid: H256,
/// The transaction hash (differs from txid for witness transactions)
/// The transaction hash
pub hash: H256,
/// The serialized transaction size
pub size: usize,
/// The virtual transaction size (differs from size for witness transactions)
pub vsize: usize,
/// The version
pub version: i32,
/// The lock time
@ -357,9 +353,8 @@ mod tests {
hex: Bytes::new(vec![1, 2, 3, 4]),
},
sequence: 123,
txinwitness: vec![],
};
assert_eq!(serde_json::to_string(&txin).unwrap(), r#"{"txid":"4d00000000000000000000000000000000000000000000000000000000000000","vout":13,"script_sig":{"asm":"Hello, world!!!","hex":"01020304"},"sequence":123,"txinwitness":[]}"#);
assert_eq!(serde_json::to_string(&txin).unwrap(), r#"{"txid":"4d00000000000000000000000000000000000000000000000000000000000000","vout":13,"script_sig":{"asm":"Hello, world!!!","hex":"01020304"},"sequence":123}"#);
}
#[test]
@ -372,10 +367,9 @@ mod tests {
hex: Bytes::new(vec![1, 2, 3, 4]),
},
sequence: 123,
txinwitness: vec![],
};
assert_eq!(
serde_json::from_str::<SignedTransactionInput>(r#"{"txid":"4d00000000000000000000000000000000000000000000000000000000000000","vout":13,"script_sig":{"asm":"Hello, world!!!","hex":"01020304"},"sequence":123,"txinwitness":[]}"#).unwrap(),
serde_json::from_str::<SignedTransactionInput>(r#"{"txid":"4d00000000000000000000000000000000000000000000000000000000000000","vout":13,"script_sig":{"asm":"Hello, world!!!","hex":"01020304"},"sequence":123}"#).unwrap(),
txin);
}
@ -420,7 +414,6 @@ mod tests {
txid: H256::from(4),
hash: H256::from(5),
size: 33,
vsize: 44,
version: 55,
locktime: 66,
vin: vec![],
@ -430,7 +423,7 @@ mod tests {
time: 88,
blocktime: 99,
};
assert_eq!(serde_json::to_string(&tx).unwrap(), r#"{"hex":"deadbeef","txid":"0400000000000000000000000000000000000000000000000000000000000000","hash":"0500000000000000000000000000000000000000000000000000000000000000","size":33,"vsize":44,"version":55,"locktime":66,"vin":[],"vout":[],"blockhash":"0600000000000000000000000000000000000000000000000000000000000000","confirmations":77,"time":88,"blocktime":99}"#);
assert_eq!(serde_json::to_string(&tx).unwrap(), r#"{"hex":"deadbeef","txid":"0400000000000000000000000000000000000000000000000000000000000000","hash":"0500000000000000000000000000000000000000000000000000000000000000","size":33,"version":55,"locktime":66,"vin":[],"vout":[],"blockhash":"0600000000000000000000000000000000000000000000000000000000000000","confirmations":77,"time":88,"blocktime":99}"#);
}
#[test]
@ -440,7 +433,6 @@ mod tests {
txid: H256::from(4),
hash: H256::from(5),
size: 33,
vsize: 44,
version: 55,
locktime: 66,
vin: vec![],
@ -451,7 +443,7 @@ mod tests {
blocktime: 99,
};
assert_eq!(
serde_json::from_str::<Transaction>(r#"{"hex":"deadbeef","txid":"0400000000000000000000000000000000000000000000000000000000000000","hash":"0500000000000000000000000000000000000000000000000000000000000000","size":33,"vsize":44,"version":55,"locktime":66,"vin":[],"vout":[],"blockhash":"0600000000000000000000000000000000000000000000000000000000000000","confirmations":77,"time":88,"blocktime":99}"#).unwrap(),
serde_json::from_str::<Transaction>(r#"{"hex":"deadbeef","txid":"0400000000000000000000000000000000000000000000000000000000000000","hash":"0500000000000000000000000000000000000000000000000000000000000000","size":33,"version":55,"locktime":66,"vin":[],"vout":[],"blockhash":"0600000000000000000000000000000000000000000000000000000000000000","confirmations":77,"time":88,"blocktime":99}"#).unwrap(),
tx);
}
}

View File

@ -55,6 +55,20 @@ impl Builder {
self
}
/// Pushes an integer to the end of the script
pub fn push_i64(mut self, int: i64) -> Self {
if int == -1 || (int >= 1 && int <= 16) {
let shift: i64 = (Opcode::OP_1 as u8 - 1) as i64;
self.data.push((int + shift) as u8);
self
} else if int == 0 {
self.data.push(Opcode::OP_0 as u8);
self
} else {
self.push_num(int.into())
}
}
/// Appends num push operation to the end of script
pub fn push_num(self, num: Num) -> Self {
self.push_data(&num.to_bytes())
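
A hedged usage sketch of the new push_i64: -1 and 0..=16 collapse to single opcodes, anything else falls through to a data push (opcode byte values as defined by this crate's Opcode enum):

let script = Builder::default()
    .push_i64(-1)   // OP_1NEGATE (0x4f)
    .push_i64(0)    // OP_0 (0x00)
    .push_i64(16)   // OP_16 (0x60)
    .push_i64(17)   // not a small integer: push_num emits a one-byte data push, 0x11
    .into_script();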

View File

@ -45,8 +45,6 @@ pub enum Error {
// BIP62
SignatureHashtype,
SignatureDer,
SignatureIllegalForkId,
SignatureMustUseForkId,
Minimaldata,
SignaturePushOnly,
SignatureHighS,
@ -56,16 +54,6 @@ pub enum Error {
// Softfork safeness
DiscourageUpgradableNops,
DiscourageUpgradableWitnessProgram,
// SegWit-related errors
WitnessProgramWrongLength,
WitnessProgramWitnessEmpty,
WitnessProgramMismatch,
WitnessMalleated,
WitnessMalleatedP2SH,
WitnessUnexpected,
WitnessPubKeyType,
}
impl fmt::Display for Error {
@ -110,8 +98,6 @@ impl fmt::Display for Error {
// BIP62
Error::SignatureHashtype => "Invalid Signature Hashtype".fmt(f),
Error::SignatureDer => "Invalid Signature".fmt(f),
Error::SignatureIllegalForkId => "Illegal use of SIGHASH_FORKID".fmt(f),
Error::SignatureMustUseForkId => "Signature must use SIGHASH_FORKID".fmt(f),
Error::Minimaldata => "Check minimaldata failed".fmt(f),
Error::SignaturePushOnly => "Only push opcodes are allowed in this signature".fmt(f),
Error::SignatureHighS => "Invalid High S in Signature".fmt(f),
@ -121,16 +107,6 @@ impl fmt::Display for Error {
// Softfork safeness
Error::DiscourageUpgradableNops => "Discourage Upgradable Nops".fmt(f),
Error::DiscourageUpgradableWitnessProgram => "Discourage Upgradable Witness Program".fmt(f),
// SegWit-related errors
Error::WitnessProgramWrongLength => "Witness program has incorrect length".fmt(f),
Error::WitnessProgramWitnessEmpty => "Witness program was passed an empty witness".fmt(f),
Error::WitnessProgramMismatch => "Witness program hash mismatch".fmt(f),
Error::WitnessMalleated => "Witness requires empty scriptSig".fmt(f),
Error::WitnessMalleatedP2SH => "Witness requires only-redeemscript scriptSig".fmt(f),
Error::WitnessUnexpected => "Witness provided for non-witness script".fmt(f),
Error::WitnessPubKeyType => "Using non-compressed keys in segwit".fmt(f),
}
}
}

View File

@ -47,7 +47,7 @@ pub struct VerificationFlags {
/// "At least one stack element must remain, and when interpreted as a boolean, it must be true" to
/// "Exactly one stack element must remain, and when interpreted as a boolean, it must be true".
/// (softfork safe, BIP62 rule 6)
/// Note: CLEANSTACK should never be used without P2SH or WITNESS.
/// Note: CLEANSTACK should never be used without P2SH.
pub verify_cleanstack: bool,
/// Verify CHECKLOCKTIMEVERIFY
@ -60,12 +60,6 @@ pub struct VerificationFlags {
/// See BIP112 for details
pub verify_checksequence: bool,
/// Support segregated witness
pub verify_witness: bool,
/// Making v1-v16 witness program non-standard
pub verify_discourage_upgradable_witness_program: bool,
/// Support OP_CAT opcode
pub verify_concat: bool,
@ -129,11 +123,6 @@ impl VerificationFlags {
self
}
pub fn verify_witness(mut self, value: bool) -> Self {
self.verify_witness = value;
self
}
pub fn verify_nulldummy(mut self, value: bool) -> Self {
self.verify_nulldummy = value;
self
@ -149,11 +138,6 @@ impl VerificationFlags {
self
}
pub fn verify_discourage_upgradable_witness_program(mut self, value: bool) -> Self {
self.verify_discourage_upgradable_witness_program = value;
self
}
pub fn verify_concat(mut self, value: bool) -> Self {
self.verify_concat = value;
self

File diff suppressed because it is too large

View File

@ -24,8 +24,8 @@ pub use self::flags::VerificationFlags;
pub use self::interpreter::{eval_script, verify_script};
pub use self::opcode::Opcode;
pub use self::num::Num;
pub use self::script::{Script, ScriptType, ScriptAddress, ScriptWitness, is_witness_commitment_script};
pub use self::sign::{TransactionInputSigner, UnsignedTransactionInput, SignatureVersion};
pub use self::script::{Script, ScriptType, ScriptAddress};
pub use self::sign::{TransactionInputSigner, UnsignedTransactionInput};
pub use self::stack::Stack;
pub use self::verify::{SignatureChecker, NoopSignatureChecker, TransactionSignatureChecker};

View File

@ -26,8 +26,6 @@ pub enum ScriptType {
ScriptHash,
Multisig,
NullData,
WitnessScript,
WitnessKey,
}
/// Address from Script
@ -137,34 +135,6 @@ impl Script {
self.data[22] == Opcode::OP_EQUAL as u8
}
/// Extra-fast test for pay-to-witness-key-hash scripts.
pub fn is_pay_to_witness_key_hash(&self) -> bool {
self.data.len() == 22 &&
self.data[0] == Opcode::OP_0 as u8 &&
self.data[1] == Opcode::OP_PUSHBYTES_20 as u8
}
/// Parse witness program. Returns Some(witness program version, code) or None if not a witness program.
pub fn parse_witness_program(&self) -> Option<(u8, &[u8])> {
if self.data.len() < 4 || self.data.len() > 42 || self.data.len() != self.data[1] as usize + 2 {
return None;
}
let witness_version = match Opcode::from_u8(self.data[0]) {
Some(Opcode::OP_0) => 0,
Some(x) if x >= Opcode::OP_1 && x <= Opcode::OP_16 => (x as u8) - (Opcode::OP_1 as u8) + 1,
_ => return None,
};
let witness_program = &self.data[2..];
Some((witness_version, witness_program))
}
/// Extra-fast test for pay-to-witness-script-hash scripts.
pub fn is_pay_to_witness_script_hash(&self) -> bool {
self.data.len() == 34 &&
self.data[0] == Opcode::OP_0 as u8 &&
self.data[1] == Opcode::OP_PUSHBYTES_32 as u8
}
/// Extra-fast test for multisig scripts.
pub fn is_multisig_script(&self) -> bool {
if self.data.len() < 3 {
@ -350,10 +320,6 @@ impl Script {
ScriptType::Multisig
} else if self.is_null_data_script() {
ScriptType::NullData
} else if self.is_pay_to_witness_key_hash() {
ScriptType::WitnessKey
} else if self.is_pay_to_witness_script_hash() {
ScriptType::WitnessScript
} else {
ScriptType::NonStandard
}
@ -448,12 +414,6 @@ impl Script {
ScriptType::NullData => {
Ok(vec![])
},
ScriptType::WitnessScript => {
Ok(vec![]) // TODO
},
ScriptType::WitnessKey => {
Ok(vec![]) // TODO
},
}
}
@ -573,20 +533,6 @@ impl fmt::Display for Script {
}
}
pub type ScriptWitness = Vec<Bytes>;
/// Is the passed byte array a witness commitment script?
/// https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#Commitment_structure
pub fn is_witness_commitment_script(script: &[u8]) -> bool {
script.len() >= 38 &&
script[0] == Opcode::OP_RETURN as u8 &&
script[1] == 0x24 &&
script[2] == 0xAA &&
script[3] == 0x21 &&
script[4] == 0xA9 &&
script[5] == 0xED
}
#[cfg(test)]
mod tests {
use {Builder, Opcode};
@ -601,22 +547,6 @@ mod tests {
assert!(!script2.is_pay_to_script_hash());
}
#[test]
fn test_is_pay_to_witness_key_hash() {
let script: Script = "00140000000000000000000000000000000000000000".into();
let script2: Script = "01140000000000000000000000000000000000000000".into();
assert!(script.is_pay_to_witness_key_hash());
assert!(!script2.is_pay_to_witness_key_hash());
}
#[test]
fn test_is_pay_to_witness_script_hash() {
let script: Script = "00203b80842f4ea32806ce5e723a255ddd6490cfd28dac38c58bf9254c0577330693".into();
let script2: Script = "01203b80842f4ea32806ce5e723a255ddd6490cfd28dac38c58bf9254c0577330693".into();
assert!(script.is_pay_to_witness_script_hash());
assert!(!script2.is_pay_to_witness_script_hash());
}
#[test]
fn test_script_debug() {
use std::fmt::Write;

View File

@ -8,13 +8,6 @@ use ser::Stream;
use chain::{Transaction, TransactionOutput, OutPoint, TransactionInput};
use {Script, Builder};
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum SignatureVersion {
Base,
WitnessV0,
ForkId,
}
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u8)]
pub enum SighashBase {
@ -65,12 +58,9 @@ impl Sighash {
}
/// Used by SCRIPT_VERIFY_STRICTENC
pub fn is_defined(version: SignatureVersion, u: u32) -> bool {
pub fn is_defined(u: u32) -> bool {
// reset anyone_can_pay && fork_id (if applicable) bits
let u = match version {
SignatureVersion::ForkId => u & !(0x40 | 0x80),
_ => u & !(0x80),
};
let u = u & !(0x80);
// Only exact All | None | Single values are passing this check
match u {
@ -80,16 +70,15 @@ impl Sighash {
}
/// Creates Sighash from any u, even if is_defined() == false
pub fn from_u32(version: SignatureVersion, u: u32) -> Self {
pub fn from_u32(u: u32) -> Self {
let anyone_can_pay = (u & 0x80) == 0x80;
let fork_id = version == SignatureVersion::ForkId && (u & 0x40) == 0x40;
let base = match u & 0x1f {
2 => SighashBase::None,
3 => SighashBase::Single,
1 | _ => SighashBase::All,
};
Sighash::new(base, anyone_can_pay, fork_id)
Sighash::new(base, anyone_can_pay, false)
}
}
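
With the fork-id branch gone, decoding reduces to masking the anyone_can_pay bit and matching the base type. A small sketch of the resulting behaviour (field names as used elsewhere in this module):

// 0x81 = SIGHASH_ALL | SIGHASH_ANYONECANPAY
let sighash = Sighash::from_u32(0x81);
assert_eq!(sighash.base, SighashBase::All);
assert!(sighash.anyone_can_pay);
assert!(!sighash.fork_id); // always false now
assert!(Sighash::is_defined(0x81));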
@ -130,42 +119,9 @@ impl From<Transaction> for TransactionInputSigner {
}
impl TransactionInputSigner {
pub fn signature_hash(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sigversion: SignatureVersion, sighashtype: u32) -> H256 {
let sighash = Sighash::from_u32(sigversion, sighashtype);
match sigversion {
SignatureVersion::ForkId if sighash.fork_id => self.signature_hash_fork_id(input_index, input_amount, script_pubkey, sighashtype, sighash),
SignatureVersion::Base | SignatureVersion::ForkId => self.signature_hash_original(input_index, script_pubkey, sighashtype, sighash),
SignatureVersion::WitnessV0 => self.signature_hash_witness0(input_index, input_amount, script_pubkey, sighashtype, sighash),
}
}
/// input_index - index of input to sign
/// script_pubkey - script_pubkey of input's previous_output pubkey
pub fn signed_input(
&self,
keypair: &KeyPair,
input_index: usize,
input_amount: u64,
script_pubkey: &Script,
sigversion: SignatureVersion,
sighash: u32,
) -> TransactionInput {
let hash = self.signature_hash(input_index, input_amount, script_pubkey, sigversion, sighash);
let mut signature: Vec<u8> = keypair.private().sign(&hash).unwrap().into();
signature.push(sighash as u8);
let script_sig = Builder::default()
.push_data(&signature)
//.push_data(keypair.public())
.into_script();
let unsigned_input = &self.inputs[input_index];
TransactionInput {
previous_output: unsigned_input.previous_output.clone(),
sequence: unsigned_input.sequence,
script_sig: script_sig.to_bytes(),
script_witness: vec![],
}
pub fn signature_hash(&self, input_index: usize, script_pubkey: &Script, sighashtype: u32) -> H256 {
let sighash = Sighash::from_u32(sighashtype);
self.signature_hash_original(input_index, script_pubkey, sighashtype, sighash)
}
pub fn signature_hash_original(&self, input_index: usize, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
@ -185,7 +141,6 @@ impl TransactionInputSigner {
previous_output: input.previous_output.clone(),
script_sig: script_pubkey.to_bytes(),
sequence: input.sequence,
script_witness: vec![],
}]
} else {
self.inputs.iter()
@ -201,7 +156,6 @@ impl TransactionInputSigner {
SighashBase::Single | SighashBase::None if n != input_index => 0,
_ => input.sequence,
},
script_witness: vec![],
})
.collect()
};
@ -225,6 +179,7 @@ impl TransactionInputSigner {
outputs: outputs,
version: self.version,
lock_time: self.lock_time,
joint_split: None, // TODO
};
let mut stream = Stream::default();
@ -234,80 +189,30 @@ impl TransactionInputSigner {
dhash256(&out)
}
fn signature_hash_witness0(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
let hash_prevouts = compute_hash_prevouts(sighash, &self.inputs);
let hash_sequence = compute_hash_sequence(sighash, &self.inputs);
let hash_outputs = compute_hash_outputs(sighash, input_index, &self.outputs);
/// input_index - index of input to sign
/// script_pubkey - script_pubkey of input's previous_output pubkey
pub fn signed_input(
&self,
keypair: &KeyPair,
input_index: usize,
script_pubkey: &Script,
sighash: u32,
) -> TransactionInput {
let hash = self.signature_hash(input_index, script_pubkey, sighash);
let mut stream = Stream::default();
stream.append(&self.version);
stream.append(&hash_prevouts);
stream.append(&hash_sequence);
stream.append(&self.inputs[input_index].previous_output);
stream.append_list(&**script_pubkey);
stream.append(&input_amount);
stream.append(&self.inputs[input_index].sequence);
stream.append(&hash_outputs);
stream.append(&self.lock_time);
stream.append(&sighashtype); // this also includes the 24-bit fork id, which is 0 for BitcoinCash
let out = stream.out();
dhash256(&out)
}
let mut signature: Vec<u8> = keypair.private().sign(&hash).unwrap().into();
signature.push(sighash as u8);
let script_sig = Builder::default()
.push_data(&signature)
//.push_data(keypair.public())
.into_script();
fn signature_hash_fork_id(&self, input_index: usize, input_amount: u64, script_pubkey: &Script, sighashtype: u32, sighash: Sighash) -> H256 {
if input_index >= self.inputs.len() {
return 1u8.into();
let unsigned_input = &self.inputs[input_index];
TransactionInput {
previous_output: unsigned_input.previous_output.clone(),
sequence: unsigned_input.sequence,
script_sig: script_sig.to_bytes(),
}
if sighash.base == SighashBase::Single && input_index >= self.outputs.len() {
return 1u8.into();
}
self.signature_hash_witness0(input_index, input_amount, script_pubkey, sighashtype, sighash)
}
}
fn compute_hash_prevouts(sighash: Sighash, inputs: &[UnsignedTransactionInput]) -> H256 {
match sighash.anyone_can_pay {
false => {
let mut stream = Stream::default();
for input in inputs {
stream.append(&input.previous_output);
}
dhash256(&stream.out())
},
true => 0u8.into(),
}
}
fn compute_hash_sequence(sighash: Sighash, inputs: &[UnsignedTransactionInput]) -> H256 {
match sighash.base {
SighashBase::All if !sighash.anyone_can_pay => {
let mut stream = Stream::default();
for input in inputs {
stream.append(&input.sequence);
}
dhash256(&stream.out())
},
_ => 0u8.into(),
}
}
fn compute_hash_outputs(sighash: Sighash, input_index: usize, outputs: &[TransactionOutput]) -> H256 {
match sighash.base {
SighashBase::All => {
let mut stream = Stream::default();
for output in outputs {
stream.append(output);
}
dhash256(&stream.out())
},
SighashBase::Single if input_index < outputs.len() => {
let mut stream = Stream::default();
stream.append(&outputs[input_index]);
dhash256(&stream.out())
},
_ => 0u8.into(),
}
}
@ -318,7 +223,7 @@ mod tests {
use keys::{KeyPair, Private, Address};
use chain::{OutPoint, TransactionOutput, Transaction};
use script::Script;
use super::{Sighash, UnsignedTransactionInput, TransactionInputSigner, SighashBase, SignatureVersion};
use super::{Sighash, UnsignedTransactionInput, TransactionInputSigner, SighashBase};
// http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html
// https://blockchain.info/rawtx/81b4c832d70cb56ff957589752eb4125a4cab78a25a8fc52d6a09e5bd4404d48
@ -360,7 +265,7 @@ mod tests {
outputs: vec![output],
};
let hash = input_signer.signature_hash(0, 0, &previous_output, SignatureVersion::Base, SighashBase::All.into());
let hash = input_signer.signature_hash(0, &previous_output, SighashBase::All.into());
assert_eq!(hash, expected_signature_hash);
}
@ -376,13 +281,14 @@ mod tests {
let script: Script = script.into();
let expected = H256::from_reversed_str(result);
let sighash = Sighash::from_u32(SignatureVersion::Base, hash_type as u32);
let sighash = Sighash::from_u32(hash_type as u32);
let hash = signer.signature_hash_original(input_index, &script, hash_type as u32, sighash);
assert_eq!(expected, hash);
}
// These test vectors were stolen from libbtc, which is Copyright 2014 Jonas Schnelli MIT
// https://github.com/libbtc/libbtc/blob/998badcdac95a226a8f8c00c8f6abbd8a77917c1/test/tx_tests.c
#[ignore("TODO: use ZCash-specific transaction format")]
#[test]
fn test_signature_hash_libbtc() {
run_test_sighash("907c2bc503ade11cc3b04eb2918b6f547b0630ab569273824748c87ea14b0696526c66ba740200000004ab65ababfd1f9bdd4ef073c7afc4ae00da8a66f429c917a0081ad1e1dabce28d373eab81d8628de802000000096aab5253ab52000052ad042b5f25efb33beec9f3364e8a9139e8439d9d7e26529c3c30b6c3fd89f8684cfd68ea0200000009ab53526500636a52ab599ac2fe02a526ed040000000008535300516352515164370e010000000003006300ab2ec229", "", 2, 1864164639, "31af167a6cf3f9d5f6875caa4d31704ceb0eba078d132b78dab52c3b8997317e");
@ -889,17 +795,11 @@ mod tests {
#[test]
fn test_sighash_forkid_from_u32() {
assert!(!Sighash::is_defined(SignatureVersion::Base, 0xFFFFFF82));
assert!(!Sighash::is_defined(SignatureVersion::Base, 0x00000182));
assert!(!Sighash::is_defined(SignatureVersion::Base, 0x00000080));
assert!( Sighash::is_defined(SignatureVersion::Base, 0x00000001));
assert!( Sighash::is_defined(SignatureVersion::Base, 0x00000082));
assert!( Sighash::is_defined(SignatureVersion::Base, 0x00000003));
assert!(!Sighash::is_defined(SignatureVersion::ForkId, 0xFFFFFFC2));
assert!(!Sighash::is_defined(SignatureVersion::ForkId, 0x000001C2));
assert!( Sighash::is_defined(SignatureVersion::ForkId, 0x00000081));
assert!( Sighash::is_defined(SignatureVersion::ForkId, 0x000000C2));
assert!( Sighash::is_defined(SignatureVersion::ForkId, 0x00000043));
assert!(!Sighash::is_defined(0xFFFFFF82));
assert!(!Sighash::is_defined(0x00000182));
assert!(!Sighash::is_defined(0x00000080));
assert!( Sighash::is_defined(0x00000001));
assert!( Sighash::is_defined(0x00000082));
assert!( Sighash::is_defined(0x00000003));
}
}

View File

@ -3,7 +3,6 @@ use chain::constants::{
SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG,
SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG, LOCKTIME_THRESHOLD
};
use sign::SignatureVersion;
use {Script, TransactionInputSigner, Num};
/// Checks transaction signature
@ -21,7 +20,6 @@ pub trait SignatureChecker {
public: &Public,
script_code: &Script,
sighashtype: u32,
version: SignatureVersion
) -> bool;
fn check_lock_time(&self, lock_time: Num) -> bool;
@ -36,7 +34,7 @@ impl SignatureChecker for NoopSignatureChecker {
public.verify(hash, signature).unwrap_or(false)
}
fn check_signature(&self, _: &Signature, _: &Public, _: &Script, _: u32, _: SignatureVersion) -> bool {
fn check_signature(&self, _: &Signature, _: &Public, _: &Script, _: u32) -> bool {
false
}
@ -72,9 +70,8 @@ impl SignatureChecker for TransactionSignatureChecker {
public: &Public,
script_code: &Script,
sighashtype: u32,
version: SignatureVersion
) -> bool {
let hash = self.signer.signature_hash(self.input_index, self.input_amount, script_code, version, sighashtype);
let hash = self.signer.signature_hash(self.input_index, script_code, sighashtype);
self.verify_signature(signature, public, &hash)
}

View File

@ -6,3 +6,4 @@ authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
byteorder = "1.0"
primitives = { path = "../primitives" }
rustc-hex = "2"

View File

@ -0,0 +1,93 @@
#![allow(non_camel_case_types)]
use std::{fmt, io};
use hex::ToHex;
use primitives::hash::H256;
use {
Serializable, Stream,
Deserializable, Reader, Error as ReaderError
};
macro_rules! impl_fixed_array {
($name: ident, $type: ty, $len: expr) => {
/// A type for fixed-length array.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct $name(pub [$type; $len]);
impl Serializable for $name {
fn serialize(&self, stream: &mut Stream) {
for i in 0..$len {
stream.append(&self.0[i]);
}
}
fn serialized_size(&self) -> usize {
$len * ::std::mem::size_of::<$type>()
}
}
impl Deserializable for $name {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read {
//let mut array = [Default::default(); $len];
let mut array: [$type; $len] = Default::default();
for i in 0..$len {
array[i] = reader.read()?;
}
Ok($name(array))
}
}
}
}
macro_rules! impl_fixed_array_u8 {
($name: ident, $len: expr) => {
/// A type for fixed-length array.
#[derive(Clone)]
pub struct $name(pub [u8; $len]);
impl PartialEq for $name {
fn eq(&self, other: &Self) -> bool {
self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r)
}
}
impl Default for $name {
fn default() -> Self {
$name([0; $len])
}
}
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.to_hex::<String>().fmt(f)
}
}
impl Serializable for $name {
fn serialize(&self, stream: &mut Stream) {
stream.append_slice(&self.0);
}
fn serialized_size(&self) -> usize {
$len
}
}
impl Deserializable for $name {
fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read {
let mut array = [0; $len];
reader.read_slice(&mut array)?;
Ok($name(array))
}
}
}
}
impl_fixed_array!(FixedArray_H256_2, H256, 2);
impl_fixed_array_u8!(FixedArray_u8_296, 296);
impl_fixed_array_u8!(FixedArray_u8_601, 601);
impl_fixed_array!(FixedArray_u8_601_2, FixedArray_u8_601, 2);
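
These widths correspond to Zcash's Sprout JoinSplit fields (pairs of 256-bit values, a 296-byte zero-knowledge proof, two 601-byte note ciphertexts). A minimal roundtrip sketch, assuming this crate's serialize/deserialize free functions:

let proof = FixedArray_u8_296::default();
let bytes = serialize(&proof);
assert_eq!(bytes.len(), 296); // fixed width: no CompactInteger length prefix
let restored: FixedArray_u8_296 = deserialize(&bytes as &[u8]).unwrap();
assert_eq!(restored, proof);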

View File

@ -1,7 +1,9 @@
extern crate byteorder;
extern crate primitives;
extern crate rustc_hex as hex;
mod compact_integer;
mod fixed_array;
mod impls;
mod list;
mod reader;
@ -9,11 +11,12 @@ mod stream;
pub use primitives::{hash, bytes, compact};
pub use fixed_array::*;
pub use compact_integer::CompactInteger;
pub use list::List;
pub use reader::{Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator, Error};
pub use stream::{
Stream, Serializable, serialize, serialize_with_flags, serialize_list, serialized_list_size,
serialized_list_size_with_flags, SERIALIZE_TRANSACTION_WITNESS,
pub use reader::{
Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator, Error,
};
pub use stream::{
Stream, Serializable, serialize, serialize_list, serialized_list_size,
};

View File

@ -119,6 +119,21 @@ impl<R> Reader<R> where R: io::Read {
Ok(result)
}
pub fn read_list_exact<T>(&mut self, expected_len: usize) -> Result<Vec<T>, Error> where T: Deserializable {
let len: usize = try!(self.read::<CompactInteger>()).into();
if len != expected_len {
return Err(Error::MalformedData);
}
let mut result = Vec::with_capacity(len);
for _ in 0..len {
result.push(try!(self.read()));
}
Ok(result)
}
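
A hedged usage sketch: fixed-count lists in the Zcash structures can now fail fast when the encoded length prefix disagrees with the expected count; the element type and count here are illustrative:

// Expect exactly two H256 values behind a CompactInteger length prefix;
// any other length yields Error::MalformedData.
let pair: Vec<H256> = reader.read_list_exact(2)?;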
#[cfg_attr(feature="cargo-clippy", allow(wrong_self_convention))]
pub fn is_finished(&mut self) -> bool {
if self.peeked.is_some() {

View File

@ -4,23 +4,14 @@ use std::borrow::Borrow;
use compact_integer::CompactInteger;
use bytes::Bytes;
/// Serialize transaction witness data (when this flag is set).
pub const SERIALIZE_TRANSACTION_WITNESS: u32 = 0x40000000;
pub fn serialize<T>(t: &T) -> Bytes where T: Serializable{
let mut stream = Stream::default();
stream.append(t);
stream.out()
}
pub fn serialize_with_flags<T>(t: &T, flags: u32) -> Bytes where T: Serializable{
let mut stream = Stream::with_flags(flags);
let mut stream = Stream::new();
stream.append(t);
stream.out()
}
pub fn serialize_list<T, K>(t: &[K]) -> Bytes where T: Serializable, K: Borrow<T> {
let mut stream = Stream::default();
let mut stream = Stream::new();
stream.append_list(t);
stream.out()
}
@ -30,11 +21,6 @@ pub fn serialized_list_size<T, K>(t: &[K]) -> usize where T: Serializable, K: Bo
t.iter().map(Borrow::borrow).map(Serializable::serialized_size).sum::<usize>()
}
pub fn serialized_list_size_with_flags<T, K>(t: &[K], flags: u32) -> usize where T: Serializable, K: Borrow<T> {
CompactInteger::from(t.len()).serialized_size() +
t.iter().map(Borrow::borrow).map(|i| Serializable::serialized_size_with_flags(i, flags)).sum::<usize>()
}
pub trait Serializable {
/// Serialize the struct and appends it to the end of stream.
fn serialize(&self, s: &mut Stream);
@ -44,35 +30,18 @@ pub trait Serializable {
// fallback implementation
serialize(self).len()
}
/// Hint about the size of serialized struct with given flags.
fn serialized_size_with_flags(&self, flags: u32) -> usize where Self: Sized {
// fallback implementation
serialize_with_flags(self, flags).len()
}
}
/// Stream used for serialization of Bitcoin structures
#[derive(Default)]
pub struct Stream {
buffer: Vec<u8>,
flags: u32,
}
impl Stream {
/// New stream
pub fn new() -> Self {
Stream { buffer: Vec::new(), flags: 0 }
}
/// Create stream with given flags.
pub fn with_flags(flags: u32) -> Self {
Stream { buffer: Vec::new(), flags: flags }
}
/// Are transactions written to this stream with witness data?
pub fn include_transaction_witness(&self) -> bool {
(self.flags & SERIALIZE_TRANSACTION_WITNESS) != 0
Stream { buffer: Vec::new() }
}
/// Serializes the struct and appends it to the end of stream.

View File

@ -1,5 +1,6 @@
use std::sync::Arc;
use chain::BlockHeader;
use primitives::compact::Compact;
use {
BestBlock, BlockProvider, BlockHeaderProvider, TransactionProvider, TransactionMetaProvider,
TransactionOutputProvider, BlockChain, IndexedBlockProvider, Forkable, Error
@ -27,7 +28,7 @@ pub trait Store: AsSubstore {
fn best_header(&self) -> BlockHeader;
/// get blockchain difficulty
fn difficulty(&self) -> f64;
fn difficulty(&self, max_bits: Compact) -> f64;
}
/// Allows casting Arc<Store> to reference to any substore type

View File

@ -147,7 +147,7 @@ mod tests {
use std::sync::Arc;
use db::{BlockChainDatabase};
use network::{ConsensusParams, ConsensusFork, Network};
use network::{ConsensusParams, Network};
use verification::VerificationLevel;
use super::super::Error;
use super::{BlocksWriter, MAX_ORPHANED_BLOCKS};
@ -163,7 +163,7 @@ mod tests {
#[test]
fn blocks_writer_appends_blocks() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Mainnet), default_verification_params());
blocks_target.append_block(test_data::block_h1().into()).expect("Expecting no error");
assert_eq!(db.best_block().number, 1);
}
@ -172,7 +172,7 @@ mod tests {
fn blocks_writer_verification_error() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_ORPHANED_BLOCKS + 2) as u32, 1);
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet), default_verification_params());
for (index, block) in blocks.into_iter().skip(1).enumerate() {
match blocks_target.append_block(block.into()) {
Err(Error::TooManyOrphanBlocks) if index == MAX_ORPHANED_BLOCKS => (),
@ -186,7 +186,7 @@ mod tests {
#[test]
fn blocks_writer_out_of_order_block() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet), default_verification_params());
let wrong_block = test_data::block_builder()
.header().parent(test_data::genesis().hash()).build()
@ -201,10 +201,7 @@ mod tests {
#[test]
fn blocks_writer_append_to_existing_db() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Testnet, ConsensusFork::BitcoinCore), default_verification_params());
assert!(blocks_target.append_block(test_data::genesis().into()).is_ok());
assert_eq!(db.best_block().number, 0);
let mut blocks_target = BlocksWriter::new(db.clone(), ConsensusParams::new(Network::Mainnet), default_verification_params());
assert!(blocks_target.append_block(test_data::block_h1().into()).is_ok());
assert_eq!(db.best_block().number, 1);

View File

@ -147,22 +147,6 @@ impl InboundSyncConnection for InboundConnection {
self.node.on_feefilter(self.peer_index, message);
}
fn on_send_compact(&self, message: types::SendCompact) {
self.node.on_send_compact(self.peer_index, message);
}
fn on_compact_block(&self, message: types::CompactBlock) {
self.node.on_compact_block(self.peer_index, message);
}
fn on_get_block_txn(&self, message: types::GetBlockTxn) {
self.node.on_get_block_txn(self.peer_index, message);
}
fn on_block_txn(&self, message: types::BlockTxn) {
self.node.on_block_txn(self.peer_index, message);
}
fn on_notfound(&self, message: types::NotFound) {
self.node.on_notfound(self.peer_index, message);
}
@ -196,8 +180,6 @@ pub mod tests {
fn send_getheaders(&self, _message: &types::GetHeaders) { *self.messages.lock().entry("getheaders".to_owned()).or_insert(0) += 1; }
fn send_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("transaction".to_owned()).or_insert(0) += 1; }
fn send_block(&self, _message: &types::Block) { *self.messages.lock().entry("block".to_owned()).or_insert(0) += 1; }
fn send_witness_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("witness_transaction".to_owned()).or_insert(0) += 1; }
fn send_witness_block(&self, _message: &types::Block) { *self.messages.lock().entry("witness_block".to_owned()).or_insert(0) += 1; }
fn send_headers(&self, _message: &types::Headers) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn respond_headers(&self, _message: &types::Headers, _id: RequestId) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn send_mempool(&self, _message: &types::MemPool) { *self.messages.lock().entry("mempool".to_owned()).or_insert(0) += 1; }
@ -207,10 +189,6 @@ pub mod tests {
fn send_merkleblock(&self, _message: &types::MerkleBlock) { *self.messages.lock().entry("merkleblock".to_owned()).or_insert(0) += 1; }
fn send_sendheaders(&self, _message: &types::SendHeaders) { *self.messages.lock().entry("sendheaders".to_owned()).or_insert(0) += 1; }
fn send_feefilter(&self, _message: &types::FeeFilter) { *self.messages.lock().entry("feefilter".to_owned()).or_insert(0) += 1; }
fn send_send_compact(&self, _message: &types::SendCompact) { *self.messages.lock().entry("sendcompact".to_owned()).or_insert(0) += 1; }
fn send_compact_block(&self, _message: &types::CompactBlock) { *self.messages.lock().entry("cmpctblock".to_owned()).or_insert(0) += 1; }
fn send_get_block_txn(&self, _message: &types::GetBlockTxn) { *self.messages.lock().entry("getblocktxn".to_owned()).or_insert(0) += 1; }
fn send_block_txn(&self, _message: &types::BlockTxn) { *self.messages.lock().entry("blocktxn".to_owned()).or_insert(0) += 1; }
fn send_notfound(&self, _message: &types::NotFound) { *self.messages.lock().entry("notfound".to_owned()).or_insert(0) += 1; }
fn ignored(&self, _id: RequestId) {}
fn close(&self) {}

View File

@ -42,7 +42,6 @@ pub use types::PeersRef;
use std::sync::Arc;
use parking_lot::RwLock;
use message::Services;
use network::{Network, ConsensusParams};
use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
@ -109,10 +108,7 @@ pub fn create_local_sync_node(consensus: ConsensusParams, db: storage::SharedSto
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone()));
let sync_chain = SyncChain::new(db.clone(), consensus.clone(), memory_pool.clone());
if sync_chain.is_segwit_possible() {
peers.require_peer_services(Services::default().with_witness(true));
}
let sync_chain = SyncChain::new(db.clone(), memory_pool.clone());
let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), consensus.clone()));
let sync_executor = SyncExecutor::new(peers.clone());

View File

@ -12,7 +12,6 @@ use synchronization_server::{Server, ServerTask};
use synchronization_verifier::{TransactionVerificationSink};
use primitives::hash::H256;
use miner::BlockTemplate;
use verification::median_timestamp_inclusive;
use synchronization_peers::{TransactionAnnouncementType, BlockAnnouncementType};
use types::{PeerIndex, RequestId, StorageRef, MemoryPoolRef, PeersRef, ExecutorRef,
ClientRef, ServerRef, SynchronizationStateRef, SyncListenerRef};
@ -179,17 +178,6 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
self.server.execute(ServerTask::Mempool(peer_index));
}
/// When peer asks us for specific transactions from a specific block
pub fn on_get_block_txn(&self, peer_index: PeerIndex, message: types::GetBlockTxn) {
if self.state.synchronizing() {
trace!(target: "sync", "Ignored `getblocktxn` message from peer#{}", peer_index);
return;
}
trace!(target: "sync", "Got `getblocktxn` message from peer#{}", peer_index);
self.server.execute(ServerTask::GetBlockTxn(peer_index, message));
}
/// When peer sets bloom filter for connection
pub fn on_filterload(&self, peer_index: PeerIndex, message: types::FilterLoad) {
trace!(target: "sync", "Got `filterload` message from peer#{}", peer_index);
@ -220,27 +208,6 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
self.peers.set_block_announcement_type(peer_index, BlockAnnouncementType::SendHeaders);
}
/// When peer asks us to announce new blocks using cmpctblock message
pub fn on_send_compact(&self, peer_index: PeerIndex, message: types::SendCompact) {
trace!(target: "sync", "Got `sendcmpct` message from peer#{}", peer_index);
// The second integer SHALL be interpreted as a little-endian version number. Nodes sending a sendcmpct message MUST currently set this value to 1.
// TODO: version 2 supports segregated witness transactions
if message.second != 1 {
return;
}
// Upon receipt of a "sendcmpct" message with the first and second integers set to 1, the node SHOULD announce new blocks by sending a cmpctblock message.
if message.first {
self.peers.set_block_announcement_type(peer_index, BlockAnnouncementType::SendCompactBlock);
}
// else:
// Upon receipt of a "sendcmpct" message with the first integer set to 0, the node SHOULD NOT announce new blocks by sending a cmpctblock message,
// but SHOULD announce new blocks by sending invs or headers, as defined by BIP130.
// => work as before
}
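For reference, a minimal standalone sketch of the BIP152 rule that the removed handler above implements; the enum and function names here are illustrative, not the crate's API:

/// Block announcement modes relevant to `sendcmpct` (illustrative subset).
#[derive(Debug, PartialEq)]
enum Announcement {
    /// Keep announcing via inv/headers messages (BIP130).
    KeepCurrent,
    /// Announce new blocks via `cmpctblock` messages.
    CompactBlock,
}

/// Decide the announcement mode after receiving `sendcmpct(first, second)`.
fn after_sendcmpct(first: bool, second: u64) -> Announcement {
    // The version (second integer) must currently be 1; other versions are ignored.
    if second != 1 {
        return Announcement::KeepCurrent;
    }
    // first == true => switch to cmpctblock announcements;
    // first == false => keep announcing via invs/headers.
    if first { Announcement::CompactBlock } else { Announcement::KeepCurrent }
}

With this reading, after_sendcmpct(true, 1) switches a peer to compact block announcements, while any other combination leaves the BIP130 behaviour in place.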
/// When peer sends us a merkle block
pub fn on_merkleblock(&self, peer_index: PeerIndex, _message: types::MerkleBlock) {
trace!(target: "sync", "Got `merkleblock` message from peer#{}", peer_index);
@ -248,20 +215,6 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
self.peers.misbehaving(peer_index, "Got unrequested 'merkleblock' message");
}
/// When peer sends us a compact block
pub fn on_compact_block(&self, peer_index: PeerIndex, _message: types::CompactBlock) {
trace!(target: "sync", "Got `cmpctblock` message from peer#{}", peer_index);
// we never ask for compact blocks from peers => misbehaving
self.peers.misbehaving(peer_index, "Got unrequested 'cmpctblock' message");
}
/// When peer sends us specific transactions for a specific block
pub fn on_block_txn(&self, peer_index: PeerIndex, _message: types::BlockTxn) {
trace!(target: "sync", "Got `blocktxn` message from peer#{}", peer_index);
// we never ask for this => misbehaving
self.peers.misbehaving(peer_index, "Got unrequested 'blocktxn' message");
}
/// Verify and then schedule new transaction
pub fn accept_transaction(&self, transaction: Transaction) -> Result<H256, String> {
let sink_data = Arc::new(TransactionAcceptSinkData::default());
@ -276,17 +229,14 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
/// Get block template for mining
pub fn get_block_template(&self) -> BlockTemplate {
let previous_block_height = self.storage.best_block().number;
let previous_block_header = self.storage.block_header(previous_block_height.into()).expect("best block is in db; qed");
let median_timestamp = median_timestamp_inclusive(previous_block_header.hash(), self.storage.as_block_header_provider());
let new_block_height = previous_block_height + 1;
let max_block_size = self.consensus.fork.max_block_size(new_block_height, median_timestamp);
let max_block_size = self.consensus.max_block_size();
let max_block_sigops = self.consensus.max_block_sigops();
let block_assembler = BlockAssembler {
max_block_size: max_block_size as u32,
max_block_sigops: self.consensus.fork.max_block_sigops(new_block_height, max_block_size) as u32,
max_block_sigops: max_block_sigops as u32,
};
let memory_pool = &*self.memory_pool.read();
block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, median_timestamp, &self.consensus)
block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, &self.consensus)
}
/// Install synchronization events listener
@ -344,7 +294,7 @@ pub mod tests {
use synchronization_chain::Chain;
use message::types;
use message::common::{InventoryVector, InventoryType};
use network::{ConsensusParams, ConsensusFork, Network};
use network::{ConsensusParams, Network};
use chain::Transaction;
use db::{BlockChainDatabase};
use miner::MemoryPool;
@ -378,12 +328,12 @@ pub mod tests {
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
let chain = Chain::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), memory_pool.clone());
let chain = Chain::new(storage.clone(), memory_pool.clone());
let sync_peers = Arc::new(PeersImpl::default());
let executor = DummyTaskExecutor::new();
let server = Arc::new(DummyServer::new());
let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore)));
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Mainnet)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier);
let mut verifier = match verifier {
Some(verifier) => verifier,
@ -391,7 +341,7 @@ pub mod tests {
};
verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
let client = SynchronizationClient::new(sync_state.clone(), client_core, verifier);
let local_node = LocalNode::new(ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore), storage, memory_pool, sync_peers, sync_state, executor.clone(), client, server.clone());
let local_node = LocalNode::new(ConsensusParams::new(Network::Mainnet), storage, memory_pool, sync_peers, sync_state, executor.clone(), client, server.clone());
(executor, server, local_node)
}
@ -430,7 +380,7 @@ pub mod tests {
let result = local_node.accept_transaction(transaction.clone());
assert_eq!(result, Ok(transaction_hash.clone()));
assert_eq!(executor.take_tasks(), vec![Task::RelayNewTransaction(transaction.into(), 83333333)]);
assert_eq!(executor.take_tasks(), vec![Task::RelayNewTransaction(transaction.into(), 0)]);
}
#[test]

View File

@ -4,7 +4,6 @@ use linked_hash_map::LinkedHashMap;
use chain::{BlockHeader, Transaction, IndexedBlockHeader, IndexedBlock, IndexedTransaction};
use storage;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation};
use network::ConsensusParams;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use utils::{BestHeadersChain, BestHeadersChainInformation, HashQueueChain, HashPosition};
@ -115,9 +114,6 @@ pub struct Chain {
memory_pool: MemoryPoolRef,
/// Blocks that have been marked as dead-ends
dead_end_blocks: HashSet<H256>,
/// Is SegWit possible on this chain? SegWit inventory types are used when blocks/transactions are
/// requested and this flag is true.
is_segwit_possible: bool,
}
impl BlockState {
@ -142,13 +138,12 @@ impl BlockState {
impl Chain {
/// Create new `Chain` with given storage
pub fn new(storage: StorageRef, consensus: ConsensusParams, memory_pool: MemoryPoolRef) -> Self {
pub fn new(storage: StorageRef, memory_pool: MemoryPoolRef) -> Self {
// we only work with storages with genesis block
let genesis_block_hash = storage.block_hash(0)
.expect("storage with genesis block is required");
let best_storage_block = storage.best_block();
let best_storage_block_hash = best_storage_block.hash.clone();
let is_segwit_possible = consensus.is_segwit_possible();
Chain {
genesis_block_hash: genesis_block_hash,
@ -159,7 +154,6 @@ impl Chain {
verifying_transactions: LinkedHashMap::new(),
memory_pool: memory_pool,
dead_end_blocks: HashSet::new(),
is_segwit_possible,
}
}
@ -185,11 +179,6 @@ impl Chain {
self.memory_pool.clone()
}
/// Is SegWit possible on this chain?
pub fn is_segwit_possible(&self) -> bool {
self.is_segwit_possible
}
/// Get number of blocks in given state
pub fn length_of_blocks_state(&self, state: BlockState) -> BlockHeight {
match state {
@ -734,7 +723,6 @@ mod tests {
use chain::{Transaction, IndexedBlockHeader};
use db::BlockChainDatabase;
use miner::MemoryPool;
use network::{Network, ConsensusParams, ConsensusFork};
use primitives::hash::H256;
use super::{Chain, BlockState, TransactionState, BlockInsertionResult};
use utils::HashPosition;
@ -743,7 +731,7 @@ mod tests {
fn chain_empty() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let db_best_block = db.best_block();
let chain = Chain::new(db.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let chain = Chain::new(db.clone(), Arc::new(RwLock::new(MemoryPool::new())));
assert_eq!(chain.information().scheduled, 0);
assert_eq!(chain.information().requested, 0);
assert_eq!(chain.information().verifying, 0);
@ -760,7 +748,7 @@ mod tests {
#[test]
fn chain_block_path() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db.clone(), Arc::new(RwLock::new(MemoryPool::new())));
// add 6 blocks to scheduled queue
let blocks = test_data::build_n_empty_blocks_from_genesis(6, 0);
@ -812,7 +800,7 @@ mod tests {
#[test]
fn chain_block_locator_hashes() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let genesis_hash = chain.best_block().hash;
assert_eq!(chain.block_locator_hashes(), vec![genesis_hash.clone()]);
@ -897,7 +885,7 @@ mod tests {
#[test]
fn chain_transaction_state() {
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
let genesis_block = test_data::genesis();
let block1 = test_data::block_h1();
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
@ -934,7 +922,7 @@ mod tests {
let tx2_hash = tx2.hash();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2.into());
@ -958,7 +946,7 @@ mod tests {
.set_default_input(0).set_output(400).store(test_chain); // t4
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(test_chain.at(0).into());
chain.verify_transaction(test_chain.at(1).into());
chain.verify_transaction(test_chain.at(2).into());
@ -980,7 +968,7 @@ mod tests {
.set_default_input(0).set_output(400).store(test_chain); // t4
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(test_chain.at(0).into());
chain.insert_verified_transaction(test_chain.at(1).into());
chain.insert_verified_transaction(test_chain.at(2).into());
@ -996,8 +984,8 @@ mod tests {
#[test]
fn memory_pool_transactions_are_reverified_after_reorganization() {
let b0 = test_data::block_builder().header().build().build();
let b1 = test_data::block_builder().header().nonce(1).parent(b0.hash()).build().build();
let b2 = test_data::block_builder().header().nonce(2).parent(b0.hash()).build().build();
let b1 = test_data::block_builder().header().nonce(1.into()).parent(b0.hash()).build().build();
let b2 = test_data::block_builder().header().nonce(2.into()).parent(b0.hash()).build().build();
let b3 = test_data::block_builder().header().parent(b2.hash()).build().build();
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
@ -1006,7 +994,7 @@ mod tests {
let tx2_hash = tx2.hash();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2.into());
@ -1029,13 +1017,13 @@ mod tests {
fn fork_chain_block_transaction_is_removed_from_on_block_insert() {
let genesis = test_data::genesis();
let b0 = test_data::block_builder().header().parent(genesis.hash()).build().build(); // genesis -> b0
let b1 = test_data::block_builder().header().nonce(1).parent(b0.hash()).build()
let b1 = test_data::block_builder().header().nonce(1.into()).parent(b0.hash()).build()
.transaction().output().value(10).build().build()
.build(); // genesis -> b0 -> b1[tx1]
let b2 = test_data::block_builder().header().parent(b1.hash()).build()
.transaction().output().value(20).build().build()
.build(); // genesis -> b0 -> b1[tx1] -> b2[tx2]
let b3 = test_data::block_builder().header().nonce(2).parent(b0.hash()).build()
let b3 = test_data::block_builder().header().nonce(2.into()).parent(b0.hash()).build()
.transaction().output().value(30).build().build()
.build(); // genesis -> b0 -> b3[tx3]
let b4 = test_data::block_builder().header().parent(b3.hash()).build()
@ -1054,7 +1042,7 @@ mod tests {
let tx5 = b5.transactions[0].clone();
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![genesis.into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx3.into());
chain.insert_verified_transaction(tx4.into());
@ -1086,7 +1074,7 @@ mod tests {
fn double_spend_transaction_is_removed_from_memory_pool_when_output_is_spent_in_block_transaction() {
let genesis = test_data::genesis();
let tx0 = genesis.transactions[0].clone();
let b0 = test_data::block_builder().header().nonce(1).parent(genesis.hash()).build()
let b0 = test_data::block_builder().header().nonce(1.into()).parent(genesis.hash()).build()
.transaction()
.lock_time(1)
.input().hash(tx0.hash()).index(0).build()
@ -1098,7 +1086,7 @@ mod tests {
// insert tx2 to memory pool
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx2.clone().into());
chain.insert_verified_transaction(tx3.clone().into());
// insert verified block with tx1
@ -1117,7 +1105,7 @@ mod tests {
.reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2
let db = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let mut chain = Chain::new(db, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), Arc::new(RwLock::new(MemoryPool::new())));
let mut chain = Chain::new(db, Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(data_chain.at(1).into());
assert_eq!(chain.information().transactions.transactions_count, 1);
chain.insert_verified_transaction(data_chain.at(2).into());

View File

@ -226,16 +226,15 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
// else ask for all unknown transactions and blocks
let is_segwit_possible = self.chain.is_segwit_possible();
let unknown_inventory: Vec<_> = message.inventory.into_iter()
.filter(|item| {
match item.inv_type {
// check that transaction is unknown to us
InventoryType::MessageTx | InventoryType::MessageWitnessTx =>
InventoryType::MessageTx =>
self.chain.transaction_state(&item.hash) == TransactionState::Unknown
&& !self.orphaned_transactions_pool.contains(&item.hash),
// check that block is unknown to us
InventoryType::MessageBlock | InventoryType::MessageWitnessBlock => match self.chain.block_state(&item.hash) {
InventoryType::MessageBlock => match self.chain.block_state(&item.hash) {
BlockState::Unknown => !self.orphaned_blocks_pool.contains_unknown_block(&item.hash),
BlockState::DeadEnd if !self.config.close_connection_on_bad_block => true,
BlockState::DeadEnd if self.config.close_connection_on_bad_block => {
@ -245,9 +244,7 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
_ => false,
},
// we never ask for merkle blocks && we never ask for compact blocks
InventoryType::MessageCompactBlock | InventoryType::MessageFilteredBlock
| InventoryType::MessageWitnessFilteredBlock
=> false,
InventoryType::MessageFilteredBlock => false,
// unknown inventory type
InventoryType::Error => {
self.peers.misbehaving(peer_index, &format!("Provided unknown inventory type {:?}", item.hash.to_reversed_str()));
@ -255,24 +252,6 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
}
})
// we are not synchronizing =>
// 1) either segwit is active and we are connected to segwit-enabled nodes => we could ask for witness
// 2) or segwit is inactive => we shall not ask for witness
.map(|item| if !is_segwit_possible {
item
} else {
match item.inv_type {
InventoryType::MessageTx => InventoryVector {
inv_type: InventoryType::MessageWitnessTx,
hash: item.hash,
},
InventoryType::MessageBlock => InventoryVector {
inv_type: InventoryType::MessageWitnessBlock,
hash: item.hash,
},
_ => item,
}
})
.collect();
// if everything is known => ignore this message
@ -973,8 +952,6 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
let chunk_size = min(limits.max_blocks_in_request, max(hashes.len() as BlockHeight, limits.min_blocks_in_request));
let last_peer_index = peers.len() - 1;
let mut tasks: Vec<Task> = Vec::new();
let is_segwit_possible = self.chain.is_segwit_possible();
let inv_type = if is_segwit_possible { InventoryType::MessageWitnessBlock } else { InventoryType::MessageBlock };
for (peer_index, peer) in peers.into_iter().enumerate() {
// we have to request all blocks => we will request last peer for all remaining blocks
let peer_chunk_size = if peer_index == last_peer_index { hashes.len() } else { min(hashes.len(), chunk_size as usize) };
@ -988,10 +965,10 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// remember that peer is asked for these blocks
self.peers_tasks.on_blocks_requested(peer, &chunk_hashes);
// request blocks. If block is believed to have witness - ask for witness
// request blocks
let getdata = types::GetData {
inventory: chunk_hashes.into_iter().map(|h| InventoryVector {
inv_type: inv_type,
inv_type: InventoryType::MessageBlock,
hash: h,
}).collect(),
};
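To illustrate the chunking rule above, here is a self-contained sketch under assumed limits; the function and parameter names are illustrative, not the crate's API:

fn split_between_peers(total: usize, peers: usize, min_request: usize, max_request: usize) -> Vec<usize> {
    // chunk_size = min(max_blocks_in_request, max(hashes.len(), min_blocks_in_request))
    let chunk = std::cmp::min(max_request, std::cmp::max(total, min_request));
    let mut remaining = total;
    let mut chunks = Vec::with_capacity(peers);
    for peer in 0..peers {
        // the last peer is asked for all remaining blocks
        let take = if peer + 1 == peers { remaining } else { std::cmp::min(remaining, chunk) };
        chunks.push(take);
        remaining -= take;
    }
    chunks
}

For example, split_between_peers(300, 3, 32, 128) yields [128, 128, 44]: the first two peers receive full chunks and the last peer absorbs the remainder.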
@ -1258,7 +1235,7 @@ pub mod tests {
use message::common::InventoryVector;
use message::{Services, types};
use miner::MemoryPool;
use network::{ConsensusParams, ConsensusFork, Network};
use network::{ConsensusParams, Network};
use primitives::hash::H256;
use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use inbound_connection::tests::DummyOutboundSyncConnection;
@ -1309,11 +1286,11 @@ pub mod tests {
};
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let chain = Chain::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore), memory_pool.clone());
let chain = Chain::new(storage.clone(), memory_pool.clone());
let executor = DummyTaskExecutor::new();
let config = Config { close_connection_on_bad_block: true };
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let client_core = SynchronizationClientCore::new(config, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier.clone());
{
client_core.lock().set_verify_headers(false);
@ -1339,7 +1316,7 @@ pub mod tests {
fn request_blocks(peer_index: PeerIndex, hashes: Vec<H256>) -> Task {
Task::GetData(peer_index, types::GetData {
inventory: hashes.into_iter().map(InventoryVector::witness_block).collect(),
inventory: hashes.into_iter().map(InventoryVector::block).collect(),
})
}
@ -1739,13 +1716,13 @@ pub mod tests {
sync.on_block(1, test_data::block_h2().into());
sync.on_inventory(1, types::Inv::with_inventory(vec![
InventoryVector::witness_block(test_data::block_h1().hash()),
InventoryVector::witness_block(test_data::block_h2().hash()),
InventoryVector::block(test_data::block_h1().hash()),
InventoryVector::block(test_data::block_h2().hash()),
]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(1, types::GetData::with_inventory(vec![
InventoryVector::witness_block(test_data::block_h1().hash())
InventoryVector::block(test_data::block_h1().hash())
]))]);
}
@ -1873,11 +1850,11 @@ pub mod tests {
fn transaction_is_requested_when_not_synchronizing() {
let (executor, core, sync) = create_sync(None, None);
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(0))]));
{
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]))]);
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::tx(H256::from(0))]))]);
}
let b1 = test_data::block_h1();
@ -1886,28 +1863,28 @@ pub mod tests {
assert!(core.lock().information().state.is_nearly_saturated());
{ executor.take_tasks(); } // forget tasks
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(1))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(1))]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::witness_tx(H256::from(1))]))]);
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![InventoryVector::tx(H256::from(1))]))]);
}
#[test]
fn same_transaction_can_be_requested_twice() {
let (executor, _, sync) = create_sync(None, None);
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(0))]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![
InventoryVector::witness_tx(H256::from(0))
InventoryVector::tx(H256::from(0))
]))]);
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::witness_tx(H256::from(0))]));
sync.on_inventory(0, types::Inv::with_inventory(vec![InventoryVector::tx(H256::from(0))]));
let tasks = executor.take_tasks();
assert_eq!(tasks, vec![Task::GetData(0, types::GetData::with_inventory(vec![
InventoryVector::witness_tx(H256::from(0))
InventoryVector::tx(H256::from(0))
]))]);
}
@ -1916,11 +1893,11 @@ pub mod tests {
let (executor, _, sync) = create_sync(None, None);
sync.on_inventory(0, types::Inv::with_inventory(vec![
InventoryVector::witness_tx(test_data::genesis().transactions[0].hash()),
InventoryVector::witness_tx(H256::from(0)),
InventoryVector::tx(test_data::genesis().transactions[0].hash()),
InventoryVector::tx(H256::from(0)),
]));
assert_eq!(executor.take_tasks(), vec![Task::GetData(0, types::GetData::with_inventory(vec![
InventoryVector::witness_tx(H256::from(0))
InventoryVector::tx(H256::from(0))
]))]);
}
@ -2002,10 +1979,10 @@ pub mod tests {
let genesis = test_data::genesis();
let b10 = test_data::block_builder().header().parent(genesis.hash()).build().build();
let b11 = test_data::block_builder().header().nonce(1).parent(b10.hash()).build().build();
let b11 = test_data::block_builder().header().nonce(1.into()).parent(b10.hash()).build().build();
let b12 = test_data::block_builder().header().parent(b11.hash()).build().build();
let b21 = test_data::block_builder().header().nonce(2).parent(b10.hash()).build().build();
let b21 = test_data::block_builder().header().nonce(2.into()).parent(b10.hash()).build().build();
let b22 = test_data::block_builder().header().parent(b21.hash()).build().build();
let b23 = test_data::block_builder().header().parent(b22.hash()).build().build();

View File

@ -26,16 +26,8 @@ pub enum Task {
Block(PeerIndex, IndexedBlock),
/// Send merkleblock
MerkleBlock(PeerIndex, types::MerkleBlock),
/// Send cmpctblock
CompactBlock(PeerIndex, types::CompactBlock),
/// Send block with witness data
WitnessBlock(PeerIndex, IndexedBlock),
/// Send transaction
Transaction(PeerIndex, IndexedTransaction),
/// Send transaction with witness data
WitnessTransaction(PeerIndex, IndexedTransaction),
/// Send block transactions
BlockTxn(PeerIndex, types::BlockTxn),
/// Send notfound
NotFound(PeerIndex, types::NotFound),
/// Send inventory
@ -112,26 +104,6 @@ impl LocalSynchronizationTaskExecutor {
}
}
fn execute_compact_block(&self, peer_index: PeerIndex, block: types::CompactBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
let hash = block.header.header.hash();
trace!(target: "sync", "Sending compact block {} to peer#{}", hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, hash, KnownHashType::CompactBlock);
connection.send_compact_block(&block);
}
}
fn execute_witness_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending witness block {} to peer#{}", block.hash().to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, block.hash().clone(), KnownHashType::Block);
let block = types::Block {
block: block.to_raw_block(),
};
connection.send_witness_block(&block);
}
}
fn execute_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
@ -143,24 +115,6 @@ impl LocalSynchronizationTaskExecutor {
}
}
fn execute_witness_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending witness transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, transaction.hash, KnownHashType::Transaction);
let transaction = types::Tx {
transaction: transaction.raw,
};
connection.send_witness_transaction(&transaction);
}
}
fn execute_block_txn(&self, peer_index: PeerIndex, blocktxn: types::BlockTxn) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", blocktxn.request.transactions.len(), peer_index);
connection.send_block_txn(&blocktxn);
}
}
fn execute_notfound(&self, peer_index: PeerIndex, notfound: types::NotFound) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending notfound to peer#{} with {} items", peer_index, notfound.inventory.len());
@ -198,9 +152,6 @@ impl LocalSynchronizationTaskExecutor {
block.header.raw.clone(),
]), None);
},
BlockAnnouncementType::SendCompactBlock => if let Some(compact_block) = self.peers.build_compact_block(peer_index, &block) {
self.execute_compact_block(peer_index, compact_block);
},
BlockAnnouncementType::DoNotAnnounce => (),
}
}
@ -227,11 +178,7 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
Task::MemoryPool(peer_index) => self.execute_memorypool(peer_index),
Task::Block(peer_index, block) => self.execute_block(peer_index, block),
Task::MerkleBlock(peer_index, block) => self.execute_merkleblock(peer_index, block),
Task::CompactBlock(peer_index, block) => self.execute_compact_block(peer_index, block),
Task::WitnessBlock(peer_index, block) => self.execute_witness_block(peer_index, block),
Task::Transaction(peer_index, transaction) => self.execute_transaction(peer_index, transaction),
Task::WitnessTransaction(peer_index, transaction) => self.execute_witness_transaction(peer_index, transaction),
Task::BlockTxn(peer_index, blocktxn) => self.execute_block_txn(peer_index, blocktxn),
Task::NotFound(peer_index, notfound) => self.execute_notfound(peer_index, notfound),
Task::Inventory(peer_index, inventory) => self.execute_inventory(peer_index, inventory),
Task::Headers(peer_index, headers, request_id) => self.execute_headers(peer_index, headers, request_id),
@ -297,22 +244,6 @@ pub mod tests {
}
}
#[test]
fn relay_new_block_after_sendcmpct() {
let peers = Arc::new(PeersImpl::default());
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, Services::default(), c1.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, Services::default(), c2.clone());
peers.set_block_announcement_type(2, BlockAnnouncementType::SendCompactBlock);
executor.execute(Task::RelayNewBlock(test_data::genesis().into()));
assert_eq!(*c1.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c2.messages.lock().entry("cmpctblock".to_owned()).or_insert(0), 1);
}
#[test]
fn relay_new_block_after_sendheaders() {
let peers = Arc::new(PeersImpl::default());

View File

@ -392,7 +392,7 @@ mod tests {
fn manage_orphan_transactions_good() {
let config = ManageOrphanTransactionsConfig { removal_time_ms: 1000, max_number: 100 };
let mut pool = OrphanTransactionsPool::new();
let transaction = test_data::block_h170().transactions[1].clone();
let transaction = test_data::block_h522().transactions[3].clone();
let unknown_inputs: HashSet<H256> = transaction.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
pool.insert(transaction.into(), unknown_inputs);
assert_eq!(manage_orphaned_transactions(&config, &mut pool), None);
@ -405,7 +405,7 @@ mod tests {
use std::time::Duration;
let config = ManageOrphanTransactionsConfig { removal_time_ms: 0, max_number: 100 };
let mut pool = OrphanTransactionsPool::new();
let transaction = test_data::block_h170().transactions[1].clone();
let transaction = test_data::block_h522().transactions[3].clone();
let unknown_inputs: HashSet<H256> = transaction.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let transaction_hash = transaction.hash();
pool.insert(transaction.into(), unknown_inputs);
@ -419,10 +419,10 @@ mod tests {
fn manage_orphan_transactions_by_max_number() {
let config = ManageOrphanTransactionsConfig { removal_time_ms: 100, max_number: 1 };
let mut pool = OrphanTransactionsPool::new();
let transaction1 = test_data::block_h170().transactions[1].clone();
let transaction1 = test_data::block_h522().transactions[3].clone();
let unknown_inputs1: HashSet<H256> = transaction1.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let transaction1_hash = transaction1.hash();
let transaction2 = test_data::block_h182().transactions[1].clone();
let transaction2 = test_data::block_h567().transactions[1].clone();
let unknown_inputs2: HashSet<H256> = transaction2.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
pool.insert(transaction1.into(), unknown_inputs1);
pool.insert(transaction2.into(), unknown_inputs2);

View File

@ -14,8 +14,6 @@ pub enum BlockAnnouncementType {
SendInventory,
/// Send headers message with block header
SendHeaders,
/// Send cmpctblock message with this block
SendCompactBlock,
/// Do not announce blocks at all
DoNotAnnounce,
}
@ -40,8 +38,6 @@ pub struct MerkleBlockArtefacts {
/// Connected peers
pub trait Peers : Send + Sync + PeersContainer + PeersFilters + PeersOptions {
/// Require the given services from connected peers.
fn require_peer_services(&self, services: Services);
/// Get peer connection
fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef>;
}
@ -78,16 +74,12 @@ pub trait PeersFilters {
fn hash_known_as(&self, peer_index: PeerIndex, hash: H256, hash_type: KnownHashType);
/// Is given hash known by peer as hash of given type
fn is_hash_known_as(&self, peer_index: PeerIndex, hash: &H256, hash_type: KnownHashType) -> bool;
/// Build compact block using filter for given peer
fn build_compact_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<types::CompactBlock>;
/// Build merkle block using filter for given peer
fn build_merkle_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<MerkleBlockArtefacts>;
}
/// Options for peers connections
pub trait PeersOptions {
/// Does the node support SegWit?
fn is_segwit_enabled(&self, peer_index: PeerIndex) -> bool;
/// Set up new block announcement type for the connection
fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType);
/// Set up new transaction announcement type for the connection
@ -129,19 +121,6 @@ impl Peer {
}
impl Peers for PeersImpl {
fn require_peer_services(&self, services: Services) {
// possible optimization: force p2p level to establish connections to SegWit-nodes only
// without it, all other nodes will eventually be banned (this could take some time, though)
let mut peers = self.peers.write();
for peer_index in peers.iter().filter(|&(_, p)| !p.services.includes(&services)).map(|(p, _)| *p).collect::<Vec<_>>() {
let peer = peers.remove(&peer_index).expect("iterating peers keys; qed");
let expected_services: u64 = services.into();
let actual_services: u64 = peer.services.into();
warn!(target: "sync", "Disconnecting from peer#{} because of insufficient services. Expected {:x}, actual: {:x}", peer_index, expected_services, actual_services);
peer.connection.close();
}
}
fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef> {
self.peers.read().get(&peer_index).map(|peer| peer.connection.clone())
}
@ -235,11 +214,6 @@ impl PeersFilters for PeersImpl {
.unwrap_or(false)
}
fn build_compact_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<types::CompactBlock> {
self.peers.read().get(&peer_index)
.map(|peer| peer.filter.build_compact_block(block))
}
fn build_merkle_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<MerkleBlockArtefacts> {
self.peers.read().get(&peer_index)
.and_then(|peer| peer.filter.build_merkle_block(block))
@ -247,13 +221,6 @@ impl PeersFilters for PeersImpl {
}
impl PeersOptions for PeersImpl {
fn is_segwit_enabled(&self, peer_index: PeerIndex) -> bool {
self.peers.read()
.get(&peer_index)
.map(|peer| peer.services.witness())
.unwrap_or_default()
}
fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType) {
if let Some(peer) = self.peers.write().get_mut(&peer_index) {
peer.block_announcement_type = announcement_type;

View File

@ -1,4 +1,4 @@
use std::collections::{VecDeque, HashMap, HashSet};
use std::collections::{VecDeque, HashMap};
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
@ -9,7 +9,6 @@ use message::{types, common};
use primitives::hash::H256;
use synchronization_executor::{Task, TaskExecutor};
use types::{PeerIndex, RequestId, BlockHeight, StorageRef, ExecutorRef, MemoryPoolRef, PeersRef};
use utils::KnownHashType;
/// Synchronization server task
#[derive(Debug, PartialEq)]
@ -24,8 +23,6 @@ pub enum ServerTask {
GetHeaders(PeerIndex, types::GetHeaders, RequestId),
/// Serve 'mempool' request
Mempool(PeerIndex),
/// Serve 'getblocktxn' request
GetBlockTxn(PeerIndex, types::GetBlockTxn),
}
/// Synchronization server
@ -80,8 +77,7 @@ impl ServerTask {
| ServerTask::ReversedGetData(peer_index, _, _)
| ServerTask::GetBlocks(peer_index, _)
| ServerTask::GetHeaders(peer_index, _, _)
| ServerTask::Mempool(peer_index)
| ServerTask::GetBlockTxn(peer_index, _) => peer_index,
| ServerTask::Mempool(peer_index) => peer_index,
}
}
}
@ -234,7 +230,6 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
ServerTask::GetBlocks(peer_index, message) => self.serve_get_blocks(peer_index, message),
ServerTask::GetHeaders(peer_index, message, request_id) => self.serve_get_headers(peer_index, message, request_id),
ServerTask::Mempool(peer_index) => self.serve_mempool(peer_index),
ServerTask::GetBlockTxn(peer_index, message) => self.serve_get_block_txn(peer_index, message),
}
None
@ -273,16 +268,6 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageWitnessTx => {
// only transactions from the memory pool can be requested
if let Some(transaction) = self.memory_pool.read().read_by_hash(&next_item.hash) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with witness-tx {}", peer_index, next_item.hash.to_reversed_str());
let transaction = IndexedTransaction::new(next_item.hash, transaction.clone());
self.executor.execute(Task::WitnessTransaction(peer_index, transaction));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with block {}", peer_index, next_item.hash.to_reversed_str());
@ -311,26 +296,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageCompactBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
let message = self.peers.build_compact_block(peer_index, &block.into());
if let Some(message) = message {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with compactblock {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::CompactBlock(peer_index, message));
}
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageWitnessBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with witness-block {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::WitnessBlock(peer_index, block.into()));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::Error | common::InventoryType::MessageWitnessFilteredBlock => (),
common::InventoryType::Error => (),
}
Some(ServerTask::ReversedGetData(peer_index, message, notfound))
@ -393,58 +359,6 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
}
}
fn serve_get_block_txn(&self, peer_index: PeerIndex, message: types::GetBlockTxn) {
// according to the protocol documentation, we should only respond
// if requested block has been recently sent in 'cmpctblock'
if !self.peers.is_hash_known_as(peer_index, &message.request.blockhash, KnownHashType::CompactBlock) {
self.peers.misbehaving(peer_index, &format!("Got 'getblocktxn' message for non-sent block: {}", message.request.blockhash.to_reversed_str()));
return;
}
let block_transactions = self.storage.block_transaction_hashes(message.request.blockhash.clone().into());
let block_transactions_len = block_transactions.len();
let requested_len = message.request.indexes.len();
if requested_len > block_transactions_len {
// peer has requested more transactions than there are
self.peers.misbehaving(peer_index, &format!("Got 'getblocktxn' message with {} transactions, when there are: {}", requested_len, block_transactions_len));
return;
}
let mut requested_indexes = HashSet::new();
let mut transactions = Vec::with_capacity(message.request.indexes.len());
for transaction_index in message.request.indexes {
if transaction_index >= block_transactions_len {
// peer has requested an index larger than the index of the last transaction
self.peers.misbehaving(peer_index, &format!("Got 'getblocktxn' message with index {}, larger than index of last transaction {}", transaction_index, block_transactions_len - 1));
return;
}
if !requested_indexes.insert(transaction_index) {
// peer has requested the same index several times
self.peers.misbehaving(peer_index, &format!("Got 'getblocktxn' message where same index {} has been requested several times", transaction_index));
return;
}
if let Some(transaction) = self.storage.transaction(&block_transactions[transaction_index]) {
transactions.push(transaction);
} else {
// we have just got this hash using block_transactions_hashes
// => this is either some db error, or db has been pruned
// => we cannot skip transactions, according to the protocol description
// => ignore
warn!(target: "sync", "'getblocktxn' request from peer#{} is ignored as we have failed to find transaction {} in storage", peer_index, block_transactions[transaction_index].to_reversed_str());
return;
}
}
trace!(target: "sync", "'getblocktxn' response to peer#{} is ready with {} transactions", peer_index, transactions.len());
self.executor.execute(Task::BlockTxn(peer_index, types::BlockTxn {
request: common::BlockTransactions {
blockhash: message.request.blockhash,
transactions: transactions,
}
}));
}
fn locate_best_common_block(&self, hash_stop: &H256, locator: &[H256]) -> Option<BlockHeight> {
for block_hash in locator.iter().chain(&[hash_stop.clone()]) {
if let Some(block_number) = self.storage.block_number(block_hash) {
@ -482,7 +396,7 @@ pub mod tests {
use parking_lot::{Mutex, RwLock};
use db::{BlockChainDatabase};
use message::types;
use message::common::{self, Services, InventoryVector, InventoryType};
use message::common::{Services, InventoryVector, InventoryType};
use primitives::hash::H256;
use chain::Transaction;
use inbound_connection::tests::DummyOutboundSyncConnection;
@ -492,7 +406,6 @@ pub mod tests {
use synchronization_executor::tests::DummyTaskExecutor;
use synchronization_peers::{PeersContainer, PeersFilters, PeersImpl};
use types::{PeerIndex, StorageRef, MemoryPoolRef, PeersRef, ExecutorRef};
use utils::KnownHashType;
use super::{Server, ServerTask, ServerImpl, ServerTaskExecutor};
pub struct DummyServer {
@ -660,53 +573,6 @@ pub mod tests {
assert_eq!(tasks, vec![Task::Inventory(0, types::Inv::with_inventory(inventory))]);
}
#[test]
fn server_get_block_txn_responds_when_good_request() {
let (_, _, executor, peers, server) = create_synchronization_server();
peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
peers.hash_known_as(0, test_data::genesis().hash(), KnownHashType::CompactBlock);
// when asking for block_txns
server.execute(ServerTask::GetBlockTxn(0, types::GetBlockTxn {
request: common::BlockTransactionsRequest {
blockhash: test_data::genesis().hash(),
indexes: vec![0],
}
}));
// server responds with transactions
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::BlockTxn(0, types::BlockTxn {
request: common::BlockTransactions {
blockhash: test_data::genesis().hash(),
transactions: vec![test_data::genesis().transactions[0].clone()],
}
})]);
}
#[test]
fn server_get_block_txn_do_not_responds_when_bad_request() {
let (_, _, _, peers, server) = create_synchronization_server();
peers.insert(0, Services::default(), DummyOutboundSyncConnection::new());
assert!(peers.enumerate().contains(&0));
// when asking for block_txns
server.execute(ServerTask::GetBlockTxn(0, types::GetBlockTxn {
request: common::BlockTransactionsRequest {
blockhash: test_data::genesis().hash(),
indexes: vec![1],
}
}));
// server closes connection
use std::thread;
use std::time::Duration;
thread::park_timeout(Duration::from_millis(100)); // TODO: get rid of timeout
assert!(!peers.enumerate().contains(&0));
}
#[test]
fn server_getdata_responds_notfound_when_transaction_is_inaccessible() {
let (_, _, executor, _, server) = create_synchronization_server();
@ -902,41 +768,4 @@ pub mod tests {
}
}
}
#[test]
fn server_serves_compactblock() {
let peers = Arc::new(PeersImpl::default());
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let sync_executor = DummyTaskExecutor::new();
let executor = ServerTaskExecutor::new(peers.clone(), storage.clone(), memory_pool.clone(), sync_executor.clone());
let genesis = test_data::genesis();
let b1 = test_data::block_builder().header().parent(genesis.hash()).build()
.transaction().output().value(10).build().build()
.build(); // genesis -> b1
let b1_hash = b1.hash();
// This peer will provide blocks
storage.insert(b1.clone().into()).expect("no error");
storage.canonize(&b1.hash()).unwrap();
// This peer will receive compact block
let peer_index2 = 1; peers.insert(peer_index2, Services::default(), DummyOutboundSyncConnection::new());
// ask for data
let mut loop_task = ServerTask::GetData(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageCompactBlock, hash: b1_hash.clone() },
]});
while let Some(new_task) = executor.execute(loop_task) {
loop_task = new_task;
}
let tasks = sync_executor.take_tasks();
assert_eq!(tasks.len(), 1);
match tasks[0] {
Task::CompactBlock(_, _) => (),
_ => panic!("unexpected"),
}
}
}

View File

@ -263,15 +263,14 @@ pub mod tests {
use std::sync::atomic::Ordering;
use std::collections::{HashSet, HashMap};
use db::BlockChainDatabase;
use network::{Network, ConsensusParams, ConsensusFork};
use verification::{VerificationLevel, BackwardsCompatibleChainVerifier as ChainVerifier, Error as VerificationError, TransactionError};
use network::{Network, ConsensusParams};
use verification::{VerificationLevel, BackwardsCompatibleChainVerifier as ChainVerifier, Error as VerificationError};
use synchronization_client_core::CoreVerificationSink;
use synchronization_executor::tests::DummyTaskExecutor;
use primitives::hash::H256;
use chain::{IndexedBlock, IndexedTransaction};
use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, AsyncVerifier, VerificationTask, ChainVerifierWrapper};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use script::Error as ScriptError;
use VerificationParameters;
#[derive(Default)]
@ -351,7 +350,7 @@ pub mod tests {
#[test]
fn verifier_wrapper_switches_to_full_mode() {
let storage: StorageRef = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
// switching to full verification when block is already in db
assert_eq!(ChainVerifierWrapper::new(verifier.clone(), &storage, VerificationParameters {
@ -374,10 +373,11 @@ pub mod tests {
fn verification_level_header_accept_incorrect_transaction() {
let mut blocks: Vec<IndexedBlock> = vec![test_data::genesis().into()];
let mut rolling_hash = blocks[0].hash().clone();
for _ in 1..101 {
for i in 1..101 {
let next_block = test_data::block_builder()
.transaction()
.coinbase()
.version(i)
.output().value(5000000000).build()
.build()
.merkled_header()
@ -389,10 +389,10 @@ pub mod tests {
blocks.push(next_block.into());
}
let coinbase_transaction_hash = blocks[0].transactions[0].hash.clone();
let coinbase_transaction_hash = blocks[1].transactions[0].hash.clone();
let last_block_hash = blocks[blocks.len() - 1].hash().clone();
let storage: StorageRef = Arc::new(BlockChainDatabase::init_test_chain(blocks));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let bad_transaction_block: IndexedBlock = test_data::block_builder()
.transaction().coinbase().output().value(50).build().build()
.transaction()
@ -414,17 +414,18 @@ pub mod tests {
assert_eq!(wrapper.verify_block(&bad_transaction_block), Ok(()));
// Error when tx script is checked
/* TODO: fixme
let wrapper = ChainVerifierWrapper::new(verifier, &storage, VerificationParameters {
verification_level: VerificationLevel::Full,
verification_edge: 1.into(),
});
assert_eq!(wrapper.verify_block(&bad_transaction_block), Err(VerificationError::Transaction(1, TransactionError::Signature(0, ScriptError::InvalidStackOperation))));
assert_eq!(wrapper.verify_block(&bad_transaction_block), Err(VerificationError::Transaction(1, TransactionError::Signature(0, ScriptError::InvalidStackOperation))));*/
}
#[test]
fn verification_level_none_accept_incorrect_block() {
let storage: StorageRef = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore)));
let verifier = Arc::new(ChainVerifier::new(storage.clone(), ConsensusParams::new(Network::Unitest)));
let bad_block: IndexedBlock = test_data::block_builder().header().build().build().into();
// Ok(()) when nothing is verified

View File

@ -1,121 +0,0 @@
use std::collections::HashSet;
use rand::{thread_rng, Rng};
use bitcrypto::{sha256, siphash24};
use byteorder::{LittleEndian, ByteOrder};
use chain::{BlockHeader, ShortTransactionID, IndexedBlock};
use message::common::{BlockHeaderAndIDs, PrefilledTransaction};
use primitives::hash::H256;
use ser::{Stream, Serializable};
/// Maximum size of prefilled transactions in compact block
const MAX_COMPACT_BLOCK_PREFILLED_SIZE: usize = 10 * 1024;
pub fn build_compact_block(block: &IndexedBlock, prefilled_transactions_indexes: HashSet<usize>) -> BlockHeaderAndIDs {
let nonce: u64 = thread_rng().gen();
let prefilled_transactions_len = prefilled_transactions_indexes.len();
let mut short_ids: Vec<ShortTransactionID> = Vec::with_capacity(block.transactions.len() - prefilled_transactions_len);
let mut prefilled_transactions: Vec<PrefilledTransaction> = Vec::with_capacity(prefilled_transactions_len);
let mut prefilled_transactions_size: usize = 0;
let (key0, key1) = short_transaction_id_keys(nonce, &block.header.raw);
for (transaction_index, transaction) in block.transactions.iter().enumerate() {
let transaction_size = transaction.raw.serialized_size();
if prefilled_transactions_size + transaction_size < MAX_COMPACT_BLOCK_PREFILLED_SIZE
&& prefilled_transactions_indexes.contains(&transaction_index) {
prefilled_transactions_size += transaction_size;
prefilled_transactions.push(PrefilledTransaction {
index: transaction_index,
transaction: transaction.raw.clone(),
})
} else {
short_ids.push(short_transaction_id(key0, key1, &transaction.hash));
}
}
BlockHeaderAndIDs {
header: block.header.raw.clone(),
nonce: nonce,
short_ids: short_ids,
prefilled_transactions: prefilled_transactions,
}
}
pub fn short_transaction_id_keys(nonce: u64, block_header: &BlockHeader) -> (u64, u64) {
// Short transaction IDs are used to represent a transaction without sending a full 256-bit hash. They are calculated by:
// 1) single-SHA256 hashing the block header with the nonce appended (in little-endian)
let mut stream = Stream::new();
stream.append(block_header);
stream.append(&nonce);
let block_header_with_nonce_hash = sha256(&stream.out());
// 2) Running SipHash-2-4 with the input being the transaction ID and the keys (k0/k1) set to the first two little-endian
// 64-bit integers from the above hash, respectively.
let key0 = LittleEndian::read_u64(&block_header_with_nonce_hash[0..8]);
let key1 = LittleEndian::read_u64(&block_header_with_nonce_hash[8..16]);
(key0, key1)
}
pub fn short_transaction_id(key0: u64, key1: u64, transaction_hash: &H256) -> ShortTransactionID {
// 2) Running SipHash-2-4 with the input being the transaction ID and the keys (k0/k1) set to the first two little-endian
// 64-bit integers from the above hash, respectively.
let siphash_transaction_hash = siphash24(key0, key1, &**transaction_hash);
// 3) Dropping the 2 most significant bytes from the SipHash output to make it 6 bytes.
let mut siphash_transaction_hash_bytes = [0u8; 8];
LittleEndian::write_u64(&mut siphash_transaction_hash_bytes, siphash_transaction_hash);
siphash_transaction_hash_bytes[0..6].into()
}
#[cfg(test)]
mod tests {
extern crate test_data;
use std::collections::HashSet;
use chain::{BlockHeader, Transaction, ShortTransactionID};
use message::common::{BlockHeaderAndIDs, PrefilledTransaction};
use super::*;
#[test]
fn short_transaction_id_is_correct() {
// https://webbtc.com/tx/fa755807ab9f3ca8a9b25982570700f3f94bb0627f373893c3cfe79b5cf16def
let transaction: Transaction = "01000000015fe01688dd8ae4428e21835c0e1b7af571c4223658d94da0c123e6fd7399862a010000006b483045022100f9e6d1bd3c9f54dcc72405994ec9ac2795878dd0b3cfbdc52bed28c2737fbecc02201fd68deab17bfaef1626e232cc4488dc273ba6fa5d807712b111d017cb96e0990121021fff64d1a21ede90d77cafa35fe7621db8aa433d947267980b395c35d23bd87fffffffff021ea56f72000000001976a9146fae1c8e7a648fff905dfdac9b019d3e887d7e8f88ac80f0fa02000000001976a9147f29b567c7dd9fc59cd3a7f716914966cc91ffa188ac00000000".into();
let transaction_hash = transaction.hash();
// https://webbtc.com/block/000000000000000001582cb2307ac43f3b4b268f2a75d3581d0babd48df1c300
let block_header: BlockHeader = "000000205a54771c6a1a2bcc8f3412184f319dc02f7258b56fd5060100000000000000001de7a03cefe565d11cdfa369f6ffe59b9368a257203726c9cc363d31b4e3c2ebca4f3c58d4e6031830ccfd80".into();
let nonce = 13450019974716797918_u64;
let (key0, key1) = short_transaction_id_keys(nonce, &block_header);
let actual_id = short_transaction_id(key0, key1, &transaction_hash);
let expected_id: ShortTransactionID = "036e8b8b8f00".into();
assert_eq!(expected_id, actual_id);
}
#[test]
fn compact_block_is_built_correctly() {
let block = test_data::block_builder().header().parent(test_data::genesis().hash()).build()
.transaction().output().value(10).build().build()
.transaction().output().value(20).build().build()
.transaction().output().value(30).build().build()
.build(); // genesis -> block
let prefilled: HashSet<_> = vec![1].into_iter().collect();
let compact_block = build_compact_block(&block.clone().into(), prefilled);
let (key0, key1) = short_transaction_id_keys(compact_block.nonce, &block.block_header);
let short_ids = vec![
short_transaction_id(key0, key1, &block.transactions[0].hash()),
short_transaction_id(key0, key1, &block.transactions[2].hash()),
];
assert_eq!(compact_block, BlockHeaderAndIDs {
header: block.block_header.clone(),
nonce: compact_block.nonce,
short_ids: short_ids,
prefilled_transactions: vec![
PrefilledTransaction {
index: 1,
transaction: block.transactions[1].clone(),
}
],
});
}
}

View File

@ -4,7 +4,7 @@ use message::types;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use synchronization_peers::MerkleBlockArtefacts;
use utils::{KnownHashFilter, KnownHashType, BloomFilter, FeeRateFilter, build_compact_block, build_partial_merkle_tree};
use utils::{KnownHashFilter, KnownHashType, BloomFilter, FeeRateFilter, build_partial_merkle_tree};
/// Filter, which controls data relayed over connection.
#[derive(Debug, Default)]
@ -60,17 +60,6 @@ impl ConnectionFilter {
self.fee_rate_filter.set_min_fee_rate(message);
}
/// Convert block to compact block using this filter
pub fn build_compact_block(&self, block: &IndexedBlock) -> types::CompactBlock {
let unknown_transaction_indexes = block.transactions.iter().enumerate()
.filter(|&(_, tx)| !self.known_hash_filter.contains(&tx.hash, KnownHashType::Transaction))
.map(|(idx, _)| idx)
.collect();
types::CompactBlock {
header: build_compact_block(block, unknown_transaction_indexes),
}
}
/// Convert `Block` to `MerkleBlock` using this filter
pub fn build_merkle_block(&self, block: &IndexedBlock) -> Option<MerkleBlockArtefacts> {
if !self.bloom_filter.is_set() {
@ -146,7 +135,7 @@ pub mod tests {
fn filter_rejects_block_known() {
let mut filter = ConnectionFilter::default();
filter.hash_known_as(test_data::block_h1().hash(), KnownHashType::Block);
filter.hash_known_as(test_data::block_h2().hash(), KnownHashType::CompactBlock);
filter.hash_known_as(test_data::block_h2().hash(), KnownHashType::Block);
assert!(!filter.filter_block(&test_data::block_h1().hash()));
assert!(!filter.filter_block(&test_data::block_h2().hash()));
assert!(filter.filter_block(&test_data::genesis().hash()));

View File

@ -11,8 +11,6 @@ pub enum KnownHashType {
Transaction,
/// Peer knows block with this hash
Block,
/// Peer knows compact block with this hash
CompactBlock,
}
/// Known-hashes filter
@ -50,8 +48,7 @@ impl KnownHashFilter {
/// Filter block using its hash
pub fn filter_block(&self, hash: &H256) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type != KnownHashType::Block
&& *stored_hash_type != KnownHashType::CompactBlock)
.map(|stored_hash_type| *stored_hash_type != KnownHashType::Block)
.unwrap_or(true)
}
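	// Reading the above: an unknown hash passes the filter (`unwrap_or(true)`),
	// while a hash already recorded as KnownHashType::Block is filtered out.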
@ -78,7 +75,7 @@ mod tests {
fn known_hash_filter_block() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(1), KnownHashType::Block);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(!filter.filter_block(&H256::from(0)));
assert!(!filter.filter_block(&H256::from(1)));
@ -90,7 +87,7 @@ mod tests {
fn known_hash_filter_transaction() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(1), KnownHashType::Block);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(filter.filter_transaction(&H256::from(0)));
assert!(filter.filter_transaction(&H256::from(1)));
@ -102,16 +99,14 @@ mod tests {
fn known_hash_filter_contains() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(1), KnownHashType::Block);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(filter.contains(&H256::from(0), KnownHashType::Block));
assert!(!filter.contains(&H256::from(0), KnownHashType::CompactBlock));
assert!(filter.contains(&H256::from(1), KnownHashType::CompactBlock));
assert!(!filter.contains(&H256::from(1), KnownHashType::Block));
assert!(filter.contains(&H256::from(1), KnownHashType::Block));
assert!(filter.contains(&H256::from(2), KnownHashType::Transaction));
assert!(!filter.contains(&H256::from(2), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::CompactBlock));
assert!(!filter.contains(&H256::from(3), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::Transaction));
}

View File

@ -1,7 +1,6 @@
mod average_speed_meter;
mod best_headers_chain;
mod bloom_filter;
mod compact_block_builder;
mod connection_filter;
mod fee_rate_filter;
mod hash_queue;
@ -16,7 +15,6 @@ mod synchronization_state;
pub use self::average_speed_meter::AverageSpeedMeter;
pub use self::best_headers_chain::{BestHeadersChain, Information as BestHeadersChainInformation};
pub use self::bloom_filter::BloomFilter;
pub use self::compact_block_builder::build_compact_block;
pub use self::connection_filter::ConnectionFilter;
pub use self::fee_rate_filter::FeeRateFilter;
pub use self::hash_queue::{HashQueue, HashQueueChain, HashPosition};

View File

@ -194,7 +194,7 @@ pub struct BlockHeaderBuilder<F=Identity> {
callback: F,
time: u32,
parent: H256,
nonce: u32,
nonce: H256,
bits: Compact,
version: u32,
merkle_root: H256,
@ -205,7 +205,7 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
BlockHeaderBuilder {
callback: callback,
time: TIMESTAMP_COUNTER.with(|counter| { let val = counter.get(); counter.set(val+1); val }),
nonce: 0,
nonce: Default::default(),
merkle_root: 0.into(),
parent: 0.into(),
bits: Compact::max_value(),
@ -234,7 +234,7 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
self
}
pub fn nonce(mut self, nonce: u32) -> Self {
pub fn nonce(mut self, nonce: H256) -> Self {
self.nonce = nonce;
self
}
@ -245,9 +245,11 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
time: self.time,
previous_header_hash: self.parent,
bits: self.bits,
nonce: self.nonce,
nonce: self.nonce.into(),
merkle_root_hash: self.merkle_root,
version: self.version,
reserved_hash: Default::default(),
solution: chain::EquihashSolution::default(),
}
)
}
@ -335,6 +337,7 @@ impl<F> TransactionBuilder<F> where F: Invoke<chain::Transaction> {
version: self.version,
inputs: self.inputs,
outputs: self.outputs,
joint_split: None,
}
)
}
@ -440,7 +443,6 @@ impl<F> TransactionInputBuilder<F> where F: Invoke<chain::TransactionInput> {
previous_output: self.output.expect("Building input without previous output"),
script_sig: self.signature,
sequence: self.sequence,
script_witness: vec![],
}
)
}
@ -509,7 +511,7 @@ pub fn build_n_empty_blocks_from(n: u32, start_nonce: u32, previous: &chain::Blo
let mut previous_hash = previous.hash();
let end_nonce = start_nonce + n;
for i in start_nonce..end_nonce {
let block = block_builder().header().nonce(i).parent(previous_hash).build().build();
let block = block_builder().header().nonce((i as u8).into()).parent(previous_hash).build().build();
previous_hash = block.hash();
result.push(block);
}
@ -522,7 +524,7 @@ pub fn build_n_empty_blocks_from_genesis(n: u32, start_nonce: u32) -> Vec<chain:
pub fn build_n_empty_blocks(n: u32, start_nonce: u32) -> Vec<chain::Block> {
assert!(n != 0);
let previous = block_builder().header().nonce(start_nonce).build().build();
let previous = block_builder().header().nonce((start_nonce as u8).into()).build().build();
let mut result = vec![previous];
let children = build_n_empty_blocks_from(n, start_nonce + 1, &result[0].block_header);
result.extend(children);
@ -577,7 +579,7 @@ fn example5() {
.build()
.build();
assert_eq!(hash, "3e24319d69a77c58e2da8c7331a21729482835c96834dafb3e1793c1253847c7".into());
assert_eq!(hash, "f50c5629c1b6921cb9219574152b253dc72f7de96b0813aab75e2f2fb43e05e5".into());
assert_eq!(block.header().previous_header_hash, "0000000000000000000000000000000000000000000000000000000000000000".into());
}

View File

@ -49,6 +49,12 @@ impl Into<IndexedTransaction> for TransactionBuilder {
}
impl TransactionBuilder {
pub fn coinbase() -> TransactionBuilder {
let mut builder = TransactionBuilder::default();
builder.transaction.inputs.push(TransactionInput::coinbase(Default::default()));
builder
}
pub fn with_version(version: i32) -> TransactionBuilder {
let builder = TransactionBuilder::default();
builder.set_version(version)
@ -98,7 +104,6 @@ impl TransactionBuilder {
self.add_input(&Transaction::default(), output_index)
}
pub fn add_input(mut self, transaction: &Transaction, output_index: u32) -> TransactionBuilder {
self.transaction.inputs.push(TransactionInput {
previous_output: OutPoint {
@ -107,7 +112,6 @@ impl TransactionBuilder {
},
script_sig: Bytes::new_with_len(0),
sequence: 0xffffffff,
script_witness: vec![],
});
self
}
@ -124,7 +128,6 @@ impl TransactionBuilder {
},
script_sig: Bytes::new_with_len(0),
sequence: 0xffffffff,
script_witness: vec![],
}];
self
}
@ -143,4 +146,9 @@ impl TransactionBuilder {
pub fn hash(self) -> H256 {
self.transaction.hash()
}
pub fn add_default_joint_split(mut self) -> Self {
self.transaction.joint_split = Some(Default::default());
self
}
}
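// Illustrative only (hypothetical values, not a test from this commit): the
// helpers above are meant to chain, e.g.
//
//	let tx: Transaction = TransactionBuilder::coinbase()
//		.add_output(50)
//		.add_default_joint_split()
//		.into();
//
// `coinbase()` seeds the single coinbase input, and `add_default_joint_split()`
// attaches an empty joint-split so code paths expecting one are exercised.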

File diff suppressed because one or more lines are too long

View File

@ -5,10 +5,11 @@ authors = ["Nikolay Volf <nikvolf@gmail.com>"]
[dependencies]
time = "0.1"
lazy_static = "1.0"
log = "0.4"
rayon = "1.0"
parking_lot = "0.4"
blake2-rfc = { git = "https://github.com/gtank/blake2-rfc.git", branch = "persona" }
byteorder = "1.2"
primitives = { path = "../primitives" }
chain = { path = "../chain" }
serialization = { path = "../serialization" }
@ -16,7 +17,9 @@ script = { path = "../script" }
network = { path = "../network" }
storage = { path = "../storage" }
bitcrypto = { path = "../crypto" }
rustc-hex = "2"
[dev-dependencies]
rand = "0.4"
test-data = { path = "../test-data" }
db = { path = "../db" }

View File

@ -1,11 +1,9 @@
use network::{ConsensusParams, ConsensusFork, TransactionOrdering};
use crypto::dhash256;
use network::{ConsensusParams};
use storage::{TransactionOutputProvider, BlockHeaderProvider};
use script;
use ser::Stream;
use sigops::{transaction_sigops, transaction_sigops_cost};
use sigops::{transaction_sigops};
use work::block_reward_satoshi;
use duplex_store::{transaction_index_for_output_check, DuplexTransactionOutputProvider};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::BlockDeployments;
use canon::CanonBlock;
use error::{Error, TransactionError};
@ -18,8 +16,6 @@ pub struct BlockAcceptor<'a> {
pub sigops: BlockSigops<'a>,
pub coinbase_claim: BlockCoinbaseClaim<'a>,
pub coinbase_script: BlockCoinbaseScript<'a>,
pub witness: BlockWitness<'a>,
pub ordering: BlockTransactionOrdering<'a>,
}
impl<'a> BlockAcceptor<'a> {
@ -28,18 +24,15 @@ impl<'a> BlockAcceptor<'a> {
consensus: &'a ConsensusParams,
block: CanonBlock<'a>,
height: u32,
median_time_past: u32,
deployments: &'a BlockDeployments<'a>,
headers: &'a BlockHeaderProvider,
) -> Self {
BlockAcceptor {
finality: BlockFinality::new(block, height, deployments, headers),
serialized_size: BlockSerializedSize::new(block, consensus, deployments, height, median_time_past),
serialized_size: BlockSerializedSize::new(block, consensus),
coinbase_script: BlockCoinbaseScript::new(block, consensus, height),
coinbase_claim: BlockCoinbaseClaim::new(block, consensus, store, height, median_time_past),
sigops: BlockSigops::new(block, store, consensus, height, median_time_past),
witness: BlockWitness::new(block, deployments),
ordering: BlockTransactionOrdering::new(block, consensus, median_time_past),
coinbase_claim: BlockCoinbaseClaim::new(block, store, height),
sigops: BlockSigops::new(block, store, consensus),
}
}
@ -49,8 +42,6 @@ impl<'a> BlockAcceptor<'a> {
self.serialized_size.check()?;
self.coinbase_claim.check()?;
self.coinbase_script.check()?;
self.witness.check()?;
self.ordering.check()?;
Ok(())
}
}
@ -91,45 +82,24 @@ impl<'a> BlockFinality<'a> {
pub struct BlockSerializedSize<'a> {
block: CanonBlock<'a>,
consensus: &'a ConsensusParams,
height: u32,
median_time_past: u32,
segwit_active: bool,
max_block_size: usize,
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, deployments: &'a BlockDeployments<'a>, height: u32, median_time_past: u32) -> Self {
let segwit_active = deployments.segwit();
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams) -> Self {
BlockSerializedSize {
block: block,
consensus: consensus,
height: height,
median_time_past: median_time_past,
segwit_active: segwit_active,
max_block_size: consensus.max_block_size(),
}
}
fn check(&self) -> Result<(), Error> {
let size = self.block.size();
// block size (without witness) is valid for all forks:
// before SegWit: it is main check for size
// after SegWit: without witness data, block size should be <= 1_000_000
// after BitcoinCash fork: block size is increased to 8_000_000
if size < self.consensus.fork.min_block_size(self.height) ||
size > self.consensus.fork.max_block_size(self.height, self.median_time_past) {
if size > self.max_block_size {
return Err(Error::Size(size));
}
// there's no need to define weight for pre-SegWit blocks
if self.segwit_active {
let size_with_witness = self.block.size_with_witness();
let weight = size * (ConsensusFork::witness_scale_factor() - 1) + size_with_witness;
if weight > self.consensus.fork.max_block_weight(self.height) {
return Err(Error::Weight);
}
}
Ok(())
}
}
@ -137,10 +107,9 @@ impl<'a> BlockSerializedSize<'a> {
pub struct BlockSigops<'a> {
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
consensus: &'a ConsensusParams,
height: u32,
bip16_active: bool,
checkdatasig_active: bool,
max_block_sigops: usize,
}
impl<'a> BlockSigops<'a> {
@ -148,53 +117,30 @@ impl<'a> BlockSigops<'a> {
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
consensus: &'a ConsensusParams,
height: u32,
median_time_past: u32,
) -> Self {
let bip16_active = block.header.raw.time >= consensus.bip16_time;
let checkdatasig_active = match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) => median_time_past >= fork.magnetic_anomaly_time,
_ => false,
};
let checkdatasig_active = false;
BlockSigops {
block: block,
store: store,
consensus: consensus,
height: height,
bip16_active,
checkdatasig_active,
max_block_sigops: consensus.max_block_sigops(),
}
}
fn check(&self) -> Result<(), Error> {
let store = DuplexTransactionOutputProvider::new(self.store, &*self.block);
let (sigops, sigops_cost) = self.block.transactions.iter()
.map(|tx| {
let tx_sigops = transaction_sigops(&tx.raw, &store, self.bip16_active, self.checkdatasig_active);
let tx_sigops_cost = transaction_sigops_cost(&tx.raw, &store, tx_sigops);
(tx_sigops, tx_sigops_cost)
})
.fold((0, 0), |acc, (tx_sigops, tx_sigops_cost)| (acc.0 + tx_sigops, acc.1 + tx_sigops_cost));
let sigops = self.block.transactions.iter()
.map(|tx| transaction_sigops(&tx.raw, &store, self.bip16_active, self.checkdatasig_active))
.fold(0, |acc, tx_sigops| (acc + tx_sigops));
// sigops check is valid for all forks:
// before SegWit: 20_000
// after SegWit: cost of sigops is sigops * 4 and max cost is 80_000 => max sigops is still 20_000
// after BitcoinCash fork: 20_000 sigops for each full/partial 1_000_000 bytes of block
let size = self.block.size();
if sigops > self.consensus.fork.max_block_sigops(self.height, size) {
if sigops > self.max_block_sigops {
return Err(Error::MaximumSigops);
}
// sigops check is valid for all forks:
// before SegWit: no witnesses => cost is sigops * 4 and max cost is 80_000
// after SegWit: it is main check for sigops
// after BitcoinCash fork: no witnesses => cost is sigops * 4 and max cost depends on block size
if sigops_cost > self.consensus.fork.max_block_sigops_cost(self.height, size) {
Err(Error::MaximumSigopsCost)
} else {
Ok(())
}
Ok(())
}
}
@ -202,22 +148,18 @@ pub struct BlockCoinbaseClaim<'a> {
block: CanonBlock<'a>,
store: &'a TransactionOutputProvider,
height: u32,
transaction_ordering: TransactionOrdering,
}
impl<'a> BlockCoinbaseClaim<'a> {
fn new(
block: CanonBlock<'a>,
consensus_params: &ConsensusParams,
store: &'a TransactionOutputProvider,
height: u32,
median_time_past: u32
) -> Self {
BlockCoinbaseClaim {
block: block,
store: store,
height: height,
transaction_ordering: consensus_params.fork.transaction_ordering(median_time_past),
}
}
@ -230,8 +172,7 @@ impl<'a> BlockCoinbaseClaim<'a> {
// (1) Total sum of all referenced outputs
let mut incoming: u64 = 0;
for input in tx.raw.inputs.iter() {
let prevout_tx_idx = transaction_index_for_output_check(self.transaction_ordering, tx_idx);
let prevout = store.transaction_output(&input.previous_output, prevout_tx_idx);
let prevout = store.transaction_output(&input.previous_output, tx_idx);
let (sum, overflow) = incoming.overflowing_add(prevout.map(|o| o.value).unwrap_or(0));
if overflow {
return Err(Error::ReferencedInputsSumOverflow);
@ -293,7 +234,7 @@ impl<'a> BlockCoinbaseScript<'a> {
}
let prefix = script::Builder::default()
.push_num(self.height.into())
.push_i64(self.height.into())
.into_script();
let matches = self.block.transactions.first()
@ -309,97 +250,12 @@ impl<'a> BlockCoinbaseScript<'a> {
}
}
pub struct BlockWitness<'a> {
block: CanonBlock<'a>,
segwit_active: bool,
}
impl<'a> BlockWitness<'a> {
fn new(block: CanonBlock<'a>, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
BlockWitness {
block: block,
segwit_active: segwit_active,
}
}
fn check(&self) -> Result<(), Error> {
if !self.segwit_active {
return Ok(());
}
// check witness from coinbase transaction
let mut has_witness = false;
if let Some(coinbase) = self.block.transactions.first() {
let commitment = coinbase.raw.outputs.iter().rev()
.find(|output| script::is_witness_commitment_script(&output.script_pubkey));
if let Some(commitment) = commitment {
let witness_merkle_root = self.block.raw().witness_merkle_root();
if coinbase.raw.inputs.get(0).map(|i| i.script_witness.len()).unwrap_or_default() != 1 ||
coinbase.raw.inputs[0].script_witness[0].len() != 32 {
return Err(Error::WitnessInvalidNonceSize);
}
let mut stream = Stream::new();
stream.append(&witness_merkle_root);
stream.append_slice(&coinbase.raw.inputs[0].script_witness[0]);
let hash_witness = dhash256(&stream.out());
if hash_witness != commitment.script_pubkey[6..].into() {
return Err(Error::WitnessMerkleCommitmentMismatch);
}
has_witness = true;
}
}
// witness commitment is required when block contains transactions with witness
if !has_witness && self.block.transactions.iter().any(|tx| tx.raw.has_witness()) {
return Err(Error::UnexpectedWitness);
}
Ok(())
}
}
pub struct BlockTransactionOrdering<'a> {
block: CanonBlock<'a>,
transaction_ordering: TransactionOrdering,
}
impl<'a> BlockTransactionOrdering<'a> {
fn new(block: CanonBlock<'a>, consensus: &'a ConsensusParams, median_time_past: u32) -> Self {
BlockTransactionOrdering {
block,
transaction_ordering: consensus.fork.transaction_ordering(median_time_past),
}
}
fn check(&self) -> Result<(), Error> {
match self.transaction_ordering {
// topological transaction ordering is checked in TransactionMissingInputs
TransactionOrdering::Topological => Ok(()),
// canonical transaction ordering means that transactions are ordered by
// their id (i.e. hash) in ascending order
TransactionOrdering::Canonical =>
if self.block.transactions.windows(2).skip(1).all(|w| w[0].hash < w[1].hash) {
Ok(())
} else {
Err(Error::NonCanonicalTransactionOrdering)
},
}
}
}
#[cfg(test)]
mod tests {
extern crate test_data;
use chain::{IndexedBlock, Transaction};
use network::{Network, ConsensusFork, ConsensusParams, BitcoinCashConsensusParams};
use {Error, CanonBlock};
use super::{BlockCoinbaseScript, BlockTransactionOrdering};
use super::{BlockCoinbaseScript};
#[test]
fn test_block_coinbase_script() {
@ -429,42 +285,4 @@ mod tests {
assert_eq!(coinbase_script_validator2.check(), Err(Error::CoinbaseScript));
}
#[test]
fn block_transaction_ordering_works() {
let tx1: Transaction = test_data::TransactionBuilder::with_output(1).into();
let tx2: Transaction = test_data::TransactionBuilder::with_output(2).into();
let tx3: Transaction = test_data::TransactionBuilder::with_output(3).into();
let bad_block: IndexedBlock = test_data::block_builder()
.with_transaction(tx1.clone())
.with_transaction(tx2.clone())
.with_transaction(tx3.clone())
.header().build()
.build()
.into();
let good_block: IndexedBlock = test_data::block_builder()
.with_transaction(tx1)
.with_transaction(tx3)
.with_transaction(tx2)
.header().build()
.build()
.into();
let bad_block = CanonBlock::new(&bad_block);
let good_block = CanonBlock::new(&good_block);
// when topological ordering is used => we don't care about tx ordering
let consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let checker = BlockTransactionOrdering::new(bad_block, &consensus, 0);
assert_eq!(checker.check(), Ok(()));
// when canonical ordering is used => we care about tx ordering
let mut bch = BitcoinCashConsensusParams::new(Network::Unitest);
bch.magnetic_anomaly_time = 0;
let consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(bch));
let checker = BlockTransactionOrdering::new(bad_block, &consensus, 0);
assert_eq!(checker.check(), Err(Error::NonCanonicalTransactionOrdering));
let checker = BlockTransactionOrdering::new(good_block, &consensus, 0);
assert_eq!(checker.check(), Ok(()));
}
}

View File

@ -17,13 +17,13 @@ pub struct ChainAcceptor<'a> {
}
impl<'a> ChainAcceptor<'a> {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, median_time_past: u32, deployments: &'a BlockDeployments) -> Self {
pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, deployments: &'a BlockDeployments) -> Self {
trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str());
let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw());
let headers = store.as_block_header_provider();
ChainAcceptor {
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, median_time_past, deployments, headers),
block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, deployments, headers),
header: HeaderAcceptor::new(headers, consensus, block.header(), height, deployments),
transactions: block.transactions()
.into_iter()
@ -34,10 +34,8 @@ impl<'a> ChainAcceptor<'a> {
consensus,
tx,
verification_level,
block.hash(),
height,
block.header.raw.time,
median_time_past,
tx_index,
deployments,
))

View File

@ -1,4 +1,4 @@
use network::ConsensusParams;
use network::{Network, ConsensusParams};
use storage::BlockHeaderProvider;
use canon::CanonHeader;
use error::Error;
@ -8,6 +8,7 @@ use deployments::Deployments;
pub struct HeaderAcceptor<'a> {
pub version: HeaderVersion<'a>,
pub equihash: HeaderEquihashSolution<'a>,
pub work: HeaderWork<'a>,
pub median_timestamp: HeaderMedianTimestamp<'a>,
}
@ -22,6 +23,7 @@ impl<'a> HeaderAcceptor<'a> {
) -> Self {
let csv_active = deployments.as_ref().csv(height, store, consensus);
HeaderAcceptor {
equihash: HeaderEquihashSolution::new(header, consensus),
work: HeaderWork::new(header, store, height, consensus),
median_timestamp: HeaderMedianTimestamp::new(header, store, csv_active),
version: HeaderVersion::new(header, height, consensus),
@ -29,6 +31,7 @@ impl<'a> HeaderAcceptor<'a> {
}
pub fn check(&self) -> Result<(), Error> {
try!(self.equihash.check());
try!(self.version.check());
try!(self.work.check());
try!(self.median_timestamp.check());
@ -64,6 +67,39 @@ impl<'a> HeaderVersion<'a> {
}
}
pub struct HeaderEquihashSolution<'a> {
header: CanonHeader<'a>,
consensus: &'a ConsensusParams,
}
impl<'a> HeaderEquihashSolution<'a> {
fn new(header: CanonHeader<'a>, consensus: &'a ConsensusParams) -> Self {
HeaderEquihashSolution {
header: header,
consensus: consensus,
}
}
fn check(&self) -> Result<(), Error> {
match self.consensus.network {
Network::Unitest => return Ok(()),
_ => (),
};
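		// N = 200, K = 9 below are Zcash's production Equihash parameters; the
		// early return above lets unit-test networks skip this relatively
		// expensive solution check.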
use equihash;
let is_solution_correct = equihash::verify_block_equihash_solution(&equihash::EquihashParams {
N: 200,
K: 9,
}, &self.header.raw);
if is_solution_correct {
Ok(())
} else {
Err(Error::InvalidEquihashSolution)
}
}
}
pub struct HeaderWork<'a> {
header: CanonHeader<'a>,
store: &'a BlockHeaderProvider,
@ -83,8 +119,7 @@ impl<'a> HeaderWork<'a> {
fn check(&self) -> Result<(), Error> {
let previous_header_hash = self.header.raw.previous_header_hash.clone();
let time = self.header.raw.time;
let work = work_required(previous_header_hash, time, self.height, self.store, self.consensus);
let work = work_required(previous_header_hash, self.height, self.store, self.consensus);
if work == self.header.raw.bits {
Ok(())
} else {

View File

@ -1,12 +1,9 @@
use primitives::hash::H256;
use primitives::bytes::Bytes;
use ser::Serializable;
use storage::{TransactionMetaProvider, TransactionOutputProvider};
use network::{ConsensusParams, ConsensusFork};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner, SignatureVersion};
use duplex_store::{DuplexTransactionOutputProvider, transaction_index_for_output_check};
use network::{ConsensusParams};
use script::{Script, verify_script, VerificationFlags, TransactionSignatureChecker, TransactionInputSigner};
use duplex_store::DuplexTransactionOutputProvider;
use deployments::BlockDeployments;
use script::Builder;
use sigops::transaction_sigops;
use canon::CanonTransaction;
use constants::{COINBASE_MATURITY};
@ -15,13 +12,11 @@ use VerificationLevel;
pub struct TransactionAcceptor<'a> {
pub size: TransactionSize<'a>,
pub premature_witness: TransactionPrematureWitness<'a>,
pub bip30: TransactionBip30<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
pub maturity: TransactionMaturity<'a>,
pub overspent: TransactionOverspent<'a>,
pub double_spent: TransactionDoubleSpend<'a>,
pub return_replay_protection: TransactionReturnReplayProtection<'a>,
pub eval: TransactionEval<'a>,
}
@ -35,51 +30,60 @@ impl<'a> TransactionAcceptor<'a> {
consensus: &'a ConsensusParams,
transaction: CanonTransaction<'a>,
verification_level: VerificationLevel,
block_hash: &'a H256,
height: u32,
time: u32,
median_time_past: u32,
transaction_index: usize,
deployments: &'a BlockDeployments<'a>,
) -> Self {
/*
TODO:
Sprout:
reject transactions which are intended for Overwinter and beyond
Overwinter:
tx version
tx version group
reject transactions with valid version but missing overwinter flag
reject transactions intended for Sprout
check that all transactions are unexpired
Sapling:
tx version
tx version group
reject transactions intended for Sprout
check that all transactions are unexpired
block max size changes!!!
*/
trace!(target: "verification", "Tx verification {}", transaction.hash.to_reversed_str());
let tx_ordering = consensus.fork.transaction_ordering(median_time_past);
let missing_input_tx_index = transaction_index_for_output_check(tx_ordering,transaction_index);
TransactionAcceptor {
size: TransactionSize::new(transaction, consensus, median_time_past),
premature_witness: TransactionPrematureWitness::new(transaction, deployments),
bip30: TransactionBip30::new_for_sync(transaction, meta_store, consensus, block_hash, height),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, missing_input_tx_index),
size: TransactionSize::new(transaction, consensus, height),
bip30: TransactionBip30::new_for_sync(transaction, meta_store),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, median_time_past, deployments),
eval: TransactionEval::new(transaction, output_store, consensus, verification_level, height, time, deployments),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.size.check());
try!(self.premature_witness.check());
try!(self.bip30.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
try!(self.overspent.check());
try!(self.double_spent.check());
try!(self.return_replay_protection.check());
try!(self.eval.check());
self.size.check()?;
self.bip30.check()?;
self.missing_inputs.check()?;
self.maturity.check()?;
self.overspent.check()?;
self.double_spent.check()?;
self.eval.check()?;
Ok(())
}
}
pub struct MemoryPoolTransactionAcceptor<'a> {
pub size: TransactionSize<'a>,
pub missing_inputs: TransactionMissingInputs<'a>,
pub maturity: TransactionMaturity<'a>,
pub overspent: TransactionOverspent<'a>,
pub sigops: TransactionSigops<'a>,
pub double_spent: TransactionDoubleSpend<'a>,
pub return_replay_protection: TransactionReturnReplayProtection<'a>,
pub eval: TransactionEval<'a>,
}
@ -93,34 +97,29 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
transaction: CanonTransaction<'a>,
height: u32,
time: u32,
median_time_past: u32,
deployments: &'a BlockDeployments<'a>,
) -> Self {
trace!(target: "verification", "Mempool-Tx verification {}", transaction.hash.to_reversed_str());
let transaction_index = 0;
let max_block_sigops = consensus.fork.max_block_sigops(height, consensus.fork.max_block_size(height, median_time_past));
let max_block_sigops = consensus.max_block_sigops();
MemoryPoolTransactionAcceptor {
size: TransactionSize::new(transaction, consensus, median_time_past),
missing_inputs: TransactionMissingInputs::new(transaction, output_store, transaction_index),
maturity: TransactionMaturity::new(transaction, meta_store, height),
overspent: TransactionOverspent::new(transaction, output_store),
sigops: TransactionSigops::new(transaction, output_store, consensus, max_block_sigops, time),
double_spent: TransactionDoubleSpend::new(transaction, output_store),
return_replay_protection: TransactionReturnReplayProtection::new(transaction, consensus, height),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, median_time_past, deployments),
eval: TransactionEval::new(transaction, output_store, consensus, VerificationLevel::Full, height, time, deployments),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
// Bip30 is not checked because we don't need to allow tx pool acceptance of an unspent duplicate.
// Tx pool validation is not strictly a matter of consensus.
try!(self.size.check());
try!(self.missing_inputs.check());
try!(self.maturity.check());
try!(self.overspent.check());
try!(self.sigops.check());
try!(self.double_spent.check());
try!(self.return_replay_protection.check());
try!(self.eval.check());
Ok(())
}
@ -138,29 +137,22 @@ impl<'a> MemoryPoolTransactionAcceptor<'a> {
pub struct TransactionBip30<'a> {
transaction: CanonTransaction<'a>,
store: &'a TransactionMetaProvider,
exception: bool,
}
impl<'a> TransactionBip30<'a> {
fn new_for_sync(
transaction: CanonTransaction<'a>,
store: &'a TransactionMetaProvider,
consensus_params: &'a ConsensusParams,
block_hash: &'a H256,
height: u32
) -> Self {
let exception = consensus_params.is_bip30_exception(block_hash, height);
TransactionBip30 {
transaction: transaction,
store: store,
exception: exception,
}
}
fn check(&self) -> Result<(), TransactionError> {
match self.store.transaction_meta(&self.transaction.hash) {
Some(ref meta) if !meta.is_fully_spent() && !self.exception => {
Some(ref meta) if !meta.is_fully_spent() => {
Err(TransactionError::UnspentTransactionWithTheSameHash)
},
_ => Ok(())
@ -282,10 +274,7 @@ impl<'a> TransactionSigops<'a> {
fn check(&self) -> Result<(), TransactionError> {
let bip16_active = self.time >= self.consensus_params.bip16_time;
let checkdatasig_active = match self.consensus_params.fork {
ConsensusFork::BitcoinCash(ref fork) => self.time >= fork.magnetic_anomaly_time,
_ => false
};
let checkdatasig_active = false;
let sigops = transaction_sigops(&self.transaction.raw, &self.store, bip16_active, checkdatasig_active);
if sigops > self.max_sigops {
Err(TransactionError::MaxSigops)
@ -304,13 +293,11 @@ pub struct TransactionEval<'a> {
verify_locktime: bool,
verify_checksequence: bool,
verify_dersig: bool,
verify_witness: bool,
verify_nulldummy: bool,
verify_monolith_opcodes: bool,
verify_magnetic_anomaly_opcodes: bool,
verify_sigpushonly: bool,
verify_cleanstack: bool,
signature_version: SignatureVersion,
}
impl<'a> TransactionEval<'a> {
@ -321,32 +308,16 @@ impl<'a> TransactionEval<'a> {
verification_level: VerificationLevel,
height: u32,
time: u32,
median_timestamp: u32,
deployments: &'a BlockDeployments,
) -> Self {
let verify_p2sh = time >= params.bip16_time;
let verify_strictenc = match params.fork {
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => true,
_ => false,
};
let verify_strictenc = false;
let verify_locktime = height >= params.bip65_height;
let verify_dersig = height >= params.bip66_height;
let verify_monolith_opcodes = match params.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.monolith_time,
_ => false,
};
let verify_magnetic_anomaly_opcodes = match params.fork {
ConsensusFork::BitcoinCash(ref fork) => median_timestamp >= fork.magnetic_anomaly_time,
_ => false,
};
let signature_version = match params.fork {
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height => SignatureVersion::ForkId,
ConsensusFork::BitcoinCore | ConsensusFork::BitcoinCash(_) => SignatureVersion::Base,
};
let verify_monolith_opcodes = false;
let verify_magnetic_anomaly_opcodes = false;
let verify_checksequence = deployments.csv();
let verify_witness = deployments.segwit();
let verify_nulldummy = verify_witness;
let verify_sigpushonly = verify_magnetic_anomaly_opcodes;
let verify_cleanstack = verify_magnetic_anomaly_opcodes;
@ -359,13 +330,11 @@ impl<'a> TransactionEval<'a> {
verify_locktime: verify_locktime,
verify_checksequence: verify_checksequence,
verify_dersig: verify_dersig,
verify_witness: verify_witness,
verify_nulldummy: verify_nulldummy,
verify_nulldummy: false,
verify_monolith_opcodes: verify_monolith_opcodes,
verify_magnetic_anomaly_opcodes: verify_magnetic_anomaly_opcodes,
verify_sigpushonly: verify_sigpushonly,
verify_cleanstack: verify_cleanstack,
signature_version: signature_version,
}
}
@ -394,7 +363,6 @@ impl<'a> TransactionEval<'a> {
checker.input_index = index;
checker.input_amount = output.value;
let script_witness = &input.script_witness;
let input: Script = input.script_sig.clone().into();
let output: Script = output.script_pubkey.into();
@ -405,7 +373,6 @@ impl<'a> TransactionEval<'a> {
.verify_checksequence(self.verify_checksequence)
.verify_dersig(self.verify_dersig)
.verify_nulldummy(self.verify_nulldummy)
.verify_witness(self.verify_witness)
.verify_concat(self.verify_monolith_opcodes)
.verify_split(self.verify_monolith_opcodes)
.verify_and(self.verify_monolith_opcodes)
@ -419,7 +386,7 @@ impl<'a> TransactionEval<'a> {
.verify_sigpushonly(self.verify_sigpushonly)
.verify_cleanstack(self.verify_cleanstack);
try!(verify_script(&input, &output, &script_witness, &flags, &checker, self.signature_version)
try!(verify_script(&input, &output, &flags, &checker)
.map_err(|e| TransactionError::Signature(index, e)));
}
@ -453,147 +420,26 @@ impl<'a> TransactionDoubleSpend<'a> {
}
}
pub struct TransactionReturnReplayProtection<'a> {
transaction: CanonTransaction<'a>,
consensus: &'a ConsensusParams,
height: u32,
}
lazy_static! {
pub static ref BITCOIN_CASH_RETURN_REPLAY_PROTECTION_SCRIPT: Bytes = Builder::default()
.return_bytes(b"Bitcoin: A Peer-to-Peer Electronic Cash System")
.into_bytes();
}
impl<'a> TransactionReturnReplayProtection<'a> {
fn new(transaction: CanonTransaction<'a>, consensus: &'a ConsensusParams, height: u32) -> Self {
TransactionReturnReplayProtection {
transaction: transaction,
consensus: consensus,
height: height,
}
}
fn check(&self) -> Result<(), TransactionError> {
if let ConsensusFork::BitcoinCash(ref fork) = self.consensus.fork {
// Transactions with such OP_RETURNs shall be considered valid again for block 530,001 and onwards
if self.height >= fork.height && self.height <= 530_000 {
if (*self.transaction).raw.outputs.iter()
.any(|out| out.script_pubkey == *BITCOIN_CASH_RETURN_REPLAY_PROTECTION_SCRIPT) {
return Err(TransactionError::ReturnReplayProtection)
}
}
}
Ok(())
}
}
pub struct TransactionPrematureWitness<'a> {
transaction: CanonTransaction<'a>,
segwit_active: bool,
}
impl<'a> TransactionPrematureWitness<'a> {
fn new(transaction: CanonTransaction<'a>, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
TransactionPrematureWitness {
transaction: transaction,
segwit_active: segwit_active,
}
}
fn check(&self) -> Result<(), TransactionError> {
if !self.segwit_active && (*self.transaction).raw.has_witness() {
Err(TransactionError::PrematureWitness)
} else {
Ok(())
}
}
}
/// The encoded size of the transaction MUST be less than or equal to current max limit.
pub struct TransactionSize<'a> {
transaction: CanonTransaction<'a>,
min_transaction_size: usize,
max_size: usize,
}
impl<'a> TransactionSize<'a> {
fn new(transaction: CanonTransaction<'a>, consensus: &'a ConsensusParams, median_time_past: u32) -> Self {
let min_transaction_size = consensus.fork.min_transaction_size(median_time_past);
fn new(transaction: CanonTransaction<'a>, consensus: &'a ConsensusParams, height: u32) -> Self {
TransactionSize {
transaction: transaction,
min_transaction_size,
max_size: consensus.max_transaction_size(height),
}
}
fn check(&self) -> Result<(), TransactionError> {
if self.min_transaction_size != 0 && self.transaction.raw.serialized_size() < self.min_transaction_size {
Err(TransactionError::MinSize)
let size = self.transaction.raw.serialized_size();
if size > self.max_size {
Err(TransactionError::MaxSize)
} else {
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use chain::{IndexedTransaction, Transaction, TransactionOutput};
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use script::Builder;
use canon::CanonTransaction;
use error::TransactionError;
use super::{TransactionReturnReplayProtection, TransactionSize};
#[test]
fn return_replay_protection_works() {
let transaction: IndexedTransaction = Transaction {
version: 1,
inputs: vec![],
outputs: vec![TransactionOutput {
value: 0,
script_pubkey: Builder::default()
.return_bytes(b"Bitcoin: A Peer-to-Peer Electronic Cash System")
.into_bytes(),
}],
lock_time: 0xffffffff,
}.into();
assert_eq!(transaction.raw.outputs[0].script_pubkey.len(), 46 + 2);
let consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Mainnet)));
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, consensus.fork.activation_height());
assert_eq!(checker.check(), Err(TransactionError::ReturnReplayProtection));
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, consensus.fork.activation_height() - 1);
assert_eq!(checker.check(), Ok(()));
let consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let checker = TransactionReturnReplayProtection::new(CanonTransaction::new(&transaction), &consensus, 100);
assert_eq!(checker.check(), Ok(()));
}
#[test]
fn transaction_size_works() {
let small_tx = Transaction::default();
let big_tx: Transaction = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000".into();
let small_tx = IndexedTransaction::new(small_tx.hash(), small_tx);
let big_tx = IndexedTransaction::new(big_tx.hash(), big_tx);
let small_tx = CanonTransaction::new(&small_tx);
let big_tx = CanonTransaction::new(&big_tx);
let unrestricted_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let restricted_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams::new(Network::Unitest)));
// no restrictions
let checker = TransactionSize::new(small_tx, &unrestricted_consensus, 10000000);
assert_eq!(checker.check(), Ok(()));
// big + restricted
let checker = TransactionSize::new(big_tx, &restricted_consensus, 2000000000);
assert_eq!(checker.check(), Ok(()));
// small + restricted
let checker = TransactionSize::new(small_tx, &restricted_consensus, 2000000000);
assert_eq!(checker.check(), Err(TransactionError::MinSize));
}
}

View File

@ -13,7 +13,6 @@ use verify_transaction::MemoryPoolTransactionVerifier;
use accept_chain::ChainAcceptor;
use accept_transaction::MemoryPoolTransactionAcceptor;
use deployments::{Deployments, BlockDeployments};
use timestamp::median_timestamp_inclusive;
use {Verify, VerificationLevel};
pub struct BackwardsCompatibleChainVerifier {
@ -38,14 +37,13 @@ impl BackwardsCompatibleChainVerifier {
let current_time = ::time::get_time().sec as u32;
// first run pre-verification
let chain_verifier = ChainVerifier::new(block, self.consensus.network, current_time);
let chain_verifier = ChainVerifier::new(block, &self.consensus, current_time);
chain_verifier.check()?;
assert_eq!(Some(self.store.best_block().hash), self.store.block_hash(self.store.best_block().number));
let block_origin = self.store.block_origin(&block.header)?;
trace!(target: "verification", "verify_block: {:?} best_block: {:?} block_origin: {:?}", block.hash().reversed(), self.store.best_block(), block_origin);
let median_time_past = median_timestamp_inclusive(block.header.raw.previous_header_hash.clone(), self.store.as_block_header_provider());
match block_origin {
BlockOrigin::KnownBlock => {
// there should be no known blocks at this point
@ -56,7 +54,7 @@ impl BackwardsCompatibleChainVerifier {
let deployments = BlockDeployments::new(&self.deployments, block_number, header_provider, &self.consensus);
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(self.store.as_store(), &self.consensus, verification_level,
canon_block, block_number, median_time_past, &deployments);
canon_block, block_number, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChain(origin) => {
@ -66,7 +64,7 @@ impl BackwardsCompatibleChainVerifier {
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block,
block_number, median_time_past, &deployments);
block_number, &deployments);
chain_acceptor.check()?;
},
BlockOrigin::SideChainBecomingCanonChain(origin) => {
@ -76,7 +74,7 @@ impl BackwardsCompatibleChainVerifier {
let fork = self.store.fork(origin)?;
let canon_block = CanonBlock::new(block);
let chain_acceptor = ChainAcceptor::new(fork.store(), &self.consensus, verification_level, canon_block,
block_number, median_time_past, &deployments);
block_number, &deployments);
chain_acceptor.check()?;
},
}
@ -95,7 +93,7 @@ impl BackwardsCompatibleChainVerifier {
// TODO: full verification
let current_time = ::time::get_time().sec as u32;
let header = IndexedBlockHeader::new(hash.clone(), header.clone());
let header_verifier = HeaderVerifier::new(&header, self.consensus.network, current_time);
let header_verifier = HeaderVerifier::new(&header, &self.consensus, current_time);
header_verifier.check()
}
@ -110,18 +108,13 @@ impl BackwardsCompatibleChainVerifier {
let indexed_tx = transaction.clone().into();
// let's do preverification first
let deployments = BlockDeployments::new(&self.deployments, height, block_header_provider, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus, &deployments);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus);
try!(tx_verifier.check());
let canon_tx = CanonTransaction::new(&indexed_tx);
// now let's do full verification
let noop = NoopStore;
let output_store = DuplexTransactionOutputProvider::new(prevout_provider, &noop);
let previous_block_number = height.checked_sub(1)
.expect("height is the height of future block of new tx; genesis block can't be in the future; qed");
let previous_block_header = block_header_provider.block_header(previous_block_number.into())
.expect("blocks up to height should be in db; qed");
let median_time_past = median_timestamp_inclusive(previous_block_header.hash(), block_header_provider);
let tx_acceptor = MemoryPoolTransactionAcceptor::new(
self.store.as_transaction_meta_provider(),
output_store,
@ -129,7 +122,6 @@ impl BackwardsCompatibleChainVerifier {
canon_tx,
height,
time,
median_time_past,
&deployments,
);
tx_acceptor.check()
@ -154,12 +146,11 @@ mod tests {
extern crate test_data;
use std::sync::Arc;
use chain::{IndexedBlock, Transaction, Block};
use chain::{IndexedBlock};
use storage::Error as DBError;
use db::BlockChainDatabase;
use network::{Network, ConsensusParams, ConsensusFork, BitcoinCashConsensusParams};
use network::{Network, ConsensusParams};
use script;
use constants::DOUBLE_SPACING_SECONDS;
use super::BackwardsCompatibleChainVerifier as ChainVerifier;
use {Verify, Error, TransactionError, VerificationLevel};
@ -167,7 +158,7 @@ mod tests {
fn verify_orphan() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b2 = test_data::block_h2().into();
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest));
assert_eq!(Err(Error::Database(DBError::UnknownParent)), verifier.verify(VerificationLevel::Full, &b2));
}
@ -175,8 +166,8 @@ mod tests {
fn verify_smoky() {
let storage = Arc::new(BlockChainDatabase::init_test_chain(vec![test_data::genesis().into()]));
let b1 = test_data::block_h1();
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
assert!(verifier.verify(VerificationLevel::Full, &b1.into()).is_ok());
let verifier = ChainVerifier::new(storage, ConsensusParams::new(Network::Mainnet));
assert_eq!(verifier.verify(VerificationLevel::Full, &b1.into()), Ok(()));
}
#[test]
@ -187,8 +178,8 @@ mod tests {
test_data::block_h1().into(),
]);
let b1 = test_data::block_h2();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
assert!(verifier.verify(VerificationLevel::Full, &b1.into()).is_ok());
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Mainnet));
assert_eq!(verifier.verify(VerificationLevel::Full, &b1.into()), Ok(()));
}
#[test]
@ -216,7 +207,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::Transaction(
1,
@ -254,8 +245,8 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert_eq!(verifier.verify(VerificationLevel::Full, &block.into()), Ok(()));
}
#[test]
@ -290,7 +281,7 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
}
@ -329,76 +320,12 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::Transaction(2, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
}
#[test]
fn transaction_references_same_block_and_goes_before_previous() {
let mut blocks = vec![test_data::block_builder()
.transaction()
.coinbase()
.output().value(50).build()
.build()
.merkled_header().build()
.build()];
let input_tx = blocks[0].transactions()[0].clone();
let mut parent_hash = blocks[0].hash();
// waiting 100 blocks for genesis coinbase to become valid
for _ in 0..100 {
let block: Block = test_data::block_builder()
.transaction().coinbase().build()
.merkled_header().parent(parent_hash).build()
.build()
.into();
parent_hash = block.hash();
blocks.push(block);
}
let storage = Arc::new(BlockChainDatabase::init_test_chain(blocks.into_iter().map(Into::into).collect()));
let tx1: Transaction = test_data::TransactionBuilder::with_version(4)
.add_input(&input_tx, 0)
.add_output(10).add_output(10).add_output(10)
.add_output(5).add_output(5).add_output(5)
.into();
let tx2: Transaction = test_data::TransactionBuilder::with_version(1)
.add_input(&tx1, 0)
.add_output(1).add_output(1).add_output(1)
.add_output(2).add_output(2).add_output(2)
.into();
assert!(tx1.hash() > tx2.hash());
let block = test_data::block_builder()
.transaction()
.coinbase()
.output().value(2).script_pubkey_with_sigops(100).build()
.build()
.with_transaction(tx2)
.with_transaction(tx1)
.merkled_header()
.time(DOUBLE_SPACING_SECONDS + 101) // to pass BCH work check
.parent(parent_hash)
.build()
.build();
// when topological order is required
let topological_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore);
let verifier = ChainVerifier::new(storage.clone(), topological_consensus);
let expected = Err(Error::Transaction(1, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(VerificationLevel::Header, &block.clone().into()));
// when canonical order is required
let mut canonical_params = BitcoinCashConsensusParams::new(Network::Unitest);
canonical_params.magnetic_anomaly_time = 0;
let canonical_consensus = ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCash(canonical_params));
let verifier = ChainVerifier::new(storage, canonical_consensus);
let expected = Ok(());
assert_eq!(expected, verifier.verify(VerificationLevel::Header, &block.into()));
}
#[test]
#[ignore]
fn coinbase_happy() {
@ -435,7 +362,7 @@ mod tests {
.merkled_header().parent(best_hash).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
assert!(verifier.verify(VerificationLevel::Full, &block.into()).is_ok());
}
@ -482,7 +409,7 @@ mod tests {
.build()
.into();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::MaximumSigops);
assert_eq!(expected, verifier.verify(VerificationLevel::Full, &block.into()));
}
@ -504,7 +431,7 @@ mod tests {
.build()
.into();
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest, ConsensusFork::BitcoinCore));
let verifier = ChainVerifier::new(Arc::new(storage), ConsensusParams::new(Network::Unitest));
let expected = Err(Error::CoinbaseOverspend {
expected_max: 5000000000,

View File

@ -78,17 +78,6 @@ impl Deployments {
None => false
}
}
/// Returns true if SegWit deployment is active
pub fn segwit(&self, number: u32, headers: &BlockHeaderProvider, consensus: &ConsensusParams) -> bool {
match consensus.segwit_deployment {
Some(segwit) => {
let mut cache = self.cache.lock();
threshold_state(&mut cache, segwit, number, headers, consensus.miner_confirmation_window, consensus.rule_change_activation_threshold).is_active()
},
None => false
}
}
}
impl<'a> BlockDeployments<'a> {
@ -104,10 +93,6 @@ impl<'a> BlockDeployments<'a> {
pub fn csv(&self) -> bool {
self.deployments.csv(self.number, self.headers, self.consensus)
}
pub fn segwit(&self) -> bool {
self.deployments.segwit(self.number, self.headers, self.consensus)
}
}
impl AsRef<Deployments> for Deployments {
@ -293,7 +278,9 @@ mod tests {
merkle_root_hash: Default::default(),
time: time,
bits: 0.into(),
nonce: height,
nonce: (height as u8).into(),
reserved_hash: Default::default(),
solution: Default::default(),
};
previous_header_hash = header.hash();

View File

@ -2,7 +2,6 @@
//! require sophisticated (in more than one source) previous transaction lookups
use chain::{OutPoint, TransactionOutput};
use network::TransactionOrdering;
use storage::TransactionOutputProvider;
#[derive(Clone, Copy)]
@ -42,19 +41,3 @@ impl TransactionOutputProvider for NoopStore {
false
}
}
/// Converts actual transaction index into transaction index to use in
/// TransactionOutputProvider::transaction_output call.
/// When topological ordering is used, we expect ascendant transaction (TX1)
/// to come BEFORE descendant transaction (TX2) in the block, like this:
/// [ ... TX1 ... TX2 ... ]
/// When canonical ordering is used, transactions order within block is not
/// relevant for this check and ascendant transaction (TX1) can come AFTER
/// descendant, like this:
/// [ ... TX2 ... TX1 ... ]
pub fn transaction_index_for_output_check(ordering: TransactionOrdering, tx_idx: usize) -> usize {
match ordering {
TransactionOrdering::Topological => tx_idx,
TransactionOrdering::Canonical => ::std::usize::MAX,
}
}

View File

@ -0,0 +1,321 @@
// https://github.com/zcash/zcash/commit/fdda3c5085199d2c2170887aa064fc42afdb0360
use blake2_rfc::blake2b::Blake2b;
use byteorder::{BigEndian, LittleEndian, ByteOrder};
use chain::BlockHeader;
#[allow(non_snake_case)]
#[derive(Debug)]
pub struct EquihashParams {
pub N: u32,
pub K: u32,
}
impl EquihashParams {
pub fn indices_per_hash_output(&self) -> usize {
(512 / self.N) as usize
}
pub fn hash_output(&self) -> usize {
(self.indices_per_hash_output() * self.N as usize / 8usize) as usize
}
pub fn collision_bit_length(&self) -> usize {
(self.N / (self.K + 1)) as usize
}
pub fn collision_byte_length(&self) -> usize {
(self.collision_bit_length() + 7) / 8
}
pub fn final_full_width(&self) -> usize {
2 * self.collision_byte_length() + 4 * (1 << self.K)
}
pub fn solution_size(&self) -> usize {
((1usize << self.K) * (self.collision_bit_length() + 1) / 8) as usize
}
pub fn hash_length(&self) -> usize {
(self.K as usize + 1) * self.collision_byte_length()
}
}
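// A sanity sketch (not part of the original commit) evaluating the formulas
// above for Zcash's production parameters N = 200, K = 9; each expected value
// can be checked by hand against the corresponding method.
#[cfg(test)]
mod params_sketch {
	use super::EquihashParams;

	#[test]
	fn zcash_mainnet_parameter_arithmetic() {
		let params = EquihashParams { N: 200, K: 9 };
		assert_eq!(params.indices_per_hash_output(), 2); // 512 / 200
		assert_eq!(params.hash_output(), 50);            // 2 * 200 / 8
		assert_eq!(params.collision_bit_length(), 20);   // 200 / (9 + 1)
		assert_eq!(params.collision_byte_length(), 3);   // (20 + 7) / 8
		assert_eq!(params.hash_length(), 30);            // (9 + 1) * 3
		assert_eq!(params.solution_size(), 1344);        // (1 << 9) * 21 / 8
	}
}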
pub fn verify_block_equihash_solution(params: &EquihashParams, header: &BlockHeader) -> bool {
let equihash_solution = header.solution.as_ref();
let input = header.equihash_input();
verify_equihash_solution(params, &input, equihash_solution)
}
pub fn verify_equihash_solution(params: &EquihashParams, input: &[u8], solution: &[u8]) -> bool {
if solution.len() != params.solution_size() {
return false;
}
let mut context = new_blake2(params);
context.update(input);
// pure equihash
let collision_bit_length = params.collision_bit_length();
let indices = get_indices_from_minimal(solution, collision_bit_length);
let mut rows = Vec::new();
for idx in indices {
let hash = generate_hash(&context, (idx as usize / params.indices_per_hash_output()) as u32);
let hash_begin = (idx as usize % params.indices_per_hash_output()) * params.N as usize / 8;
let hash_end = hash_begin + params.N as usize / 8;
let mut row = vec![0; params.final_full_width()];
let expanded_hash = expand_array(
&hash[hash_begin..hash_end],
params.collision_bit_length(),
0);
row[0..expanded_hash.len()].clone_from_slice(&expanded_hash);
row[params.hash_length()..params.hash_length() + 4].clone_from_slice(&to_big_endian(idx));
rows.push(row);
}
let mut hash_len = params.hash_length();
let mut indices_len = 4;
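	// From here on this is the standard Equihash verification walk: the 2^K
	// expanded rows are merged pairwise for K rounds. Each round requires the
	// paired rows to collide on the next collision_byte_length bytes, to keep
	// ordered, disjoint index lists, and to XOR together; the surviving root
	// row must then be zero across the remaining hash bytes.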
while rows.len() > 1 {
let mut rows_check = Vec::new();
for i in 0..rows.len() / 2 {
let row1 = &rows[i * 2];
let row2 = &rows[i * 2 + 1];
if !has_collision(row1, row2, params.collision_byte_length()) {
return false;
}
if indices_before(row2, row1, hash_len, indices_len) {
return false;
}
if !distinct_indices(row1, row2, hash_len, indices_len) {
return false;
}
rows_check.push(merge_rows(row1, row2, hash_len, indices_len, params.collision_byte_length()));
}
rows = rows_check;
hash_len -= params.collision_byte_length();
indices_len *= 2;
}
rows[0].iter().take(hash_len).all(|x| *x == 0)
}
fn merge_rows(row1: &[u8], row2: &[u8], len: usize, indices_len: usize, trim: usize) -> Vec<u8> {
let mut row = row1.to_vec();
for i in trim..len {
row[i - trim] = row1[i] ^ row2[i];
}
if indices_before(row1, row2, len, indices_len) {
row[len - trim..len - trim + indices_len]
.clone_from_slice(&row1[len..len + indices_len]);
row[len - trim + indices_len..len - trim + indices_len + indices_len]
.clone_from_slice(&row2[len..len + indices_len]);
} else {
row[len - trim..len - trim + indices_len]
.clone_from_slice(&row2[len..len + indices_len]);
row[len - trim + indices_len..len - trim + indices_len + indices_len]
.clone_from_slice(&row1[len..len + indices_len]);
}
row
}
fn distinct_indices(row1: &[u8], row2: &[u8], len: usize, indices_len: usize) -> bool {
	// Every 4-byte index of row1 must differ from every index of row2, so the
	// inner cursor has to restart for each outer index.
	let mut i = 0;
	while i < indices_len {
		let mut j = 0;
		while j < indices_len {
			if row1[len + i..len + i + 4] == row2[len + j..len + j + 4] {
				return false;
			}
			j += 4;
		}
		i += 4;
	}
	true
}
fn has_collision(row1: &[u8], row2: &[u8], collision_byte_length: usize) -> bool {
for i in 0..collision_byte_length {
if row1[i] != row2[i] {
return false;
}
}
true
}
fn indices_before(row1: &[u8], row2: &[u8], len: usize, indices_len: usize) -> bool {
for i in 0..indices_len {
if row1[len + i] < row2[len + i] {
return true;
} else if row1[len + i] > row2[len + i] {
return false;
}
}
false
}
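// Plain byte-wise comparison suffices above because the 4-byte indexes are
// stored big-endian (see `to_big_endian` below), so lexicographic order and
// numeric order coincide.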
fn generate_hash(context: &Blake2b, g: u32) -> Vec<u8> {
let mut context = context.clone();
context.update(&to_little_endian(g));
context.finalize().as_bytes().to_vec()
}
fn get_indices_from_minimal(solution: &[u8], collision_bit_length: usize) -> Vec<u32> {
let indices_len = 8 * 4 * solution.len() / (collision_bit_length + 1);
let byte_pad = 4 - ((collision_bit_length + 1 + 7) / 8);
let array = expand_array(solution, collision_bit_length + 1, byte_pad);
let mut ret = Vec::new();
for i in 0..indices_len / 4 {
ret.push(array_to_eh_index(&array[i*4..i*4 + 4]));
}
ret
}
fn array_to_eh_index(data: &[u8]) -> u32 {
BigEndian::read_u32(data)
}
fn expand_array(data: &[u8], bit_len: usize, byte_pad: usize) -> Vec<u8> {
let mut array = Vec::new();
let out_width = (bit_len + 7) / 8 + byte_pad;
let bit_len_mask = (1u32 << bit_len) - 1;
// The acc_bits least-significant bits of acc_value represent a bit sequence
// in big-endian order.
let mut acc_bits = 0usize;
let mut acc_value = 0u32;
for i in 0usize..data.len() {
acc_value = (acc_value << 8) | (data[i] as u32);
acc_bits += 8;
// When we have bit_len or more bits in the accumulator, write the next
// output element.
if acc_bits >= bit_len {
acc_bits -= bit_len;
for _ in 0usize..byte_pad {
array.push(0);
}
for x in byte_pad..out_width {
array.push((
// Big-endian
(acc_value >> (acc_bits + (8 * (out_width - x - 1)))) as u8
) & (
// Apply bit_len_mask across byte boundaries
((bit_len_mask >> (8 * (out_width - x - 1))) & 0xFF) as u8
));
}
}
}
array
}
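/// Creates the Blake2b state personalized with "ZcashPoW" || le32(N) || le32(K),
/// emitting hash_output() bytes per digest.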
fn new_blake2(params: &EquihashParams) -> Blake2b {
let mut personalization = [0u8; 16];
personalization[0..8].clone_from_slice(b"ZcashPoW");
personalization[8..12].clone_from_slice(&to_little_endian(params.N));
personalization[12..16].clone_from_slice(&to_little_endian(params.K));
Blake2b::with_params(params.hash_output(), &[], &[], &personalization)
}
fn to_little_endian(num: u32) -> [u8; 4] {
let mut le_num = [0u8; 4];
LittleEndian::write_u32(&mut le_num[..], num);
le_num
}
fn to_big_endian(num: u32) -> [u8; 4] {
let mut be_num = [0u8; 4];
BigEndian::write_u32(&mut be_num[..], num);
be_num
}
#[cfg(test)]
mod tests {
use primitives::bigint::{U256, Uint};
use byteorder::WriteBytesExt;
use super::*;
fn get_minimal_from_indices(indices: &[u32], collision_bit_length: usize) -> Vec<u8> {
let indices_len = indices.len() * 4;
let min_len = (collision_bit_length + 1) * indices_len / (8 * 4);
let byte_pad = 4 - ((collision_bit_length + 1) + 7) / 8;
let mut array = Vec::new();
for i in 0..indices.len() {
let mut be_index = Vec::new();
be_index.write_u32::<BigEndian>(indices[i]).unwrap();
array.extend(be_index);
}
let mut ret = vec![0u8; min_len];
compress_array(&array, &mut ret, collision_bit_length + 1, byte_pad);
ret
}
fn compress_array(data: &[u8], array: &mut Vec<u8>, bit_len: usize, byte_pad: usize) {
let in_width = (bit_len + 7) / 8 + byte_pad;
let bit_len_mask = (1u32 << bit_len) - 1;
// The acc_bits least-significant bits of acc_value represent a bit sequence
// in big-endian order.
let mut acc_bits = 0usize;
let mut acc_value = 0u32;
let mut j = 0usize;
for i in 0usize..array.len() {
// When we have fewer than 8 bits left in the accumulator, read the next
// input element.
if acc_bits < 8 {
acc_value = acc_value << bit_len;
for x in byte_pad..in_width {
acc_value = acc_value | ((
data[j + x] & (((bit_len_mask >> (8 * (in_width - x - 1))) & 0xFF) as u8)
) as u32) << (8 * (in_width - x - 1));
}
j += in_width;
acc_bits += bit_len;
}
acc_bits -= 8;
array[i] = ((acc_value >> acc_bits) & 0xFF) as u8;
}
}
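// A minimal round-trip sketch of the packing helpers above, assuming input
// that is an exact multiple of bit_len bits; bit_len = 21 / byte_pad = 1
// correspond to the minimal encoding for N = 200, K = 9.
#[test]
fn expand_compress_roundtrip_works() {
let bit_len = 21;
let byte_pad = 4 - (bit_len + 7) / 8;
// 21 bytes = 168 bits = exactly 8 chunks of 21 bits
let packed: Vec<u8> = (0u8..21).map(|i| i.wrapping_mul(37)).collect();
let expanded = expand_array(&packed, bit_len, byte_pad);
assert_eq!(expanded.len(), 8 * ((bit_len + 7) / 8 + byte_pad));
let mut repacked = vec![0u8; packed.len()];
compress_array(&expanded, &mut repacked, bit_len, byte_pad);
assert_eq!(repacked, packed);
}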
fn test_equihash_verifier(n: u32, k: u32, input: &[u8], nonce: U256, solution: &[u32]) -> bool {
let solution = get_minimal_from_indices(solution, (n / (k + 1)) as usize);
let mut le_nonce = vec![0; 32];
nonce.to_little_endian(&mut le_nonce);
let mut input = input.to_vec();
input.extend(le_nonce);
let params = EquihashParams { N: n, K: k };
verify_equihash_solution(&params, &input, &solution)
}
#[test]
fn verify_equihash_solution_works() {
assert!(test_equihash_verifier(
96, 5, b"Equihash is an asymmetric PoW based on the Generalised Birthday problem.",
U256::one(), &vec![
2261, 15185, 36112, 104243, 23779, 118390, 118332, 130041, 32642, 69878, 76925, 80080, 45858, 116805, 92842, 111026, 15972, 115059, 85191, 90330, 68190, 122819, 81830, 91132, 23460, 49807, 52426, 80391, 69567, 114474, 104973, 122568,
],
));
}
}
View File
@ -51,16 +51,11 @@ pub enum Error {
TransactionFeesOverflow,
/// Sum of all referenced outputs in block transactions resulted in the overflow
ReferencedInputsSumOverflow,
/// SegWit: bad witness nonce size
WitnessInvalidNonceSize,
/// SegWit: witness merkle mismatch
WitnessMerkleCommitmentMismatch,
/// SegWit: unexpected witness
UnexpectedWitness,
/// Non-canonical transaction ordering within block
NonCanonicalTransactionOrdering,
/// Database error
Database(DBError),
InvalidEquihashSolution,
}
impl From<DBError> for Error {
@ -108,9 +103,11 @@ pub enum TransactionError {
UnspentTransactionWithTheSameHash,
/// Using output that is surely spent
UsingSpentOutput(H256, u32),
/// Transaction, protected using BitcoinCash OP_RETURN replay protection (REQ-6-1).
ReturnReplayProtection,
/// Transaction with witness is received before SegWit is activated.
PrematureWitness,
/// A coinbase transaction MUST NOT have any joint split descriptions
CoinbaseWithJointSplit,
/// Invalid transaction version.
InvalidVersion,
/// Transaction has too large output value.
ValueOverflow,
}
View File
@ -53,11 +53,14 @@
extern crate time;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate parking_lot;
extern crate rayon;
extern crate blake2_rfc;
extern crate byteorder;
#[cfg(test)]
extern crate rand;
extern crate rustc_hex as hex;
extern crate storage;
extern crate chain;
@ -73,11 +76,12 @@ pub mod constants;
mod canon;
mod deployments;
mod duplex_store;
mod equihash;
mod error;
mod sigops;
mod timestamp;
mod work;
mod work_bch;
mod work_zcash;
// pre-verification
mod verify_block;
View File
@ -1,7 +1,6 @@
use network::ConsensusFork;
use chain::Transaction;
use storage::TransactionOutputProvider;
use script::{Script, ScriptWitness};
use script::Script;
/// Counts signature operations in given transaction
/// bip16_active flag indicates if we should also count signature operations
@ -41,55 +40,3 @@ pub fn transaction_sigops(
input_sigops + output_sigops + bip16_sigops
}
pub fn transaction_sigops_cost(
transaction: &Transaction,
store: &TransactionOutputProvider,
sigops: usize,
) -> usize {
let sigops_cost = sigops * ConsensusFork::witness_scale_factor();
let witness_sigops_cost: usize = transaction.inputs.iter()
.map(|input| store.transaction_output(&input.previous_output, usize::max_value())
.map(|output| witness_sigops(&Script::new(input.script_sig.clone()), &Script::new(output.script_pubkey.clone()), &input.script_witness,))
.unwrap_or(0))
.sum();
sigops_cost + witness_sigops_cost
}
fn witness_sigops(
script_sig: &Script,
script_pubkey: &Script,
script_witness: &ScriptWitness,
) -> usize {
if let Some((witness_version, witness_program)) = script_pubkey.parse_witness_program() {
return witness_program_sigops(witness_version, witness_program, script_witness);
}
if script_pubkey.is_pay_to_script_hash() && script_sig.is_push_only() {
if let Some(Ok(instruction)) = script_sig.iter().last() {
if let Some(data) = instruction.data {
let subscript = Script::new(data.into());
if let Some((witness_version, witness_program)) = subscript.parse_witness_program() {
return witness_program_sigops(witness_version, witness_program, script_witness);
}
}
}
}
0
}
fn witness_program_sigops(
witness_version: u8,
witness_program: &[u8],
script_witness: &ScriptWitness,
) -> usize {
match witness_version {
0 if witness_program.len() == 20 => 1,
0 if witness_program.len() == 32 => match script_witness.last() {
Some(subscript) => Script::new(subscript.clone()).sigops_count(false, true),
_ => 0,
},
_ => 0,
}
}
View File
@ -23,5 +23,6 @@ pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeade
}
timestamps.sort();
timestamps[timestamps.len() / 2]
}
View File
@ -1,6 +1,6 @@
use std::collections::HashSet;
use chain::IndexedBlock;
use network::ConsensusFork;
use network::ConsensusParams;
use sigops::transaction_sigops;
use duplex_store::NoopStore;
use error::{Error, TransactionError};
@ -16,14 +16,14 @@ pub struct BlockVerifier<'a> {
}
impl<'a> BlockVerifier<'a> {
pub fn new(block: &'a IndexedBlock) -> Self {
pub fn new(block: &'a IndexedBlock, consensus: &'a ConsensusParams) -> Self {
BlockVerifier {
empty: BlockEmpty::new(block),
coinbase: BlockCoinbase::new(block),
serialized_size: BlockSerializedSize::new(block, ConsensusFork::absolute_maximum_block_size()),
serialized_size: BlockSerializedSize::new(block, consensus),
extra_coinbases: BlockExtraCoinbases::new(block),
transactions_uniqueness: BlockTransactionsUniqueness::new(block),
sigops: BlockSigops::new(block, ConsensusFork::absolute_maximum_block_sigops()),
sigops: BlockSigops::new(block, consensus),
merkle_root: BlockMerkleRoot::new(block),
}
}
@ -66,10 +66,10 @@ pub struct BlockSerializedSize<'a> {
}
impl<'a> BlockSerializedSize<'a> {
fn new(block: &'a IndexedBlock, max_size: usize) -> Self {
fn new(block: &'a IndexedBlock, consensus: &'a ConsensusParams) -> Self {
BlockSerializedSize {
block: block,
max_size: max_size,
max_size: consensus.max_block_size(),
}
}
@ -153,10 +153,10 @@ pub struct BlockSigops<'a> {
}
impl<'a> BlockSigops<'a> {
fn new(block: &'a IndexedBlock, max_sigops: usize) -> Self {
fn new(block: &'a IndexedBlock, consensus: &'a ConsensusParams) -> Self {
BlockSigops {
block: block,
max_sigops: max_sigops,
max_sigops: consensus.max_block_sigops(),
}
}
View File
@ -1,6 +1,6 @@
use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator};
use chain::IndexedBlock;
use network::Network;
use network::ConsensusParams;
use error::Error;
use verify_block::BlockVerifier;
use verify_header::HeaderVerifier;
@ -13,12 +13,12 @@ pub struct ChainVerifier<'a> {
}
impl<'a> ChainVerifier<'a> {
pub fn new(block: &'a IndexedBlock, network: Network, current_time: u32) -> Self {
pub fn new(block: &'a IndexedBlock, consensus: &'a ConsensusParams, current_time: u32) -> Self {
trace!(target: "verification", "Block pre-verification {}", block.hash().to_reversed_str());
ChainVerifier {
block: BlockVerifier::new(block),
header: HeaderVerifier::new(&block.header, network, current_time),
transactions: block.transactions.iter().map(TransactionVerifier::new).collect(),
block: BlockVerifier::new(block, consensus),
header: HeaderVerifier::new(&block.header, consensus, current_time),
transactions: block.transactions.iter().map(|tx| TransactionVerifier::new(tx, consensus)).collect(),
}
}
View File
@ -1,6 +1,6 @@
use primitives::compact::Compact;
use chain::IndexedBlockHeader;
use network::Network;
use network::ConsensusParams;
use work::is_valid_proof_of_work;
use error::Error;
use constants::BLOCK_MAX_FUTURE;
@ -11,9 +11,9 @@ pub struct HeaderVerifier<'a> {
}
impl<'a> HeaderVerifier<'a> {
pub fn new(header: &'a IndexedBlockHeader, network: Network, current_time: u32) -> Self {
pub fn new(header: &'a IndexedBlockHeader, consensus: &ConsensusParams, current_time: u32) -> Self {
HeaderVerifier {
proof_of_work: HeaderProofOfWork::new(header, network),
proof_of_work: HeaderProofOfWork::new(header, consensus),
timestamp: HeaderTimestamp::new(header, current_time, BLOCK_MAX_FUTURE as u32),
}
}
@ -31,10 +31,10 @@ pub struct HeaderProofOfWork<'a> {
}
impl<'a> HeaderProofOfWork<'a> {
fn new(header: &'a IndexedBlockHeader, network: Network) -> Self {
fn new(header: &'a IndexedBlockHeader, consensus: &ConsensusParams) -> Self {
HeaderProofOfWork {
header: header,
max_work_bits: network.max_bits().into(),
max_work_bits: consensus.network.max_bits().into(),
}
}
View File
@ -1,33 +1,44 @@
use std::ops;
use ser::Serializable;
use chain::IndexedTransaction;
use network::{ConsensusParams, ConsensusFork};
use deployments::BlockDeployments;
use network::ConsensusParams;
use duplex_store::NoopStore;
use sigops::transaction_sigops;
use error::TransactionError;
use constants::{MIN_COINBASE_SIZE, MAX_COINBASE_SIZE};
pub struct TransactionVerifier<'a> {
pub version: TransactionVersion<'a>,
pub empty: TransactionEmpty<'a>,
pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
pub oversized_coinbase: TransactionOversizedCoinbase<'a>,
pub joint_split_in_coinbase: TransactionJointSplitInCoinbase<'a>,
pub size: TransactionAbsoluteSize<'a>,
pub value_overflow: TransactionValueOverflow<'a>,
}
impl<'a> TransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction) -> Self {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams) -> Self {
trace!(target: "verification", "Tx pre-verification {}", transaction.hash.to_reversed_str());
TransactionVerifier {
version: TransactionVersion::new(transaction),
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
oversized_coinbase: TransactionOversizedCoinbase::new(transaction, MIN_COINBASE_SIZE..MAX_COINBASE_SIZE),
joint_split_in_coinbase: TransactionJointSplitInCoinbase::new(transaction),
size: TransactionAbsoluteSize::new(transaction, consensus),
value_overflow: TransactionValueOverflow::new(transaction, consensus),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.empty.check());
try!(self.null_non_coinbase.check());
try!(self.oversized_coinbase.check());
self.version.check()?;
self.empty.check()?;
self.null_non_coinbase.check()?;
self.oversized_coinbase.check()?;
self.joint_split_in_coinbase.check()?;
self.size.check()?;
self.value_overflow.check()?;
Ok(())
}
}
@ -36,35 +47,38 @@ pub struct MemoryPoolTransactionVerifier<'a> {
pub empty: TransactionEmpty<'a>,
pub null_non_coinbase: TransactionNullNonCoinbase<'a>,
pub is_coinbase: TransactionMemoryPoolCoinbase<'a>,
pub size: TransactionSize<'a>,
pub premature_witness: TransactionPrematureWitness<'a>,
pub size: TransactionAbsoluteSize<'a>,
pub sigops: TransactionSigops<'a>,
pub value_overflow: TransactionValueOverflow<'a>,
}
impl<'a> MemoryPoolTransactionVerifier<'a> {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams, deployments: &'a BlockDeployments<'a>) -> Self {
pub fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams) -> Self {
trace!(target: "verification", "Mempool-Tx pre-verification {}", transaction.hash.to_reversed_str());
MemoryPoolTransactionVerifier {
empty: TransactionEmpty::new(transaction),
null_non_coinbase: TransactionNullNonCoinbase::new(transaction),
is_coinbase: TransactionMemoryPoolCoinbase::new(transaction),
size: TransactionSize::new(transaction, consensus),
premature_witness: TransactionPrematureWitness::new(transaction, &deployments),
sigops: TransactionSigops::new(transaction, ConsensusFork::absolute_maximum_block_sigops()),
size: TransactionAbsoluteSize::new(transaction, consensus),
sigops: TransactionSigops::new(transaction, consensus.max_block_sigops()),
value_overflow: TransactionValueOverflow::new(transaction, consensus),
}
}
pub fn check(&self) -> Result<(), TransactionError> {
try!(self.empty.check());
try!(self.null_non_coinbase.check());
try!(self.is_coinbase.check());
try!(self.size.check());
try!(self.premature_witness.check());
try!(self.sigops.check());
self.empty.check()?;
self.null_non_coinbase.check()?;
self.is_coinbase.check()?;
self.size.check()?;
self.sigops.check()?;
self.value_overflow.check()?;
Ok(())
}
}
/// If version == 1 or nJointSplit == 0, then tx_in_count MUST NOT be 0.
/// Transactions containing empty `vin` must have either non-empty `vjoinsplit` or non-empty `vShieldedSpend`.
/// Transactions containing empty `vout` must have either non-empty `vjoinsplit` or non-empty `vShieldedOutput`.
pub struct TransactionEmpty<'a> {
transaction: &'a IndexedTransaction,
}
@ -77,11 +91,23 @@ impl<'a> TransactionEmpty<'a> {
}
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.is_empty() {
Err(TransactionError::Empty)
} else {
Ok(())
// If version == 1 or nJointSplit == 0, then tx_in_count MUST NOT be 0.
if self.transaction.raw.version == 1 || self.transaction.raw.joint_split.is_none() {
if self.transaction.raw.inputs.is_empty() {
return Err(TransactionError::Empty);
}
}
// Transactions with an empty `vin` must have a non-empty `vjoinsplit`.
// Transactions with an empty `vout` must have a non-empty `vjoinsplit`.
// TODO [Sapling]: ... or non-empty `vShieldedOutput`
if self.transaction.raw.is_empty() {
if self.transaction.raw.joint_split.is_none() {
return Err(TransactionError::Empty);
}
}
Ok(())
}
}
@ -149,22 +175,23 @@ impl<'a> TransactionMemoryPoolCoinbase<'a> {
}
}
pub struct TransactionSize<'a> {
/// The encoded size of the transaction MUST NOT exceed the absolute maximum transaction size, i.e. the largest limit that can ever apply.
pub struct TransactionAbsoluteSize<'a> {
transaction: &'a IndexedTransaction,
consensus: &'a ConsensusParams,
absolute_max_size: usize,
}
impl<'a> TransactionSize<'a> {
impl<'a> TransactionAbsoluteSize<'a> {
fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams) -> Self {
TransactionSize {
TransactionAbsoluteSize {
transaction: transaction,
consensus: consensus,
absolute_max_size: consensus.absolute_max_transaction_size(),
}
}
fn check(&self) -> Result<(), TransactionError> {
let size = self.transaction.raw.serialized_size();
if size > self.consensus.fork.max_transaction_size() {
if size > self.absolute_max_size {
Err(TransactionError::MaxSize)
} else {
Ok(())
@ -195,26 +222,143 @@ impl<'a> TransactionSigops<'a> {
}
}
pub struct TransactionPrematureWitness<'a> {
/// The transaction version number MUST be greater than or equal to 1.
pub struct TransactionVersion<'a> {
transaction: &'a IndexedTransaction,
segwit_active: bool,
}
impl<'a> TransactionPrematureWitness<'a> {
pub fn new(transaction: &'a IndexedTransaction, deployments: &'a BlockDeployments<'a>) -> Self {
let segwit_active = deployments.segwit();
TransactionPrematureWitness {
transaction: transaction,
segwit_active: segwit_active,
impl<'a> TransactionVersion<'a> {
fn new(transaction: &'a IndexedTransaction) -> Self {
TransactionVersion {
transaction,
}
}
pub fn check(&self) -> Result<(), TransactionError> {
if !self.segwit_active && self.transaction.raw.has_witness() {
Err(TransactionError::PrematureWitness)
} else {
Ok(())
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.version < 1 {
return Err(TransactionError::InvalidVersion);
}
Ok(())
}
}
/// A coinbase transaction MUST NOT have any JoinSplit descriptions.
pub struct TransactionJointSplitInCoinbase<'a> {
transaction: &'a IndexedTransaction,
}
impl<'a> TransactionJointSplitInCoinbase<'a> {
fn new(transaction: &'a IndexedTransaction) -> Self {
TransactionJointSplitInCoinbase {
transaction,
}
}
fn check(&self) -> Result<(), TransactionError> {
if self.transaction.raw.is_coinbase() && self.transaction.raw.joint_split.is_some() {
return Err(TransactionError::CoinbaseWithJointSplit);
}
Ok(())
}
}
/// Check for overflow of output values.
pub struct TransactionValueOverflow<'a> {
transaction: &'a IndexedTransaction,
max_value: u64,
}
impl<'a> TransactionValueOverflow<'a> {
fn new(transaction: &'a IndexedTransaction, consensus: &'a ConsensusParams) -> Self {
TransactionValueOverflow {
transaction,
max_value: consensus.max_transaction_value(),
}
}
fn check(&self) -> Result<(), TransactionError> {
let mut total_output = 0u64;
for output in &self.transaction.raw.outputs {
if output.value > self.max_value {
return Err(TransactionError::ValueOverflow)
}
total_output = match total_output.checked_add(output.value) {
Some(total_output) if total_output <= self.max_value => total_output,
_ => return Err(TransactionError::ValueOverflow),
};
}
Ok(())
}
}
#[cfg(test)]
mod tests {
extern crate test_data;
use network::{Network, ConsensusParams};
use error::TransactionError;
use super::{TransactionEmpty, TransactionVersion, TransactionJointSplitInCoinbase, TransactionValueOverflow};
#[test]
fn transaction_empty_works() {
assert_eq!(TransactionEmpty::new(&test_data::TransactionBuilder::with_version(1)
.add_output(0)
.add_default_joint_split()
.into()).check(), Err(TransactionError::Empty));
assert_eq!(TransactionEmpty::new(&test_data::TransactionBuilder::with_version(2)
.add_output(0)
.into()).check(), Err(TransactionError::Empty));
assert_eq!(TransactionEmpty::new(&test_data::TransactionBuilder::with_version(2)
.add_output(0)
.add_default_joint_split()
.into()).check(), Ok(()));
assert_eq!(TransactionEmpty::new(&test_data::TransactionBuilder::with_version(2)
.into()).check(), Err(TransactionError::Empty));
}
#[test]
fn transaction_version_works() {
assert_eq!(TransactionVersion::new(&test_data::TransactionBuilder::with_version(0)
.into()).check(), Err(TransactionError::InvalidVersion));
assert_eq!(TransactionVersion::new(&test_data::TransactionBuilder::with_version(1)
.into()).check(), Ok(()));
}
#[test]
fn transaction_joint_split_in_coinbase_works() {
assert_eq!(TransactionJointSplitInCoinbase::new(&test_data::TransactionBuilder::coinbase()
.add_default_joint_split().into()).check(), Err(TransactionError::CoinbaseWithJointSplit));
assert_eq!(TransactionJointSplitInCoinbase::new(&test_data::TransactionBuilder::coinbase()
.into()).check(), Ok(()));
assert_eq!(TransactionJointSplitInCoinbase::new(&test_data::TransactionBuilder::default()
.add_default_joint_split().into()).check(), Ok(()));
assert_eq!(TransactionJointSplitInCoinbase::new(&test_data::TransactionBuilder::default()
.into()).check(), Ok(()));
}
#[test]
fn transaction_value_overflow_works() {
let consensus = ConsensusParams::new(Network::Mainnet);
assert_eq!(TransactionValueOverflow::new(&test_data::TransactionBuilder::with_output(consensus.max_transaction_value() + 1)
.into(), &consensus).check(), Err(TransactionError::ValueOverflow));
assert_eq!(TransactionValueOverflow::new(&test_data::TransactionBuilder::with_output(consensus.max_transaction_value() / 2)
.add_output(consensus.max_transaction_value() / 2 + 1)
.into(), &consensus).check(), Err(TransactionError::ValueOverflow));
assert_eq!(TransactionValueOverflow::new(&test_data::TransactionBuilder::with_output(consensus.max_transaction_value())
.into(), &consensus).check(), Ok(()));
}
}
View File
@ -1,24 +1,10 @@
use std::cmp;
use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::bigint::U256;
use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, ConsensusFork};
use storage::{BlockHeaderProvider, BlockRef};
use work_bch::work_required_bitcoin_cash;
use constants::{
DOUBLE_SPACING_SECONDS, TARGET_TIMESPAN_SECONDS,
MIN_TIMESPAN, MAX_TIMESPAN, RETARGETING_INTERVAL
};
pub fn is_retarget_height(height: u32) -> bool {
height % RETARGETING_INTERVAL == 0
}
fn range_constrain(value: i64, min: i64, max: i64) -> i64 {
cmp::min(cmp::max(value, min), max)
}
use chain::IndexedBlockHeader;
use network::ConsensusParams;
use storage::BlockHeaderProvider;
use work_zcash::work_required_zcash;
/// Returns true if the hash is lower than or equal to the target represented by compact bits
pub fn is_valid_proof_of_work_hash(bits: Compact, hash: &H256) -> bool {
@ -48,16 +34,8 @@ pub fn is_valid_proof_of_work(max_work_bits: Compact, bits: Compact, hash: &H256
target <= maximum && value <= target
}
/// Returns constrained number of seconds since last retarget
pub fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
// subtract unsigned 32 bit numbers in signed 64 bit space in
// order to prevent underflow before applying the range constraint.
let timespan = last_timestamp as i64 - retarget_timestamp as i64;
range_constrain(timespan, MIN_TIMESPAN as i64, MAX_TIMESPAN as i64) as u32
}
/// Returns work required for given header
pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
pub fn work_required(parent_hash: H256, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
let max_bits = consensus.network.max_bits().into();
if height == 0 {
return max_bits;
@ -65,81 +43,10 @@ pub fn work_required(parent_hash: H256, time: u32, height: u32, store: &BlockHea
let parent_header = store.block_header(parent_hash.clone().into()).expect("self.height != 0; qed");
match consensus.fork {
ConsensusFork::BitcoinCash(ref fork) if height >= fork.height =>
return work_required_bitcoin_cash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, time, height, store, consensus, fork, max_bits),
_ => (),
}
if is_retarget_height(height) {
return work_required_retarget(parent_header, height, store, max_bits);
}
if consensus.network == Network::Testnet {
return work_required_testnet(parent_hash, time, height, store, Network::Testnet)
}
parent_header.bits
}
pub fn work_required_testnet(parent_hash: H256, time: u32, height: u32, store: &BlockHeaderProvider, network: Network) -> Compact {
assert!(height != 0, "cannot calculate required work for genesis block");
let mut bits = Vec::new();
let mut block_ref: BlockRef = parent_hash.into();
let parent_header = store.block_header(block_ref.clone()).expect("height != 0; qed");
let max_time_gap = parent_header.time + DOUBLE_SPACING_SECONDS;
let max_bits = network.max_bits().into();
if time > max_time_gap {
return max_bits;
}
// TODO: optimize it, so it does not make 2016!!! redundant queries each time
for _ in 0..RETARGETING_INTERVAL {
let previous_header = match store.block_header(block_ref) {
Some(h) => h,
None => { break; }
};
bits.push(previous_header.bits);
block_ref = previous_header.previous_header_hash.into();
}
for (index, bit) in bits.into_iter().enumerate() {
if bit != max_bits || is_retarget_height(height - index as u32 - 1) {
return bit;
}
}
max_bits
}
/// Algorithm used for retargeting work every 2 weeks
pub fn work_required_retarget(parent_header: BlockHeader, height: u32, store: &BlockHeaderProvider, max_work_bits: Compact) -> Compact {
let retarget_ref = (height - RETARGETING_INTERVAL).into();
let retarget_header = store.block_header(retarget_ref).expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
// timestamp of block(height - RETARGETING_INTERVAL)
let retarget_timestamp = retarget_header.time;
// timestamp of parent block
let last_timestamp = parent_header.time;
// bits of last block
let last_bits = parent_header.bits;
let mut retarget: U256 = last_bits.into();
let maximum: U256 = max_work_bits.into();
retarget = retarget * retarget_timespan(retarget_timestamp, last_timestamp).into();
retarget = retarget / TARGET_TIMESPAN_SECONDS.into();
if retarget > maximum {
max_work_bits
} else {
retarget.into()
}
work_required_zcash(IndexedBlockHeader {
hash: parent_hash,
raw: parent_header
}, store, consensus, max_bits)
}
pub fn block_reward_satoshi(block_height: u32) -> u64 {
@ -150,28 +57,7 @@ pub fn block_reward_satoshi(block_height: u32) -> u64 {
#[cfg(test)]
mod tests {
use primitives::hash::H256;
use primitives::compact::Compact;
use network::Network;
use super::{is_valid_proof_of_work_hash, is_valid_proof_of_work, block_reward_satoshi};
fn is_valid_pow(max: Compact, bits: u32, hash: &'static str) -> bool {
is_valid_proof_of_work_hash(bits.into(), &H256::from_reversed_str(hash)) &&
is_valid_proof_of_work(max.into(), bits.into(), &H256::from_reversed_str(hash))
}
#[test]
fn test_is_valid_proof_of_work() {
// block 2
assert!(is_valid_pow(Network::Mainnet.max_bits().into(), 486604799u32, "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd"));
// block 400_000
assert!(is_valid_pow(Network::Mainnet.max_bits().into(), 403093919u32, "000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f"));
// other random tests
assert!(is_valid_pow(Network::Regtest.max_bits().into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000000"));
assert!(!is_valid_pow(Network::Regtest.max_bits().into(), 0x181bc330u32, "00000000000000001bc330000000000000000000000000000000000000000001"));
assert!(!is_valid_pow(Network::Regtest.max_bits().into(), 0x181bc330u32, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
}
use super::{block_reward_satoshi};
#[test]
fn reward() {
View File
@ -1,492 +0,0 @@
use primitives::compact::Compact;
use primitives::hash::H256;
use primitives::bigint::{Uint, U256};
use chain::{IndexedBlockHeader, BlockHeader};
use network::{Network, ConsensusParams, BitcoinCashConsensusParams};
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
use work::{is_retarget_height, work_required_testnet, work_required_retarget};
use constants::{
DOUBLE_SPACING_SECONDS, TARGET_SPACING_SECONDS, RETARGETING_INTERVAL
};
/// Returns work required for given header for the post-HF Bitcoin Cash block
pub fn work_required_bitcoin_cash(parent_header: IndexedBlockHeader, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams, fork: &BitcoinCashConsensusParams, max_bits: Compact) -> Compact {
// special processing of Bitcoin Cash difficulty adjustment hardfork (Nov 2017), where difficulty is adjusted after each block
// `height` is the height of the new block => comparison is shifted by one
if height.saturating_sub(1) >= fork.difficulty_adjustion_height {
return work_required_bitcoin_cash_adjusted(parent_header, time, height, store, consensus);
}
if is_retarget_height(height) {
return work_required_retarget(parent_header.raw, height, store, max_bits);
}
if consensus.network == Network::Testnet {
return work_required_testnet(parent_header.hash, time, height, store, Network::Testnet)
}
if parent_header.raw.bits == max_bits {
return parent_header.raw.bits;
}
// REQ-7 Difficulty adjustment in case of hashrate drop
// In case the MTP of the tip of the chain is 12h or more after the MTP 6 block before the tip,
// the proof of work target is increased by a quarter, or 25%, which corresponds to a difficulty
// reduction of 20%.
let ancient_block_ref = (height - 6 - 1).into();
let ancient_header = store.block_header(ancient_block_ref)
.expect("parent_header.bits != max_bits; difficulty is max_bits for first RETARGETING_INTERVAL height; RETARGETING_INTERVAL > 7; qed");
let ancient_timestamp = median_timestamp_inclusive(ancient_header.hash(), store);
let parent_timestamp = median_timestamp_inclusive(parent_header.hash.clone(), store);
let timestamp_diff = parent_timestamp.checked_sub(ancient_timestamp).unwrap_or_default();
if timestamp_diff < 43_200 {
// less than 12h => no difficulty change needed
return parent_header.raw.bits;
}
let mut new_bits: U256 = parent_header.raw.bits.into();
let max_bits: U256 = max_bits.into();
new_bits = new_bits + (new_bits >> 2);
if new_bits > max_bits {
new_bits = max_bits
}
new_bits.into()
}
/// Algorithm to adjust difficulty after each block. Implementation is based on Bitcoin ABC commit:
/// https://github.com/Bitcoin-ABC/bitcoin-abc/commit/be51cf295c239ff6395a0aa67a3e13906aca9cb2
fn work_required_bitcoin_cash_adjusted(parent_header: IndexedBlockHeader, time: u32, height: u32, store: &BlockHeaderProvider, consensus: &ConsensusParams) -> Compact {
/// To reduce the impact of timestamp manipulation, we select the block we are
/// basing our computation on via a median of 3.
fn suitable_block(mut header2: BlockHeader, store: &BlockHeaderProvider) -> BlockHeader {
let reason = "header.height >= RETARGETING_INTERVAL; RETARGETING_INTERVAL > 2; qed";
let mut header1 = store.block_header(header2.previous_header_hash.clone().into()).expect(reason);
let mut header0 = store.block_header(header1.previous_header_hash.clone().into()).expect(reason);
if header0.time > header2.time {
::std::mem::swap(&mut header0, &mut header2);
}
if header0.time > header1.time {
::std::mem::swap(&mut header0, &mut header1);
}
if header1.time > header2.time {
::std::mem::swap(&mut header1, &mut header2);
}
header1
}
/// Get block proof.
fn block_proof(header: &BlockHeader) -> U256 {
let proof: U256 = header.bits.into();
// We need to compute 2**256 / (bnTarget+1), but we can't represent 2**256
// as it's too large for an arith_uint256. However, as 2**256 is at least as
// large as bnTarget+1, it is equal to ((2**256 - bnTarget - 1) /
// (bnTarget+1)) + 1, or ~bnTarget / (bnTarget+1) + 1.
(!proof / (proof + U256::one())) + U256::one()
}
/// Compute chain work between two blocks. Last block work is included. First block work is excluded.
fn compute_work_between_blocks(first: H256, last: &BlockHeader, store: &BlockHeaderProvider) -> U256 {
debug_assert!(last.hash() != first);
let mut chain_work: U256 = block_proof(last);
let mut prev_hash = last.previous_header_hash.clone();
loop {
let header = store.block_header(prev_hash.into())
.expect("last header is on main chain; first is at height last.height - 144; it is on main chain; qed");
chain_work = chain_work + block_proof(&header);
prev_hash = header.previous_header_hash;
if prev_hash == first {
return chain_work;
}
}
}
/// Compute a target based on the work done between 2 blocks and the time
/// required to produce that work.
fn compute_target(first_header: BlockHeader, last_header: BlockHeader, store: &BlockHeaderProvider) -> U256 {
// From the total work done and the time it took to produce that much work,
// we can deduce how much work we expect to be produced in the targeted time
// between blocks.
let mut work = compute_work_between_blocks(first_header.hash(), &last_header, store);
work = work * TARGET_SPACING_SECONDS.into();
// In order to avoid difficulty cliffs, we bound the amplitude of the
// adjustment we are going to do.
debug_assert!(last_header.time > first_header.time);
let mut actual_timespan = last_header.time - first_header.time;
if actual_timespan > 288 * TARGET_SPACING_SECONDS {
actual_timespan = 288 * TARGET_SPACING_SECONDS;
} else if actual_timespan < 72 * TARGET_SPACING_SECONDS {
actual_timespan = 72 * TARGET_SPACING_SECONDS;
}
let work = work / actual_timespan.into();
// We need to compute T = (2^256 / W) - 1 but 2^256 doesn't fit in 256 bits.
// By expressing 1 as W / W, we get (2^256 - W) / W, and we can compute
// 2^256 - W as the complement of W.
(!work) / work
}
// This cannot handle the genesis block and early blocks in general.
debug_assert!(height > 0);
// Special difficulty rule for testnet:
// If the new block's timestamp is more than 2 * 10 minutes then allow
// mining of a min-difficulty block.
let max_bits = consensus.network.max_bits();
if consensus.network == Network::Testnet || consensus.network == Network::Unitest {
let max_time_gap = parent_header.raw.time + DOUBLE_SPACING_SECONDS;
if time > max_time_gap {
return max_bits.into();
}
}
// Compute the difficulty based on the full adjustment interval.
let last_height = height - 1;
debug_assert!(last_height >= RETARGETING_INTERVAL);
// Get the last suitable block of the difficulty interval.
let last_header = suitable_block(parent_header.raw, store);
// Get the first suitable block of the difficulty interval.
let first_height = last_height - 144;
let first_header = store.block_header(first_height.into())
.expect("last_height >= RETARGETING_INTERVAL; RETARGETING_INTERVAL - 144 > 0; qed");
let first_header = suitable_block(first_header, store);
// Compute the target based on time and work done during the interval.
let next_target = compute_target(first_header, last_header, store);
let max_bits = consensus.network.max_bits();
if next_target > max_bits {
return max_bits.into();
}
next_target.into()
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use primitives::bigint::U256;
use network::{Network, ConsensusParams, BitcoinCashConsensusParams, ConsensusFork};
use storage::{BlockHeaderProvider, BlockRef};
use chain::BlockHeader;
use work::work_required;
use super::work_required_bitcoin_cash_adjusted;
#[derive(Default)]
struct MemoryBlockHeaderProvider {
pub by_height: Vec<BlockHeader>,
pub by_hash: HashMap<H256, usize>,
}
impl MemoryBlockHeaderProvider {
pub fn insert(&mut self, header: BlockHeader) {
self.by_hash.insert(header.hash(), self.by_height.len());
self.by_height.push(header);
}
}
impl BlockHeaderProvider for MemoryBlockHeaderProvider {
fn block_header_bytes(&self, _block_ref: BlockRef) -> Option<Bytes> {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}
}
// original test link:
// https://github.com/bitcoinclassic/bitcoinclassic/blob/8bf1fb856df44d1b790b0b835e4c1969be736e25/src/test/pow_tests.cpp#L108
#[test]
fn bitcoin_cash_req7() {
let main_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCore);
let uahf_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams {
height: 1000,
difficulty_adjustion_height: 0xffffffff,
monolith_time: 0xffffffff,
magnetic_anomaly_time: 0xffffffff,
}));
let mut header_provider = MemoryBlockHeaderProvider::default();
header_provider.insert(BlockHeader {
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
time: 1269211443,
bits: 0x207fffff.into(),
nonce: 0,
});
// create pre-HF blocks, one every 10 minutes, up to the fork height
for height in 1..1000 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 10 * 60;
header_provider.insert(header);
}
// create x10 post-HF blocks every 2 hours
// MTP still less than 12h
for height in 1000..1010 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 2 * 60 * 60;
header_provider.insert(header.clone());
let main_bits: u32 = work_required(header.hash(), 0, height as u32, &header_provider, &main_consensus).into();
assert_eq!(main_bits, 0x207fffff_u32);
let uahf_bits: u32 = work_required(header.hash(), 0, height as u32, &header_provider, &uahf_consensus).into();
assert_eq!(uahf_bits, 0x207fffff_u32);
}
// MTP becomes greater than 12h
let mut header = header_provider.block_header(1009.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 2 * 60 * 60;
header_provider.insert(header.clone());
let main_bits: u32 = work_required(header.hash(), 0, 1010, &header_provider, &main_consensus).into();
assert_eq!(main_bits, 0x207fffff_u32);
let uahf_bits: u32 = work_required(header.hash(), 0, 1010, &header_provider, &uahf_consensus).into();
assert_eq!(uahf_bits, 0x1d00ffff_u32);
}
// original test link:
// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
#[test]
fn bitcoin_cash_adjusted_difficulty() {
let uahf_consensus = ConsensusParams::new(Network::Mainnet, ConsensusFork::BitcoinCash(BitcoinCashConsensusParams {
height: 1000,
difficulty_adjustion_height: 0xffffffff,
monolith_time: 0xffffffff,
magnetic_anomaly_time: 0xffffffff,
}));
let limit_bits = uahf_consensus.network.max_bits();
let initial_bits = limit_bits >> 4;
let mut header_provider = MemoryBlockHeaderProvider::default();
// Genesis block.
header_provider.insert(BlockHeader {
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
time: 1269211443,
bits: initial_bits.into(),
nonce: 0,
});
// Pile up some blocks every 10 mins to establish some history.
for height in 1..2050 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 600;
header_provider.insert(header);
}
// Difficulty stays the same as long as we produce a block every 10 mins.
let current_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2049.into()).unwrap().into(),
0, 2050, &header_provider, &uahf_consensus);
for height in 2050..2060 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 600;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
}
// Make sure we skip over blocks that are out of whack. To do so, we produce
// a block that is far in the future
let mut header = header_provider.block_header(2059.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2060.into()).unwrap().into(),
0, 2061, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
// .. and then produce a block with the expected timestamp.
let mut header = header_provider.block_header(2060.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 2 * 600 - 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2061.into()).unwrap().into(),
0, 2062, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
// The system should continue unaffected by the block with a bogus timestamp.
for height in 2062..2082 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 600;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
}
// We start emitting blocks slightly faster. The first block has no impact.
let mut header = header_provider.block_header(2081.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 550;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2082.into()).unwrap().into(),
0, 2083, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, current_bits);
// Now we should see difficulty increase slowly.
let mut current_bits = current_bits;
for height in 2083..2093 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 550;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work < current_work);
debug_assert!((current_work - calculated_work) < (current_work >> 10));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c0fe7b1.into());
// If we dramatically shorten block production, difficulty increases faster.
for height in 2093..2113 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 10;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work < current_work);
debug_assert!((current_work - calculated_work) < (current_work >> 4));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c0db19f.into());
// We start to emit blocks significantly slower. The first block has no
// impact.
let mut header = header_provider.block_header(2112.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let mut current_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2113.into()).unwrap().into(),
0, 2114, &header_provider, &uahf_consensus);
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c0d9222.into());
// If we dramatically slow down block production, difficulty decreases.
for height in 2114..2207 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work < limit_bits);
debug_assert!(calculated_work > current_work);
debug_assert!((calculated_work - current_work) < (current_work >> 3));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1c2f13b9.into());
// Due to the window of time being bounded, next block's difficulty actually
// gets harder.
let mut header = header_provider.block_header(2206.into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let mut current_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(2207.into()).unwrap().into(),
0, 2208, &header_provider, &uahf_consensus);
debug_assert_eq!(current_bits, 0x1c2ee9bf.into());
// And goes down again. It takes a while due to the window being bounded and
// the skewed block causes 2 blocks to get out of the window.
for height in 2208..2400 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
let current_work: U256 = current_bits.into();
let calculated_work: U256 = calculated_bits.into();
debug_assert!(calculated_work <= limit_bits);
debug_assert!(calculated_work > current_work);
debug_assert!((calculated_work - current_work) < (current_work >> 3));
current_bits = calculated_bits;
}
// Check the actual value.
debug_assert_eq!(current_bits, 0x1d00ffff.into());
// Once the difficulty reached the minimum allowed level, it doesn't get any
// easier.
for height in 2400..2405 {
let mut header = header_provider.block_header((height - 1).into()).unwrap();
header.previous_header_hash = header.hash();
header.time = header.time + 6000;
header.bits = current_bits;
header_provider.insert(header);
let calculated_bits = work_required_bitcoin_cash_adjusted(header_provider.block_header(height.into()).unwrap().into(),
0, height + 1, &header_provider, &uahf_consensus);
debug_assert_eq!(calculated_bits, limit_bits.into());
current_bits = calculated_bits;
}
}
}
View File
@ -0,0 +1,216 @@
use primitives::compact::Compact;
use primitives::bigint::{U256, Uint};
use chain::IndexedBlockHeader;
use network::ConsensusParams;
use storage::BlockHeaderProvider;
use timestamp::median_timestamp_inclusive;
/// Returns work required for given header for the ZCash block
pub fn work_required_zcash(parent_header: IndexedBlockHeader, store: &BlockHeaderProvider, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// TODO: special testnet case!
// Find the first block in the averaging interval
let parent_hash = parent_header.hash.clone();
let mut oldest_hash = parent_header.raw.previous_header_hash;
let mut bits_total: U256 = parent_header.raw.bits.into();
for _ in 1..consensus.pow_averaging_window {
let previous_header = match store.block_header(oldest_hash.into()) {
Some(previous_header) => previous_header,
None => return max_bits,
};
// TODO: check this
bits_total = match bits_total.overflowing_add(previous_header.bits.into()) {
(bits_total, false) => bits_total,
(_, true) => return max_bits,
};
oldest_hash = previous_header.previous_header_hash;
}
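// Average the expanded 256-bit targets over the window; summing the compact
// representations directly would be meaningless.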
let bits_avg = bits_total / consensus.pow_averaging_window.into();
let parent_mtp = median_timestamp_inclusive(parent_hash, store);
let oldest_mtp = median_timestamp_inclusive(oldest_hash, store);
calculate_work_required(bits_avg, parent_mtp, oldest_mtp, consensus, max_bits)
}
fn calculate_work_required(bits_avg: U256, parent_mtp: u32, oldest_mtp: u32, consensus: &ConsensusParams, max_bits: Compact) -> Compact {
// Limit adjustment step
// Use medians to prevent time-warp attacks
let actual_timespan = parent_mtp - oldest_mtp;
let mut actual_timespan = consensus.averaging_window_timespan() as i64 +
(actual_timespan as i64 - consensus.averaging_window_timespan() as i64) / 4;
if actual_timespan < consensus.min_actual_timespan() as i64 {
actual_timespan = consensus.min_actual_timespan() as i64;
}
if actual_timespan > consensus.max_actual_timespan() as i64 {
actual_timespan = consensus.max_actual_timespan() as i64;
}
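// Worked example (assuming the usual Zcash mainnet constants: 17-block
// averaging window, 150s target spacing, so averaging_window_timespan() is
// 2550s with clamps at 2142s and 3366s): an actual MTP difference of 3000s
// is dampened to 2550 + (3000 - 2550) / 4 = 2662s before the retarget below.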
// Retarget
let actual_timespan = actual_timespan as u32;
let mut bits_new = bits_avg / consensus.averaging_window_timespan().into();
bits_new = bits_new * actual_timespan.into();
if bits_new > max_bits.into() {
return max_bits;
}
bits_new.into()
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use primitives::bytes::Bytes;
use primitives::compact::Compact;
use primitives::bigint::U256;
use primitives::hash::H256;
use network::{Network, ConsensusParams};
use chain::BlockHeader;
use storage::{BlockHeaderProvider, BlockRef};
use timestamp::median_timestamp_inclusive;
use super::{work_required_zcash, calculate_work_required};
#[derive(Default)]
pub struct MemoryBlockHeaderProvider {
pub by_height: Vec<BlockHeader>,
pub by_hash: HashMap<H256, usize>,
}
impl MemoryBlockHeaderProvider {
pub fn last(&self) -> &BlockHeader {
self.by_height.last().unwrap()
}
pub fn insert(&mut self, header: BlockHeader) {
self.by_hash.insert(header.hash(), self.by_height.len());
self.by_height.push(header);
}
pub fn replace_last(&mut self, header: BlockHeader) {
let idx = self.by_height.len() - 1;
self.by_hash.remove(&self.by_height[idx].hash());
self.by_hash.insert(header.hash(), idx);
self.by_height[idx] = header;
}
}
impl BlockHeaderProvider for MemoryBlockHeaderProvider {
fn block_header_bytes(&self, _block_ref: BlockRef) -> Option<Bytes> {
unimplemented!()
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
match block_ref {
BlockRef::Hash(ref hash) => self.by_hash.get(hash).map(|h| &self.by_height[*h]).cloned(),
BlockRef::Number(height) => self.by_height.get(height as usize).cloned(),
}
}
}
// original test link:
// https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193
#[test]
fn zcash_work_required_works() {
let consensus = ConsensusParams::new(Network::Mainnet);
let max_bits = Network::Mainnet.max_bits();
let last_block = 2 * consensus.pow_averaging_window;
let first_block = last_block - consensus.pow_averaging_window;
// insert genesis block
let mut header_provider = MemoryBlockHeaderProvider::default();
header_provider.insert(BlockHeader {
time: 1269211443,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: 0.into(),
merkle_root_hash: 0.into(),
nonce: 0.into(),
reserved_hash: Default::default(),
solution: Default::default(),
});
// Start with evenly-spaced blocks of equal difficulty
for i in 1..last_block+1 {
let header = BlockHeader {
time: header_provider.last().time + consensus.pow_target_spacing,
bits: Compact::new(0x1e7fffff),
version: 0,
previous_header_hash: header_provider.by_height[i as usize - 1].hash(),
merkle_root_hash: 0.into(),
nonce: 0.into(),
reserved_hash: Default::default(),
solution: Default::default(),
};
header_provider.insert(header);
}
// Result should be the same as if last difficulty was used
let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &consensus, max_bits.into());
assert_eq!(actual, expected);
// Result should be unchanged, modulo integer division precision loss
let mut bits_expected: U256 = Compact::new(0x1e7fffff).into();
bits_expected = bits_expected / consensus.averaging_window_timespan().into();
bits_expected = bits_expected * consensus.averaging_window_timespan().into();
assert_eq!(work_required_zcash(header_provider.last().clone().into(),
&header_provider, &consensus, max_bits.into()),
bits_expected.into());
// Randomise the final block time (plus 1 to ensure it is always different)
use rand::{thread_rng, Rng};
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.time += thread_rng().gen_range(1, consensus.pow_target_spacing / 2);
header_provider.replace_last(last_header);
// Result should be the same as if last difficulty was used
let bits_avg: U256 = header_provider.by_height[last_block as usize].bits.into();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &consensus, max_bits.into());
assert_eq!(actual, expected);
// Result should no longer match the original difficulty
let bits_expected = Compact::new(0x1e7fffff);
assert!(work_required_zcash(header_provider.last().clone().into(),
&header_provider, &consensus, max_bits.into()) != bits_expected);
// Change the final block difficulty
let mut last_header = header_provider.by_height[last_block as usize].clone();
last_header.bits = Compact::new(0x1e0fffff);
header_provider.replace_last(last_header);
// Result should not be the same as if last difficulty was used
let bits_avg = header_provider.by_height[last_block as usize].bits;
let expected = calculate_work_required(bits_avg.into(),
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &consensus, max_bits.into());
assert!(actual != expected);
// Result should be the same as if the average difficulty was used
let bits_avg = "0000796968696969696969696969696969696969696969696969696969696969".parse().unwrap();
let expected = calculate_work_required(bits_avg,
median_timestamp_inclusive(header_provider.by_height[last_block as usize].hash(), &header_provider),
median_timestamp_inclusive(header_provider.by_height[first_block as usize].hash(), &header_provider),
&consensus, max_bits.into());
let actual = work_required_zcash(header_provider.last().clone().into(),
&header_provider, &consensus, max_bits.into());
assert_eq!(actual, expected);
}
}