Merge branch 'master' into fixed_178

This commit is contained in:
Svyatoslav Nikolsky 2016-11-29 14:51:22 +03:00
commit 270a04c887
78 changed files with 2204 additions and 1068 deletions

123
Cargo.lock generated
View File

@ -8,6 +8,7 @@ dependencies = [
"ethcore-devtools 1.3.0",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"script 0.1.0",
@ -21,9 +22,9 @@ name = "abstract-ns"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -56,7 +57,7 @@ version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"nodrop 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"odds 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
"odds 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -88,6 +89,7 @@ version = "0.1.0"
dependencies = [
"primitives 0.1.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"siphasher 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -123,18 +125,18 @@ dependencies = [
[[package]]
name = "clap"
version = "2.18.0"
version = "2.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
"strsim 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"strsim 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"term_size 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-segmentation 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"vec_map 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"yaml-rust 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -173,20 +175,18 @@ name = "deque"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "domain"
version = "0.1.0"
source = "git+https://github.com/debris/domain#3754429fefb19b7c1c78cf34fcea32272266a8c0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.0 (git+https://github.com/debris/tokio-core)",
"void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -211,7 +211,7 @@ dependencies = [
"arrayvec 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)",
"gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -219,12 +219,12 @@ dependencies = [
name = "ethcore-devtools"
version = "1.3.0"
dependencies = [
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "futures"
version = "0.1.3"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -236,7 +236,7 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -284,7 +284,7 @@ dependencies = [
"eth-secp256k1 0.5.6 (git+https://github.com/ethcore/rust-secp256k1)",
"lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -328,6 +328,7 @@ dependencies = [
"bitcrypto 0.1.0",
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"network 0.1.0",
"primitives 0.1.0",
"serialization 0.1.0",
]
@ -337,6 +338,7 @@ name = "miner"
version = "0.1.0"
dependencies = [
"chain 0.1.0",
"db 0.1.0",
"heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"serialization 0.1.0",
@ -390,6 +392,15 @@ dependencies = [
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "network"
version = "0.1.0"
dependencies = [
"chain 0.1.0",
"primitives 0.1.0",
"serialization 0.1.0",
]
[[package]]
name = "nix"
version = "0.7.0"
@ -408,18 +419,18 @@ name = "nodrop"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"odds 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
"odds 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ns-dns-tokio"
version = "0.1.0"
source = "git+https://github.com/debris/abstract-ns#d4df407f94ae725c88aed3457aa8ebb44c123c5a"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"abstract-ns 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"domain 0.1.0 (git+https://github.com/debris/domain)",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.0 (git+https://github.com/debris/tokio-core)",
"domain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -432,7 +443,7 @@ dependencies = [
[[package]]
name = "odds"
version = "0.2.24"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -456,17 +467,18 @@ dependencies = [
"abstract-ns 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bitcrypto 0.1.0",
"csv 0.14.7 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"message 0.1.0",
"ns-dns-tokio 0.1.0 (git+https://github.com/debris/abstract-ns)",
"network 0.1.0",
"ns-dns-tokio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"serialization 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.0 (git+https://github.com/debris/tokio-core)",
"tokio-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -485,7 +497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -497,7 +509,7 @@ dependencies = [
"app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bencher 0.1.0",
"chain 0.1.0",
"clap 2.18.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
"db 0.1.0",
"env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"import 0.1.0",
@ -505,6 +517,7 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"message 0.1.0",
"miner 0.1.0",
"network 0.1.0",
"p2p 0.1.0",
"script 0.1.0",
"sync 0.1.0",
@ -527,7 +540,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rand"
version = "0.3.14"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
@ -541,7 +554,7 @@ dependencies = [
"deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -586,7 +599,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -643,6 +656,11 @@ dependencies = [
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "siphasher"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "slab"
version = "0.3.0"
@ -655,7 +673,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "strsim"
version = "0.5.1"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -663,25 +681,28 @@ name = "sync"
version = "0.1.0"
dependencies = [
"bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bitcrypto 0.1.0",
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"db 0.1.0",
"ethcore-devtools 1.3.0",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"message 0.1.0",
"miner 0.1.0",
"murmur3 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"network 0.1.0",
"p2p 0.1.0",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primitives 0.1.0",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"script 0.1.0",
"serialization 0.1.0",
"test-data 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.0 (git+https://github.com/debris/tokio-core)",
"tokio-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"verification 0.1.0",
]
@ -702,6 +723,7 @@ dependencies = [
"chain 0.1.0",
"primitives 0.1.0",
"serialization 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -733,10 +755,10 @@ dependencies = [
[[package]]
name = "tokio-core"
version = "0.1.0"
source = "git+https://github.com/debris/tokio-core#623ce443d89cd9ffa2c1adae8d2eb75538802d01"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -794,7 +816,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "yaml-rust"
version = "0.3.4"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
@ -809,15 +831,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
"checksum clap 2.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "40046b8a004bf3ba43b9078bf4b9b6d1708406a234848f925dbd7160a374c8a8"
"checksum clap 2.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef87e92396a3d29bf7e611c8a595be35ae90d9cb844a3571425900eaca4f51c8"
"checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97"
"checksum csv 0.14.7 (registry+https://github.com/rust-lang/crates.io-index)" = "266c1815d7ca63a5bd86284043faf91e8c95e943e55ce05dc0ae08e952de18bc"
"checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf"
"checksum domain 0.1.0 (git+https://github.com/debris/domain)" = "<none>"
"checksum domain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "725459994103308a8476a95d8115280b1359dccc06ca14291df75f37459a9e30"
"checksum elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4bc9250a632e7c001b741eb0ec6cee93c9a5b6d5f1879696a4b94d62b012210a"
"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f"
"checksum eth-secp256k1 0.5.6 (git+https://github.com/ethcore/rust-secp256k1)" = "<none>"
"checksum futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd89497091f8c5d3a65c6b4baf6d2f0731937a7c9217d2f89141b21437a9d96"
"checksum futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bad0a2ac64b227fdc10c254051ae5af542cf19c9328704fd4092f7914196897"
"checksum futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bb982bb25cd8fa5da6a8eb3a460354c984ff1113da82bcb4f0b0862b5795db82"
"checksum gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)" = "553f11439bdefe755bf366b264820f1da70f3aaf3924e594b886beb9c831bcf5"
"checksum heapsize 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8c80e194758495a9109566134dc06e42ea0423987d6ceca016edaa90381b3549"
@ -834,15 +856,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum net2 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)" = "5edf9cb6be97212423aed9413dd4729d62b370b5e1c571750e882cebbbc1e3e2"
"checksum nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0d95c5fa8b641c10ad0b8887454ebaafa3c92b5cd5350f8fc693adafd178e7b"
"checksum nodrop 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0dbbadd3f4c98dea0bd3d9b4be4c0cdaf1ab57035cb2e41fce3983db5add7cc5"
"checksum ns-dns-tokio 0.1.0 (git+https://github.com/debris/abstract-ns)" = "<none>"
"checksum ns-dns-tokio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43330aab5077c311b390b62147feb44316cb5b754b97d28c92210e6c6b7baff7"
"checksum num_cpus 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8890e6084723d57d0df8d2720b0d60c6ee67d6c93e7169630e4371e88765dcad"
"checksum odds 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)" = "97b2d7c12734955740d14f7a6723d8dd8ed53cf16770ab38ca6a1aaf3124fc0d"
"checksum odds 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)" = "c3df9b730298cea3a1c3faa90b7e2f9df3a9c400d0936d6015e6165734eefcba"
"checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c"
"checksum owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d91377085359426407a287ab16884a0111ba473aa6844ff01d4ec20ce3d75e7"
"checksum parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e1435e7a2a00dfebededd6c6bdbd54008001e94b4a2aadd6aef0dc4c56317621"
"checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068"
"checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"
"checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5"
"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
"checksum rayon 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0f0783f5880c56f5a308e219ac9309dbe781e064741dd5def4c617c440890305"
"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f"
"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
@ -854,14 +876,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
"checksum semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)" = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac"
"checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d"
"checksum siphasher 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b1c3c58c9ac43c530919fe6bd8ef11ae2612f64c2bf8eab9346f5b71ce0617f2"
"checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
"checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410"
"checksum strsim 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "50c069df92e4b01425a8bf3576d5d417943a6a7272fbabaf5bd80b1aaa76442e"
"checksum strsim 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "67f84c44fbb2f91db7fef94554e6b2ac05909c9c0b0bc23bb98d3a1aebfe7f7c"
"checksum term_size 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f7f5f3f71b0040cecc71af239414c23fd3c73570f5ff54cf50e03cef637f2a0"
"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03"
"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5"
"checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af"
"checksum tokio-core 0.1.0 (git+https://github.com/debris/tokio-core)" = "<none>"
"checksum tokio-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "06f40e15561569e24dab3dcf270c0bb950195b84dbed591dfb6591e28c9b9cff"
"checksum unicode-segmentation 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b905d0fc2a1f0befd86b0e72e31d1787944efef9d38b9358a9e92a69757f7e3b"
"checksum unicode-width 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2d6722facc10989f63ee0e20a83cd4e1714a9ae11529403ac7e0afd069abc39e"
"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
@ -871,4 +894,4 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
"checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453"
"checksum yaml-rust 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "371cea3a33a58d11dc83c0992fb37e44f651ebdf2df12f9d939f6cb24be2a8fd"
"checksum yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992"

View File

@ -13,6 +13,7 @@ clap = { version = "2", features = ["yaml"] }
chain = { path = "chain" }
keys = { path = "keys" }
message = { path = "message" }
network = { path = "network" }
miner = { path = "miner" }
p2p = { path = "p2p" }
script = { path = "script" }

View File

@ -70,6 +70,10 @@ impl Block {
pub fn hash(&self) -> H256 {
self.block_header.hash()
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|t| t.is_final(height, self.block_header.time))
}
}
#[cfg(test)]

View File

@ -14,7 +14,7 @@ pub trait RepresentH256 {
}
pub use rustc_serialize::hex;
pub use primitives::{hash, bytes};
pub use primitives::{hash, bytes, uint};
pub use self::block::Block;
pub use self::block_header::BlockHeader;
@ -23,7 +23,8 @@ pub use self::merkle_root::merkle_node_hash;
pub use self::transaction::{
Transaction, TransactionInput, TransactionOutput, OutPoint,
SEQUENCE_LOCKTIME_DISABLE_FLAG, SEQUENCE_FINAL,
SEQUENCE_LOCKTIME_TYPE_FLAG, SEQUENCE_LOCKTIME_MASK
SEQUENCE_LOCKTIME_TYPE_FLAG, SEQUENCE_LOCKTIME_MASK,
LOCKTIME_THRESHOLD
};
pub type ShortTransactionID = hash::H48;

View File

@ -30,6 +30,9 @@ pub const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = (1 << 22);
// applied to extract that lock-time from the sequence field.
pub const SEQUENCE_LOCKTIME_MASK: u32 = 0x0000ffff;
/// Threshold for `nLockTime`: below this value it is interpreted as block number,
/// otherwise as UNIX timestamp.
pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC
#[derive(Debug, PartialEq, Clone, Default)]
pub struct OutPoint {
@ -66,10 +69,6 @@ impl OutPoint {
&self.hash
}
pub fn index(&self) -> u32 {
self.index
}
pub fn is_null(&self) -> bool {
self.hash.is_zero() && self.index == u32::max_value()
}
@ -82,6 +81,12 @@ pub struct TransactionInput {
pub sequence: u32,
}
impl TransactionInput {
pub fn is_final(&self) -> bool {
self.sequence == SEQUENCE_FINAL
}
}
impl Serializable for TransactionInput {
fn serialize(&self, stream: &mut Stream) {
stream
@ -116,20 +121,6 @@ impl HeapSizeOf for TransactionInput {
}
}
impl TransactionInput {
pub fn previous_output(&self) -> &OutPoint {
&self.previous_output
}
pub fn script_sig(&self) -> &[u8] {
&self.script_sig
}
pub fn sequence(&self) -> u32 {
self.sequence
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct TransactionOutput {
pub value: u64,
@ -176,16 +167,6 @@ impl HeapSizeOf for TransactionOutput {
}
}
impl TransactionOutput {
pub fn value(&self) -> u64 {
self.value
}
pub fn script_pubkey(&self) -> &[u8] {
&self.script_pubkey
}
}
#[derive(Debug, PartialEq, Default, Clone)]
pub struct Transaction {
pub version: i32,
@ -254,6 +235,24 @@ impl Transaction {
self.inputs.len() == 1 && self.inputs[0].previous_output.is_null()
}
pub fn is_final(&self, block_height: u32, block_time: u32) -> bool {
if self.lock_time == 0 {
return true;
}
let max_lock_time = if self.lock_time < LOCKTIME_THRESHOLD {
block_height
} else {
block_time
};
if self.lock_time < max_lock_time {
return true;
}
self.inputs.iter().all(TransactionInput::is_final)
}
pub fn total_spends(&self) -> u64 {
self.outputs
.iter()

View File

@ -5,4 +5,5 @@ authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
rust-crypto = "0.2.36"
siphasher = "0.1.1"
primitives = { path = "../primitives" }

View File

@ -1,10 +1,13 @@
extern crate crypto as rcrypto;
extern crate primitives;
extern crate siphasher;
use std::hash::Hasher;
use rcrypto::sha1::Sha1;
use rcrypto::sha2::Sha256;
use rcrypto::ripemd160::Ripemd160;
use rcrypto::digest::Digest;
use siphasher::sip::SipHasher24;
use primitives::hash::{H32, H160, H256};
pub struct DHash160 {
@ -146,6 +149,14 @@ pub fn dhash256(input: &[u8]) -> H256 {
result
}
/// SipHash-2-4
#[inline]
pub fn siphash24(key0: u64, key1: u64, input: &[u8]) -> u64 {
let mut hasher = SipHasher24::new_with_keys(key0, key1);
hasher.write(input);
hasher.finish()
}
/// Data checksum
#[inline]
pub fn checksum(data: &[u8]) -> H32 {
@ -157,7 +168,7 @@ pub fn checksum(data: &[u8]) -> H32 {
#[cfg(test)]
mod tests {
use primitives::bytes::Bytes;
use super::{ripemd160, sha1, sha256, dhash160, dhash256, checksum};
use super::{ripemd160, sha1, sha256, dhash160, dhash256, siphash24, checksum};
#[test]
fn test_ripemd160() {
@ -192,12 +203,19 @@ mod tests {
assert_eq!(result, expected);
}
#[test]
fn test_dhash256() {
#[test]
fn test_dhash256() {
let expected = "9595c9df90075148eb06860365df33584b75bff782a510c6cd4883a419833d50".into();
let result = dhash256(b"hello");
assert_eq!(result, expected);
}
}
#[test]
fn test_siphash24() {
let expected = 0x74f839c593dc67fd_u64;
let result = siphash24(0x0706050403020100_u64, 0x0F0E0D0C0B0A0908_u64, &[0; 1]);
assert_eq!(result, expected);
}
#[test]
fn test_checksum() {

View File

@ -14,6 +14,9 @@ pub trait BlockProvider {
/// resolves header bytes by block reference (number/hash)
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes>;
/// resolves header bytes by block reference (number/hash)
fn block_header(&self, block_ref: BlockRef) -> Option<chain::BlockHeader>;
/// resolves deserialized block body by block reference (number/hash)
fn block(&self, block_ref: BlockRef) -> Option<chain::Block>;

View File

@ -1,5 +1,5 @@
use primitives::hash::H256;
use super::BlockLocation;
use super::{BlockLocation, IndexedBlock};
use chain;
use error::Error;
@ -46,4 +46,7 @@ pub trait BlockStapler {
/// insert block in the storage
fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error>;
/// insert pre-processed block in the storage
fn insert_indexed_block(&self, block: &IndexedBlock) -> Result<BlockInsertedChain, Error>;
}

View File

@ -1,6 +1,8 @@
use chain;
use primitives::hash::H256;
use serialization::Serializable;
#[derive(Debug)]
pub struct IndexedBlock {
header: chain::BlockHeader,
header_hash: H256,
@ -28,6 +30,22 @@ impl From<chain::Block> for IndexedBlock {
}
impl IndexedBlock {
pub fn new(header: chain::BlockHeader, transaction_index: Vec<(H256, chain::Transaction)>) -> Self {
let mut block = IndexedBlock {
header_hash: header.hash(),
header: header,
transactions: Vec::with_capacity(transaction_index.len()),
transaction_hashes: Vec::with_capacity(transaction_index.len()),
};
for (h256, tx) in transaction_index {
block.transactions.push(tx);
block.transaction_hashes.push(h256);
}
block
}
pub fn transactions(&self) -> IndexedTransactions {
IndexedTransactions {
position: 0,
@ -35,6 +53,10 @@ impl IndexedBlock {
}
}
pub fn transaction_hashes(&self) -> &[H256] {
&self.transaction_hashes
}
pub fn header(&self) -> &chain::BlockHeader {
&self.header
}
@ -42,6 +64,30 @@ impl IndexedBlock {
pub fn hash(&self) -> &H256 {
&self.header_hash
}
pub fn transaction_count(&self) -> usize {
self.transaction_hashes.len()
}
pub fn to_block(&self) -> chain::Block {
chain::Block::new(
self.header.clone(),
self.transactions.clone(),
)
}
pub fn size(&self) -> usize {
// todo: optimize
self.to_block().serialized_size()
}
pub fn merkle_root(&self) -> H256 {
chain::merkle_root(&self.transaction_hashes)
}
pub fn is_final(&self, height: u32) -> bool {
self.transactions.iter().all(|t| t.is_final(height, self.header.time))
}
}
pub struct IndexedTransactions<'a> {

View File

@ -34,6 +34,18 @@ pub enum BlockRef {
Hash(primitives::hash::H256),
}
impl From<u32> for BlockRef {
fn from(u: u32) -> Self {
BlockRef::Number(u)
}
}
impl From<primitives::hash::H256> for BlockRef {
fn from(hash: primitives::hash::H256) -> Self {
BlockRef::Hash(hash)
}
}
#[derive(PartialEq, Debug)]
pub enum BlockLocation {
Main(u32),
@ -46,7 +58,7 @@ pub use best_block::BestBlock;
pub use storage::{Storage, Store};
pub use error::Error;
pub use kvdb::Database;
pub use transaction_provider::TransactionProvider;
pub use transaction_provider::{TransactionProvider, AsTransactionProvider};
pub use transaction_meta_provider::TransactionMetaProvider;
pub use block_stapler::{BlockStapler, BlockInsertedChain};
pub use block_provider::BlockProvider;

View File

@ -6,7 +6,7 @@ use kvdb::{Database, DatabaseConfig};
use byteorder::{LittleEndian, ByteOrder};
use primitives::hash::H256;
use primitives::bytes::Bytes;
use super::{BlockRef, BestBlock, BlockLocation};
use super::{BlockRef, BestBlock, BlockLocation, IndexedBlock, IndexedTransactions};
use serialization::{serialize, deserialize};
use chain;
use parking_lot::RwLock;
@ -40,6 +40,9 @@ const MAX_FORK_ROUTE_PRESET: usize = 128;
pub trait Store : BlockProvider + BlockStapler + TransactionProvider + TransactionMetaProvider {
/// get best block
fn best_block(&self) -> Option<BestBlock>;
/// get best header
fn best_header(&self) -> Option<chain::BlockHeader>;
}
/// Blockchain storage with rocksdb database
@ -165,48 +168,66 @@ impl Storage {
})
}
fn block_by_hash(&self, h: &H256) -> Option<IndexedBlock> {
self.block_header_by_hash(h).map(|header| {
let tx_index =
self.block_transaction_hashes_by_hash(h)
.into_iter()
.filter_map(|tx_hash| {
self.transaction_bytes(&tx_hash)
.map(|tx_bytes| {
(
tx_hash,
deserialize::<_, chain::Transaction>(tx_bytes.as_ref())
.expect("Error deserializing transaction, possible db corruption"),
)
})
})
.collect();
IndexedBlock::new(header, tx_index)
})
}
/// update transactions metadata in the specified database transaction
fn update_transactions_meta(&self, context: &mut UpdateContext, number: u32, accepted_txs: &[chain::Transaction])
fn update_transactions_meta(&self, context: &mut UpdateContext, number: u32, accepted_txs: &mut IndexedTransactions)
-> Result<(), Error>
{
if let Some(accepted_tx) = accepted_txs.iter().next() {
if let Some((accepted_hash, accepted_tx)) = accepted_txs.next() {
context.meta.insert(
accepted_tx.hash(),
TransactionMeta::new(number, accepted_tx.outputs.len()).coinbase()
accepted_hash.clone(),
TransactionMeta::new_coinbase(number, accepted_tx.outputs.len())
);
}
for accepted_tx in accepted_txs.iter().skip(1) {
for (accepted_hash, accepted_tx) in accepted_txs {
context.meta.insert(
accepted_tx.hash(),
accepted_hash.clone(),
TransactionMeta::new(number, accepted_tx.outputs.len())
);
for input in &accepted_tx.inputs {
if !match context.meta.get_mut(&input.previous_output.hash) {
Some(ref mut meta) => {
use std::collections::hash_map::Entry;
match context.meta.entry(input.previous_output.hash.clone()) {
Entry::Occupied(mut entry) => {
let meta = entry.get_mut();
if meta.is_spent(input.previous_output.index as usize) {
return Err(Error::double_spend(&input.previous_output.hash));
}
meta.denote_used(input.previous_output.index as usize);
},
Entry::Vacant(entry) => {
let mut meta = self.transaction_meta(&input.previous_output.hash)
.ok_or(Error::unknown_spending(&input.previous_output.hash))?;
if meta.is_spent(input.previous_output.index as usize) {
return Err(Error::double_spend(&input.previous_output.hash));
}
meta.note_used(input.previous_output.index as usize);
true
meta.denote_used(input.previous_output.index as usize);
entry.insert(meta);
},
None => false,
} {
let mut meta = self.transaction_meta(&input.previous_output.hash)
.ok_or(Error::unknown_spending(&input.previous_output.hash))?;
if meta.is_spent(input.previous_output.index as usize) {
return Err(Error::double_spend(&input.previous_output.hash));
}
meta.note_used(input.previous_output.index as usize);
context.meta.insert(
input.previous_output.hash.clone(),
meta);
}
}
}
@ -236,30 +257,24 @@ impl Storage {
// remove meta
context.db_transaction.delete(Some(COL_TRANSACTIONS_META), &**tx_hash);
// denote outputs used
if tx_hash_num == 0 { continue; } // coinbase transaction does not have inputs
// coinbase transaction does not have inputs
if tx_hash_num == 0 {
continue;
}
// denote outputs as unused
for input in &tx.inputs {
if !match context.meta.get_mut(&input.previous_output.hash) {
Some(ref mut meta) => {
meta.denote_used(input.previous_output.index as usize);
true
use std::collections::hash_map::Entry;
match context.meta.entry(input.previous_output.hash.clone()) {
Entry::Occupied(mut entry) => {
entry.get_mut().denote_unused(input.previous_output.index as usize);
},
Entry::Vacant(entry) => {
let mut meta = self.transaction_meta(&input.previous_output.hash)
.expect("No transaction metadata! Possible db corruption");
meta.denote_unused(input.previous_output.index as usize);
entry.insert(meta);
},
None => false,
} {
let mut meta =
self.transaction_meta(&input.previous_output.hash)
.unwrap_or_else(|| panic!(
// decanonization should always have meta
// because block could not have made canonical without writing meta
"No transaction metadata for {}! Corrupted DB? Reindex?",
&input.previous_output.hash
));
meta.denote_used(input.previous_output.index as usize);
context.meta.insert(
input.previous_output.hash.clone(),
meta);
}
}
}
@ -300,8 +315,8 @@ impl Storage {
}
fn canonize_block(&self, context: &mut UpdateContext, at_height: u32, hash: &H256) -> Result<(), Error> {
let transactions = self.block_transactions_by_hash(hash);
try!(self.update_transactions_meta(context, at_height, &transactions));
let block = try!(self.block_by_hash(hash).ok_or(Error::unknown_hash(hash)));
try!(self.update_transactions_meta(context, at_height, &mut block.transactions()));
// only canonical blocks are allowed to wield a number
context.db_transaction.put(Some(COL_BLOCK_HASHES), &u32_key(at_height), &**hash);
@ -385,6 +400,12 @@ impl BlockProvider for Storage {
self.resolve_hash(block_ref).and_then(|h| self.get(COL_BLOCK_HEADERS, &*h))
}
/// Reads and deserializes the header of the referenced block, if present.
fn block_header(&self, block_ref: BlockRef) -> Option<chain::BlockHeader> {
    // Fetch the raw header bytes first; an unknown block yields None.
    match self.block_header_bytes(block_ref) {
        Some(bytes) => {
            // Stored bytes that fail to decode indicate db corruption.
            let header = deserialize::<_, chain::BlockHeader>(bytes.as_ref())
                .expect("Error deserializing header, possible db corruption");
            Some(header)
        },
        None => None,
    }
}
fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256> {
self.resolve_hash(block_ref)
.map(|h| self.block_transaction_hashes_by_hash(&h))
@ -412,8 +433,12 @@ impl BlockProvider for Storage {
impl BlockStapler for Storage {
/// insert pre-processed block in the storage
fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error> {
self.insert_indexed_block(&block.clone().into())
}
fn insert_indexed_block(&self, block: &IndexedBlock) -> Result<BlockInsertedChain, Error> {
// ! lock will be held during the entire insert routine
let mut best_block = self.best_block.write();
@ -428,39 +453,38 @@ impl BlockStapler for Storage {
let mut new_best_number = match best_block.as_ref().map(|b| b.number) {
Some(best_number) => {
if block.hash() == new_best_hash { best_number + 1 }
if block.hash() == &new_best_hash { best_number + 1 }
else { best_number }
},
None => 0,
};
let tx_space = block.transactions().len() * 32;
let tx_space = block.transaction_count() * 32;
let mut tx_refs = Vec::with_capacity(tx_space);
for tx in block.transactions() {
let tx_hash = tx.hash();
tx_refs.extend(&*tx_hash);
for (tx_hash, tx) in block.transactions() {
tx_refs.extend(&**tx_hash);
context.db_transaction.put(
Some(COL_TRANSACTIONS),
&*tx_hash,
&**tx_hash,
&serialize(tx),
);
}
context.db_transaction.put(Some(COL_BLOCK_TRANSACTIONS), &*block_hash, &tx_refs);
context.db_transaction.put(Some(COL_BLOCK_TRANSACTIONS), &**block_hash, &tx_refs);
context.db_transaction.put(
Some(COL_BLOCK_HEADERS),
&*block_hash,
&**block_hash,
&serialize(block.header())
);
// the block is continuing the main chain
let result = if best_block.as_ref().map(|b| b.number) != Some(new_best_number) {
try!(self.update_transactions_meta(&mut context, new_best_number, block.transactions()));
try!(self.update_transactions_meta(&mut context, new_best_number, &mut block.transactions()));
context.db_transaction.write_u32(Some(COL_META), KEY_BEST_BLOCK_NUMBER, new_best_number);
// updating main chain height reference
context.db_transaction.put(Some(COL_BLOCK_HASHES), &u32_key(new_best_number), &*block_hash);
context.db_transaction.write_u32(Some(COL_BLOCK_NUMBERS), &*block_hash, new_best_number);
context.db_transaction.put(Some(COL_BLOCK_HASHES), &u32_key(new_best_number), &**block_hash);
context.db_transaction.write_u32(Some(COL_BLOCK_NUMBERS), &**block_hash, new_best_number);
BlockInsertedChain::Main
}
@ -473,10 +497,10 @@ impl BlockStapler for Storage {
Ok(Some(mut reorg)) => {
// if so, we have new best main chain block
new_best_number = reorg.height + 1;
new_best_hash = block_hash;
new_best_hash = block_hash.clone();
// and we canonize it also by provisioning transactions
try!(self.update_transactions_meta(&mut context, new_best_number, block.transactions()));
try!(self.update_transactions_meta(&mut context, new_best_number, &mut block.transactions()));
context.db_transaction.write_u32(Some(COL_META), KEY_BEST_BLOCK_NUMBER, new_best_number);
context.db_transaction.put(Some(COL_BLOCK_HASHES), &u32_key(new_best_number), &*new_best_hash);
context.db_transaction.write_u32(Some(COL_BLOCK_NUMBERS), &*new_best_hash, new_best_number);
@ -596,6 +620,12 @@ impl Store for Storage {
fn best_block(&self) -> Option<BestBlock> {
self.best_block.read().clone()
}
fn best_header(&self) -> Option<chain::BlockHeader> {
self.best_block.read().as_ref().and_then(
|bb| Some(self.block_header_by_hash(&bb.hash).expect("Best block exists but no such header. Race condition?")),
)
}
}
#[cfg(test)]

View File

@ -2,7 +2,8 @@
use super::{
BlockRef, Store, Error, BestBlock, BlockLocation, BlockInsertedChain, BlockProvider,
BlockStapler, TransactionMetaProvider, TransactionProvider,
BlockStapler, TransactionMetaProvider, TransactionProvider, AsTransactionProvider,
IndexedBlock,
};
use chain::{self, Block};
use primitives::hash::H256;
@ -81,6 +82,13 @@ impl BlockProvider for TestStorage {
.map(|ref block| serialization::serialize(block.header()))
}
fn block_header(&self, block_ref: BlockRef) -> Option<chain::BlockHeader> {
let data = self.data.read();
self.resolve_hash(block_ref)
.and_then(|ref h| data.blocks.get(h))
.map(|ref block| block.header().clone())
}
fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec<H256> {
let data = self.data.read();
self.resolve_hash(block_ref)
@ -104,6 +112,10 @@ impl BlockProvider for TestStorage {
}
impl BlockStapler for TestStorage {
/// insert pre-processed block in the storage
// Test storage has no indexed fast-path: convert back to a plain block
// and reuse the ordinary insertion routine.
fn insert_indexed_block(&self, block: &IndexedBlock) -> Result<BlockInsertedChain, Error> {
self.insert_block(&block.to_block())
}
fn insert_block(&self, block: &chain::Block) -> Result<BlockInsertedChain, Error> {
let hash = block.hash();
@ -163,6 +175,12 @@ impl TransactionProvider for TestStorage {
}
}
/// Expose this test storage as a plain `TransactionProvider` trait object.
impl AsTransactionProvider for TestStorage {
fn as_transaction_provider(&self) -> &TransactionProvider {
&*self
}
}
impl TransactionMetaProvider for TestStorage {
// just spawns new meta so far, use real store for proper tests
fn transaction_meta(&self, hash: &H256) -> Option<TransactionMeta> {
@ -174,5 +192,11 @@ impl Store for TestStorage {
fn best_block(&self) -> Option<BestBlock> {
self.data.read().best_block.clone()
}
fn best_header(&self) -> Option<chain::BlockHeader> {
self.data.read().best_block.as_ref().and_then(
|bb| Some(self.block_header(BlockRef::Hash(bb.hash.clone())).expect("Best block exists but no such header. Race condition?"))
)
}
}

View File

@ -7,7 +7,8 @@ use byteorder::{LittleEndian, ByteOrder};
#[derive(Debug, Clone)]
pub struct TransactionMeta {
block_height: u32,
// first bit is coinbase flag, others - one per output listed
/// first bit indicate if transaction is a coinbase transaction
/// next bits indicate if transaction has spend outputs
bits: BitVec,
}
@ -17,7 +18,7 @@ pub enum Error {
}
impl TransactionMeta {
/// new transaction description for indexing
/// New transaction description for indexing
pub fn new(block_height: u32, outputs: usize) -> Self {
TransactionMeta {
block_height: block_height,
@ -25,23 +26,26 @@ impl TransactionMeta {
}
}
/// note that particular output has been used
pub fn note_used(&mut self, index: usize) {
/// New coinbase transaction
pub fn new_coinbase(block_height: u32, outputs: usize) -> Self {
let mut result = Self::new(block_height, outputs);
result.bits.set(0, true);
result
}
/// Returns `true` when this metadata describes a coinbase transaction
/// (reads the reserved flag in bit 0).
pub fn is_coinbase(&self) -> bool {
self.bits.get(0)
.expect("One bit should always exists, since it is created as usize + 1; minimum value of usize is 0; 0 + 1 = 1; qed")
}
/// Marks output `index` as spent. Output bits start at bit 1, since
/// bit 0 holds the coinbase flag.
pub fn denote_used(&mut self, index: usize) {
self.bits.set(index + 1 , true);
}
pub fn coinbase(mut self) -> Self {
self.bits.set(0, true);
self
}
pub fn is_coinbase(&self) -> bool {
self.bits.get(0)
.expect("One bit should always exists, since it is created as usize + 1; minimum value of usize is 0; 0 + 1 = 1; qed")
}
/// note that particular output has been used
pub fn denote_used(&mut self, index: usize) {
/// Marks output `index` as unspent again (offset by 1 past the coinbase
/// flag bit); used when decanonizing a block.
pub fn denote_unused(&mut self, index: usize) {
self.bits.set(index + 1, false);
}
@ -61,8 +65,34 @@ impl TransactionMeta {
})
}
pub fn height(&self) -> u32 { self.block_height }
/// Height of the block this transaction was included in.
pub fn height(&self) -> u32 {
self.block_height
}
pub fn is_spent(&self, idx: usize) -> bool { self.bits.get(idx + 1).expect("Index should be verified by the caller") }
/// Whether output `idx` has been spent. Panics if `idx` is out of range;
/// callers must pass a valid output index.
pub fn is_spent(&self, idx: usize) -> bool {
self.bits.get(idx + 1).expect("Index should be verified by the caller")
}
/// Returns `true` when every output of the transaction is spent
/// (the leading coinbase-flag bit is ignored).
pub fn is_fully_spent(&self) -> bool {
    // Bit 0 is the coinbase flag; bits 1.. track per-output spent state.
    // Fully spent means no output bit is still false.
    !self.bits.iter().skip(1).any(|spent| !spent)
}
}
#[cfg(test)]
mod tests {
use super::TransactionMeta;
// Round-trips the spent-tracking bits: empty meta, one unspent output,
// marking it used and then unused again.
#[test]
fn test_is_fully_spent() {
// Zero outputs: vacuously fully spent.
let t = TransactionMeta::new(0, 0);
assert!(t.is_fully_spent());
// One output, initially unspent.
let mut t = TransactionMeta::new(0, 1);
assert!(!t.is_fully_spent());
t.denote_used(0);
assert!(t.is_fully_spent());
t.denote_unused(0);
assert!(!t.is_fully_spent());
}
}

View File

@ -16,3 +16,8 @@ pub trait TransactionProvider {
fn transaction(&self, hash: &H256) -> Option<chain::Transaction>;
}
/// Upcast helper: lets concrete stores hand out a `&TransactionProvider`
/// trait object view of themselves.
pub trait AsTransactionProvider {
/// Returns `self` as a `TransactionProvider` reference.
fn as_transaction_provider(&self) -> &TransactionProvider;
}

View File

@ -10,3 +10,4 @@ bitcrypto = { path = "../crypto" }
chain = { path = "../chain" }
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
network = { path = "../network" }

View File

@ -3,10 +3,8 @@ mod block_header_and_ids;
mod block_transactions;
mod block_transactions_request;
mod command;
mod consensus;
mod inventory;
mod ip;
mod magic;
mod port;
mod prefilled_transaction;
mod service;
@ -16,10 +14,8 @@ pub use self::block_header_and_ids::BlockHeaderAndIDs;
pub use self::block_transactions::BlockTransactions;
pub use self::block_transactions_request::BlockTransactionsRequest;
pub use self::command::Command;
pub use self::consensus::ConsensusParams;
pub use self::inventory::{InventoryVector, InventoryType};
pub use self::ip::IpAddress;
pub use self::magic::Magic;
pub use self::port::Port;
pub use self::prefilled_transaction::PrefilledTransaction;
pub use self::service::Services;

View File

@ -9,10 +9,8 @@ pub enum Error {
Deserialize,
/// Command has wrong format or is unsupported.
InvalidCommand,
/// Network magic is not supported.
InvalidMagic,
/// Network magic comes from different network.
WrongMagic,
InvalidMagic,
/// Invalid checksum.
InvalidChecksum,
/// Invalid version.
@ -37,7 +35,6 @@ impl error::Error for Error {
Error::Deserialize => "Message Deserialization Error",
Error::InvalidCommand => "Invalid Message Command",
Error::InvalidMagic => "Invalid Network Magic",
Error::WrongMagic => "Wrong Network Magic",
Error::InvalidChecksum => "Invalid message chacksum",
Error::InvalidVersion => "Unsupported protocol version",
}

View File

@ -3,6 +3,7 @@ extern crate bitcrypto as crypto;
extern crate chain;
extern crate primitives;
extern crate serialization as ser;
extern crate network;
pub mod common;
mod message;
@ -12,7 +13,7 @@ mod error;
pub use primitives::{hash, bytes};
pub use common::{Command, Magic, Services};
pub use common::{Command, Services};
pub use message::{Message, MessageHeader, Payload, to_raw_message};
pub use serialization::{serialize_payload, deserialize_payload};
pub use error::{Error, MessageResult};

View File

@ -1,6 +1,7 @@
use ser::Stream;
use bytes::{TaggedBytes, Bytes};
use common::{Magic, Command};
use network::Magic;
use common::Command;
use serialization::serialize_payload;
use {Payload, MessageResult, MessageHeader};

View File

@ -1,7 +1,8 @@
use hash::H32;
use ser::{Serializable, Stream, Reader};
use crypto::checksum;
use common::{Command, Magic};
use network::Magic;
use common::Command;
use Error;
#[derive(Debug, PartialEq)]
@ -31,9 +32,9 @@ impl MessageHeader {
let mut reader = Reader::new(data);
let magic: u32 = try!(reader.read());
let magic = try!(Magic::from_u32(magic));
let magic = Magic::from(magic);
if expected != magic {
return Err(Error::WrongMagic);
return Err(Error::InvalidMagic);
}
let header = MessageHeader {
@ -61,7 +62,7 @@ impl Serializable for MessageHeader {
mod tests {
use bytes::Bytes;
use ser::serialize;
use common::Magic;
use network::Magic;
use super::MessageHeader;
#[test]

View File

@ -6,6 +6,7 @@ authors = ["Ethcore <admin@ethcore.io>"]
[dependencies]
heapsize = "0.3"
chain = { path = "../chain" }
db = { path = "../db" }
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
test-data = { path = "../test-data" }

61
miner/src/fee.rs Normal file
View File

@ -0,0 +1,61 @@
use chain::Transaction;
use db::TransactionProvider;
/// Computes the miner fee of `transaction`: total value of spent outputs
/// minus total value of created outputs.
///
/// The transaction's inputs must already be known to `store`; missing input
/// transactions indicate a caller bug (the transaction was not verified).
pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) -> u64 {
    // Sum the values of every output this transaction spends.
    let mut inputs_sum: u64 = 0;
    for input in &transaction.inputs {
        let prev_tx = store.transaction(&input.previous_output.hash)
            .expect("transaction must be verified by caller");
        inputs_sum += prev_tx.outputs[input.previous_output.index as usize].value;
    }

    // Sum the values this transaction creates.
    let mut outputs_sum: u64 = 0;
    for output in &transaction.outputs {
        outputs_sum += output.value;
    }

    // Saturate so malformed data (outputs > inputs) yields 0, not underflow.
    inputs_sum.saturating_sub(outputs_sum)
}
/// Fee per serialized byte of `transaction` (integer division).
pub fn transaction_fee_rate(store: &TransactionProvider, transaction: &Transaction) -> u64 {
    use ser::Serializable;
    let fee = transaction_fee(store, transaction);
    let size = transaction.serialized_size() as u64;
    fee / size
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use db::{TestStorage, AsTransactionProvider};
use test_data;
use super::*;
// Builds two blocks: b0 funds 1M and 2M satoshi outputs, b1 spends both
// into a single 2.5M output, leaving a 500k satoshi fee.
#[test]
fn test_transaction_fee() {
let b0 = test_data::block_builder().header().nonce(1).build()
.transaction()
.output().value(1_000_000).build()
.build()
.transaction()
.output().value(2_000_000).build()
.build()
.build();
let tx0 = b0.transactions[0].clone();
let tx0_hash = tx0.hash();
let tx1 = b0.transactions[1].clone();
let tx1_hash = tx1.hash();
let b1 = test_data::block_builder().header().nonce(2).build()
.transaction()
.input().hash(tx0_hash).index(0).build()
.input().hash(tx1_hash).index(0).build()
.output().value(2_500_000).build()
.build()
.build();
let tx2 = b1.transactions[0].clone();
let db = Arc::new(TestStorage::with_blocks(&vec![b0, b1]));
// tx0/tx1 have no inputs (funding transactions), so their fee is zero.
assert_eq!(transaction_fee(db.as_transaction_provider(), &tx0), 0);
assert_eq!(transaction_fee(db.as_transaction_provider(), &tx1), 0);
assert_eq!(transaction_fee(db.as_transaction_provider(), &tx2), 500_000);
assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx0), 0);
assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx1), 0);
// 500_000 / serialized size with integer division; expected rate implies
// a 101-byte transaction — NOTE(review): size not shown here, confirm.
assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx2), 4_950);
}
}

View File

@ -1,9 +1,12 @@
extern crate chain;
extern crate db;
extern crate heapsize;
extern crate primitives;
extern crate serialization as ser;
extern crate test_data;
mod fee;
mod memory_pool;
pub use fee::{transaction_fee, transaction_fee_rate};
pub use memory_pool::{MemoryPool, Information as MemoryPoolInformation, OrderingStrategy as MemoryPoolOrderingStrategy};

View File

@ -5,13 +5,15 @@
//! transactions.
//! It also guarantees that ancestor-descendant relation won't break during ordered removal (ancestors always removed
//! before descendants). Removal using `remove_by_hash` can break this rule.
use db::TransactionProvider;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use chain::Transaction;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::BTreeSet;
use ser::Serializable;
use ser::{Serializable, serialize};
use heapsize::HeapSizeOf;
/// Transactions ordering strategy
@ -331,6 +333,10 @@ impl Storage {
}
}
/// Borrows the pooled transaction with hash `h`, if present.
pub fn read_by_hash(&self, h: &H256) -> Option<&Transaction> {
self.by_hash.get(h).map(|e| &e.transaction)
}
pub fn read_with_strategy(&self, strategy: OrderingStrategy) -> Option<H256> {
match strategy {
OrderingStrategy::ByTimestamp => self.references.ordered.by_storage_index.iter().map(|entry| entry.hash.clone()).nth(0),
@ -573,6 +579,11 @@ impl MemoryPool {
self.storage.remove_by_hash(h).map(|entry| entry.transaction)
}
/// Reads a single transaction by its hash without removing it from the pool.
pub fn read_by_hash(&self, h: &H256) -> Option<&Transaction> {
self.storage.read_by_hash(h)
}
/// Reads hash of the 'top' transaction from the `MemoryPool` using selected strategy.
/// Ancestors are always returned before descendant transactions.
pub fn read_with_strategy(&mut self, strategy: OrderingStrategy) -> Option<H256> {
@ -684,6 +695,16 @@ impl MemoryPool {
}
}
/// Exposes the memory pool as a read-only transaction source.
impl TransactionProvider for MemoryPool {
    /// Serialized bytes of a pooled transaction, if present.
    fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
        match self.get(hash) {
            Some(tx) => Some(serialize(tx)),
            None => None,
        }
    }

    /// Owned copy of a pooled transaction, if present.
    fn transaction(&self, hash: &H256) -> Option<Transaction> {
        self.get(hash).map(|tx| tx.clone())
    }
}
impl HeapSizeOf for MemoryPool {
fn heap_size_of_children(&self) -> usize {
self.storage.heap_size_of_children()

9
network/Cargo.toml Normal file
View File

@ -0,0 +1,9 @@
[package]
name = "network"
version = "0.1.0"
authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
serialization = { path = "../serialization" }
chain = { path = "../chain" }
primitives = { path = "../primitives" }

View File

@ -1,3 +1,4 @@
use hash::H256;
use super::Magic;
#[derive(Debug, Clone)]
@ -14,7 +15,7 @@ pub struct ConsensusParams {
impl ConsensusParams {
pub fn with_magic(magic: Magic) -> Self {
match magic {
Magic::Mainnet => ConsensusParams {
Magic::Mainnet | Magic::Other(_) => ConsensusParams {
bip16_time: 1333238400, // Apr 1 2012
bip65_height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
},
@ -28,6 +29,11 @@ impl ConsensusParams {
},
}
}
/// Returns `true` for the two historical mainnet blocks that BIP30 exempts
/// from the duplicate-transaction check.
pub fn is_bip30_exception(&self, hash: &H256, height: u32) -> bool {
    // Only these exact (height, hash) pairs are exempt.
    match height {
        91842 => hash == &H256::from_reversed_str("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec"),
        91880 => hash == &H256::from_reversed_str("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"),
        _ => false,
    }
}
}
#[cfg(test)]

12
network/src/lib.rs Normal file
View File

@ -0,0 +1,12 @@
extern crate chain;
extern crate primitives;
extern crate serialization as ser;
mod consensus;
mod magic;
pub use primitives::hash;
pub use consensus::ConsensusParams;
pub use magic::Magic;

View File

@ -3,21 +3,28 @@
use ser::{Stream, Serializable};
use chain::Block;
use Error;
use super::ConsensusParams;
const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B;
const MAGIC_REGTEST: u32 = 0xDAB5BFFA;
const MAX_NBITS_MAINNET: u32 = 0x1d00ffff;
const MAX_NBITS_TESTNET: u32 = 0x1d00ffff;
const MAX_NBITS_REGTEST: u32 = 0x207fffff;
/// Bitcoin network
/// https://bitcoin.org/en/glossary/mainnet
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Magic {
/// The original and main network for Bitcoin transactions, where satoshis have real economic value.
Mainnet,
/// The main bitcoin testnet.
Testnet,
/// Bitcoin regtest network.
Regtest,
/// Any other network. By default behaves like bitcoin mainnet.
Other(u32),
}
impl From<Magic> for u32 {
@ -26,23 +33,34 @@ impl From<Magic> for u32 {
Magic::Mainnet => MAGIC_MAINNET,
Magic::Testnet => MAGIC_TESTNET,
Magic::Regtest => MAGIC_REGTEST,
Magic::Other(magic) => magic,
}
}
}
impl From<u32> for Magic {
fn from(u: u32) -> Self {
match u {
MAGIC_MAINNET => Magic::Mainnet,
MAGIC_TESTNET => Magic::Testnet,
MAGIC_REGTEST => Magic::Regtest,
other => Magic::Other(other),
}
}
}
impl Magic {
pub fn from_u32(magic: u32) -> Result<Self, Error> {
match magic {
MAGIC_MAINNET => Ok(Magic::Mainnet),
MAGIC_TESTNET => Ok(Magic::Testnet),
MAGIC_REGTEST => Ok(Magic::Regtest),
_ => Err(Error::InvalidMagic),
pub fn max_nbits(&self) -> u32 {
match *self {
Magic::Mainnet | Magic::Other(_) => MAX_NBITS_MAINNET,
Magic::Testnet => MAX_NBITS_TESTNET,
Magic::Regtest => MAX_NBITS_REGTEST,
}
}
pub fn port(&self) -> u16 {
match *self {
Magic::Mainnet => 8333,
Magic::Mainnet | Magic::Other(_) => 8333,
Magic::Testnet => 18333,
Magic::Regtest => 18444,
}
@ -50,7 +68,7 @@ impl Magic {
pub fn rpc_port(&self) -> u16 {
match *self {
Magic::Mainnet => 8332,
Magic::Mainnet | Magic::Other(_) => 8332,
Magic::Testnet => 18332,
Magic::Regtest => 18443,
}
@ -58,7 +76,7 @@ impl Magic {
pub fn genesis_block(&self) -> Block {
match *self {
Magic::Mainnet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Magic::Mainnet | Magic::Other(_) => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Magic::Testnet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff001d1aa4ae180101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
Magic::Regtest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f20020000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000".into(),
}
@ -77,18 +95,27 @@ impl Serializable for Magic {
#[cfg(test)]
mod tests {
use Error;
use super::{Magic, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST};
use super::{
Magic, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST,
MAX_NBITS_MAINNET, MAX_NBITS_TESTNET, MAX_NBITS_REGTEST,
};
#[test]
fn test_network_magic_number() {
assert_eq!(MAGIC_MAINNET, Magic::Mainnet.into());
assert_eq!(MAGIC_TESTNET, Magic::Testnet.into());
assert_eq!(MAGIC_REGTEST, Magic::Regtest.into());
assert_eq!(Magic::from_u32(MAGIC_MAINNET).unwrap(), Magic::Mainnet);
assert_eq!(Magic::from_u32(MAGIC_TESTNET).unwrap(), Magic::Testnet);
assert_eq!(Magic::from_u32(MAGIC_REGTEST).unwrap(), Magic::Regtest);
assert_eq!(Magic::from_u32(0).unwrap_err(), Error::InvalidMagic);
assert_eq!(Magic::Mainnet, MAGIC_MAINNET.into());
assert_eq!(Magic::Testnet, MAGIC_TESTNET.into());
assert_eq!(Magic::Regtest, MAGIC_REGTEST.into());
assert_eq!(Magic::Other(0), 0.into());
}
// Each network variant must report its own proof-of-work limit constant.
#[test]
fn test_network_max_nbits() {
assert_eq!(Magic::Mainnet.max_nbits(), MAX_NBITS_MAINNET);
assert_eq!(Magic::Testnet.max_nbits(), MAX_NBITS_TESTNET);
assert_eq!(Magic::Regtest.max_nbits(), MAX_NBITS_REGTEST);
}
#[test]

View File

@ -4,7 +4,7 @@ version = "0.1.0"
authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
tokio-core = { git = "https://github.com/debris/tokio-core" }
tokio-core = "0.1.1"
parking_lot = "0.3"
futures = "0.1"
futures-cpupool = "0.1"
@ -12,10 +12,11 @@ time = "0.1"
rand = "0.3"
log = "0.3"
abstract-ns = "0.2.1"
ns-dns-tokio = { git = "https://github.com/debris/abstract-ns", path = "ns-dns-tokio" }
ns-dns-tokio = "0.1"
csv = "0.14.7"
primitives = { path = "../primitives"}
primitives = { path = "../primitives" }
bitcrypto = { path = "../crypto" }
message = { path = "../message" }
serialization = { path = "../serialization"}
serialization = { path = "../serialization" }
network = { path = "../network" }

View File

@ -2,7 +2,7 @@ use std::{io, cmp};
use futures::{Future, Poll, Async};
use message::{Message, MessageResult, Error};
use message::types::{Version, Verack};
use message::common::Magic;
use network::Magic;
use io::{write_message, WriteMessage, ReadMessage, read_message};
pub fn handshake<A>(a: A, magic: Magic, version: Version, min_version: u32) -> Handshake<A> where A: io::Write + io::Read {
@ -199,7 +199,8 @@ mod tests {
use futures::Future;
use bytes::Bytes;
use ser::Stream;
use message::{Magic, Message};
use network::Magic;
use message::Message;
use message::types::Verack;
use message::types::version::{Version, V0, V106, V70001};
use super::{handshake, accept_handshake, HandshakeResult};

View File

@ -2,7 +2,8 @@ use std::io;
use futures::{Future, Poll, Async};
use tokio_core::io::{read_exact, ReadExact};
use crypto::checksum;
use message::{Error, MessageHeader, MessageResult, Magic, Command};
use network::Magic;
use message::{Error, MessageHeader, MessageResult, Command};
use bytes::Bytes;
use io::{read_header, ReadHeader};
@ -68,7 +69,8 @@ impl<A> Future for ReadAnyMessage<A> where A: io::Read {
mod tests {
use futures::Future;
use bytes::Bytes;
use message::{Magic, Error};
use network::Magic;
use message::Error;
use super::read_any_message;
#[test]
@ -79,7 +81,7 @@ mod tests {
let expected = (name, nonce);
assert_eq!(read_any_message(raw.as_ref(), Magic::Mainnet).wait().unwrap(), Ok(expected));
assert_eq!(read_any_message(raw.as_ref(), Magic::Testnet).wait().unwrap(), Err(Error::WrongMagic));
assert_eq!(read_any_message(raw.as_ref(), Magic::Testnet).wait().unwrap(), Err(Error::InvalidMagic));
}
#[test]

View File

@ -1,7 +1,8 @@
use std::io;
use futures::{Future, Poll, Async};
use tokio_core::io::{ReadExact, read_exact};
use message::{MessageHeader, MessageResult, Magic};
use message::{MessageHeader, MessageResult};
use network::Magic;
pub fn read_header<A>(a: A, magic: Magic) -> ReadHeader<A> where A: io::Read {
ReadHeader {
@ -30,7 +31,8 @@ impl<A> Future for ReadHeader<A> where A: io::Read {
mod tests {
use futures::Future;
use bytes::Bytes;
use message::{Magic, MessageHeader, Error};
use network::Magic;
use message::{MessageHeader, Error};
use super::read_header;
#[test]
@ -44,7 +46,7 @@ mod tests {
};
assert_eq!(read_header(raw.as_ref(), Magic::Mainnet).wait().unwrap().1, Ok(expected));
assert_eq!(read_header(raw.as_ref(), Magic::Testnet).wait().unwrap().1, Err(Error::WrongMagic));
assert_eq!(read_header(raw.as_ref(), Magic::Testnet).wait().unwrap().1, Err(Error::InvalidMagic));
}
#[test]

View File

@ -1,7 +1,8 @@
use std::io;
use std::marker::PhantomData;
use futures::{Poll, Future, Async};
use message::{MessageResult, Error, Magic, Payload};
use network::Magic;
use message::{MessageResult, Error, Payload};
use io::{read_header, ReadHeader, read_payload, ReadPayload};
pub fn read_message<M, A>(a: A, magic: Magic, version: u32) -> ReadMessage<M, A>
@ -74,7 +75,8 @@ impl<M, A> Future for ReadMessage<M, A> where A: io::Read, M: Payload {
mod tests {
use futures::Future;
use bytes::Bytes;
use message::{Magic, Error};
use network::Magic;
use message::Error;
use message::types::{Ping, Pong};
use super::read_message;
@ -83,7 +85,7 @@ mod tests {
let raw: Bytes = "f9beb4d970696e6700000000000000000800000083c00c765845303b6da97786".into();
let ping = Ping::new(u64::from_str_radix("8677a96d3b304558", 16).unwrap());
assert_eq!(read_message(raw.as_ref(), Magic::Mainnet, 0).wait().unwrap().1, Ok(ping));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Magic::Testnet, 0).wait().unwrap().1, Err(Error::WrongMagic));
assert_eq!(read_message::<Ping, _>(raw.as_ref(), Magic::Testnet, 0).wait().unwrap().1, Err(Error::InvalidMagic));
assert_eq!(read_message::<Pong, _>(raw.as_ref(), Magic::Mainnet, 0).wait().unwrap().1, Err(Error::InvalidCommand));
}

View File

@ -15,6 +15,7 @@ extern crate bitcrypto as crypto;
extern crate message;
extern crate primitives;
extern crate serialization as ser;
extern crate network;
mod io;
mod net;

View File

@ -3,7 +3,8 @@ use std::time::Duration;
use futures::{Future, Poll};
use tokio_core::reactor::Handle;
use tokio_core::net::TcpStream;
use message::{MessageResult, Magic};
use network::Magic;
use message::{MessageResult};
use io::{accept_handshake, AcceptHandshake, Deadline, deadline};
use net::{Config, Connection};

View File

@ -1,5 +1,6 @@
use std::net::SocketAddr;
use message::common::{Magic, Services, NetAddress};
use network::Magic;
use message::common::{Services, NetAddress};
use message::types::version::{Version, V0, V106, V70001};
use util::time::{Time, RealTime};
use util::nonce::{NonceGenerator, RandomNonce};

View File

@ -4,8 +4,8 @@ use std::net::SocketAddr;
use futures::{Future, Poll, Async};
use tokio_core::reactor::Handle;
use tokio_core::net::{TcpStream, TcpStreamNew};
use network::Magic;
use message::Error;
use message::common::Magic;
use message::types::Version;
use io::{handshake, Handshake, Deadline, deadline};
use net::{Config, Connection};

View File

@ -1,5 +1,5 @@
use std::net;
use message::Magic;
use network::Magic;
use message::common::Services;
use io::SharedTcpStream;

View File

@ -23,11 +23,6 @@ impl ConnectionCounter {
}
}
/// Returns maxiumum number of outbound connections.
pub fn max_outbound_connections(&self) -> u32 {
self.max_outbound_connections
}
/// Increases inbound connections counter by 1.
pub fn note_new_inbound_connection(&self) {
self.current_inbound_connections.fetch_add(1, Ordering::AcqRel);

View File

@ -12,17 +12,3 @@ impl NonceGenerator for RandomNonce {
rand::random()
}
}
pub struct StaticNonce(u64);
impl StaticNonce {
pub fn new(nonce: u64) -> Self {
StaticNonce(nonce)
}
}
impl NonceGenerator for StaticNonce {
fn get(&self) -> u64 {
self.0
}
}

View File

@ -1,5 +1,5 @@
use std::net::SocketAddr;
use message::Magic;
use network::Magic;
pub type PeerId = usize;

View File

@ -8,7 +8,7 @@ pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
// TODO: this might be unnecessary here!
try!(init_db(&cfg, &db));
let mut writer = create_sync_blocks_writer(db);
let mut writer = create_sync_blocks_writer(db, cfg.magic);
let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed");
let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned()));

View File

@ -32,7 +32,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
};
let sync_handle = el.handle();
let sync_connection_factory = create_sync_connection_factory(&sync_handle, cfg.magic.consensus_params(), db);
let sync_connection_factory = create_sync_connection_factory(&sync_handle, cfg.magic, db);
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
try!(p2p.run().map_err(|_| "Failed to start p2p module"));

View File

@ -1,6 +1,6 @@
use std::net;
use clap;
use message::Magic;
use network::Magic;
use {USER_AGENT, REGTEST_USER_AGENT};
pub struct Config {
@ -29,18 +29,18 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
};
let (in_connections, out_connections) = match magic {
Magic::Testnet | Magic::Mainnet => (10, 10),
Magic::Testnet | Magic::Mainnet | Magic::Other(_) => (10, 10),
Magic::Regtest => (1, 0),
};
let p2p_threads = match magic {
Magic::Testnet | Magic::Mainnet => 4,
Magic::Testnet | Magic::Mainnet | Magic::Other(_) => 4,
Magic::Regtest => 1,
};
// to skip idiotic 30 seconds delay in test-scripts
let user_agent = match magic {
Magic::Testnet | Magic::Mainnet => USER_AGENT,
Magic::Testnet | Magic::Mainnet | Magic::Other(_) => USER_AGENT,
Magic::Regtest => REGTEST_USER_AGENT,
};

View File

@ -12,6 +12,7 @@ extern crate chain;
extern crate keys;
extern crate script;
extern crate message;
extern crate network;
extern crate p2p;
extern crate sync;
extern crate import;

View File

@ -105,6 +105,15 @@ macro_rules! impl_hash {
}
}
impl cmp::PartialOrd for $name {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
let self_ref: &[u8] = &self.0;
let other_ref: &[u8] = &other.0;
self_ref.partial_cmp(other_ref)
}
}
impl Hash for $name {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write(&self.0);

View File

@ -1,7 +1,7 @@
#![cfg_attr(asm_available, feature(asm))]
extern crate rustc_serialize;
#[macro_use] extern crate heapsize;
extern crate rustc_serialize;
pub mod bytes;
pub mod hash;

View File

@ -436,7 +436,7 @@ pub fn eval_script(
Opcode::OP_14 |
Opcode::OP_15 |
Opcode::OP_16 => {
let value = opcode as u8 - (Opcode::OP_1 as u8 - 1);
let value = (opcode as i32).wrapping_sub(Opcode::OP_1 as i32 - 1);
stack.push(Num::from(value).to_bytes());
},
Opcode::OP_CAT | Opcode::OP_SUBSTR | Opcode::OP_LEFT | Opcode::OP_RIGHT |
@ -1894,5 +1894,20 @@ mod tests {
.verify_clocktimeverify(true);
assert_eq!(verify_script(&input, &output, &flags, &checker), Err(Error::NumberOverflow));
}
// https://blockchain.info/rawtx/54fabd73f1d20c980a0686bf0035078e07f69c58437e4d586fb29aa0bee9814f
#[test]
fn test_arithmetic_correct_arguments_order() {
let tx: Transaction = "01000000010c0e314bd7bb14721b3cfd8e487cd6866173354f87ca2cf4d13c8d3feb4301a6000000004a483045022100d92e4b61452d91a473a43cde4b469a472467c0ba0cbd5ebba0834e4f4762810402204802b76b7783db57ac1f61d2992799810e173e91055938750815b6d8a675902e014fffffffff0140548900000000001976a914a86e8ee2a05a44613904e18132e49b2448adc4e688ac00000000".into();
let signer: TransactionInputSigner = tx.into();
let checker = TransactionSignatureChecker {
signer: signer,
input_index: 0,
};
let input: Script = "483045022100d92e4b61452d91a473a43cde4b469a472467c0ba0cbd5ebba0834e4f4762810402204802b76b7783db57ac1f61d2992799810e173e91055938750815b6d8a675902e014f".into();
let output: Script = "76009f69905160a56b210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71ad6c".into();
let flags = VerificationFlags::default();
assert_eq!(verify_script(&input, &output, &flags, &checker), Ok(()));
}
}

View File

@ -455,6 +455,17 @@ impl Opcode {
pub fn is_push_value(&self) -> bool {
*self >= Opcode::OP_1NEGATE && *self <= Opcode::OP_16
}
pub fn is_within_op_n(&self) -> bool {
*self >= Opcode::OP_1 && *self <= Opcode::OP_16
}
pub fn decode_op_n(&self) -> u8 {
assert!(self.is_within_op_n());
let value = *self as u8;
let op0 = Opcode::OP_1 as u8 - 1;
value - op0
}
}
#[cfg(test)]

View File

@ -16,10 +16,6 @@ pub const MAX_PUBKEYS_PER_MULTISIG: usize = 20;
/// Maximum script length in bytes
pub const MAX_SCRIPT_SIZE: usize = 10000;
/// Threshold for `nLockTime`: below this value it is interpreted as block number,
/// otherwise as UNIX timestamp.
pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC
#[derive(PartialEq, Debug)]
pub enum ScriptType {
NonStandard,
@ -178,7 +174,7 @@ impl Script {
let len = data.len();
let end = self.data.len();
if len > end {
if len > end || len == 0 {
return self.data.to_vec().into()
}
@ -213,15 +209,15 @@ impl Script {
let slice = try!(self.take(position + 1, len));
let n = try!(read_usize(slice, len));
let bytes = try!(self.take_checked(position + 1 + len, n));
let bytes = try!(self.take(position + 1 + len, n));
Instruction {
opcode: opcode,
step: len + n + 1,
data: Some(bytes),
}
},
o if o >= Opcode::OP_0 && o <= Opcode::OP_PUSHBYTES_75 => {
let bytes = try!(self.take_checked(position+ 1, opcode as usize));
o if o <= Opcode::OP_PUSHBYTES_75 => {
let bytes = try!(self.take(position + 1, opcode as usize));
Instruction {
opcode: o,
step: opcode as usize + 1,
@ -247,15 +243,6 @@ impl Script {
}
}
#[inline]
pub fn take_checked(&self, offset: usize, len: usize) -> Result<&[u8], Error> {
if len > MAX_SCRIPT_ELEMENT_SIZE {
Err(Error::ScriptSize)
} else {
self.take(offset, len)
}
}
/// Returns Script without OP_CODESEPARATOR opcodes
pub fn without_separators(&self) -> Script {
let mut pc = 0;
@ -322,78 +309,53 @@ impl Script {
Opcodes { position: 0, script: self }
}
pub fn sigop_count(&self, accurate: bool) -> Result<usize, Error> {
pub fn sigops_count(&self, serialized_script: bool) -> usize {
let mut last_opcode = Opcode::OP_0;
let mut result = 0;
let mut total = 0;
for opcode in self.opcodes() {
let opcode = try!(opcode);
let opcode = match opcode {
Ok(opcode) => opcode,
// If we push an invalid element, all previous CHECKSIGs are counted
_ => return total,
};
match opcode {
Opcode::OP_CHECKSIG | Opcode::OP_CHECKSIGVERIFY => { result += 1; },
Opcode::OP_CHECKSIG | Opcode::OP_CHECKSIGVERIFY => {
total += 1;
},
Opcode::OP_CHECKMULTISIG | Opcode::OP_CHECKMULTISIGVERIFY => {
if accurate {
match last_opcode {
Opcode::OP_1 |
Opcode::OP_2 |
Opcode::OP_3 |
Opcode::OP_4 |
Opcode::OP_5 |
Opcode::OP_6 |
Opcode::OP_7 |
Opcode::OP_8 |
Opcode::OP_9 |
Opcode::OP_10 |
Opcode::OP_11 |
Opcode::OP_12 |
Opcode::OP_13 |
Opcode::OP_14 |
Opcode::OP_15 |
Opcode::OP_16 => {
result += (last_opcode as u8 - (Opcode::OP_1 as u8 - 1)) as usize;
},
_ => {
result += MAX_PUBKEYS_PER_MULTISIG;
}
}
}
else {
result += MAX_PUBKEYS_PER_MULTISIG;
if serialized_script && last_opcode.is_within_op_n() {
total += last_opcode.decode_op_n() as usize;
} else {
total += MAX_PUBKEYS_PER_MULTISIG;
}
},
_ => { }
_ => (),
};
last_opcode = opcode;
}
Ok(result)
total
}
pub fn sigop_count_p2sh(&self, input_ref: &Script) -> Result<usize, Error> {
if !self.is_pay_to_script_hash() { return self.sigop_count(true); }
let mut script_data: Option<&[u8]> = None;
// we need last command
for next in input_ref.iter() {
let instruction = match next {
Err(_) => return Ok(0),
Ok(i) => i,
};
if instruction.opcode as u8 > Opcode::OP_16 as u8 {
return Ok(0);
}
script_data = instruction.data;
pub fn pay_to_script_hash_sigops(&self, prev_out: &Script) -> usize {
if !prev_out.is_pay_to_script_hash() {
return 0;
}
match script_data {
Some(slc) => {
let nested_script: Script = slc.to_vec().into();
nested_script.sigop_count(true)
},
None => Ok(0),
if self.data.is_empty() || !self.is_push_only() {
return 0;
}
let script: Script = self.iter().last()
.expect("self.data.is_empty() == false; qed")
.expect("self.data.is_push_only()")
.data.expect("self.data.is_push_only()")
.to_vec()
.into();
script.sigops_count(true)
}
}
@ -496,7 +458,7 @@ impl fmt::Display for Script {
#[cfg(test)]
mod tests {
use {Builder, Opcode};
use super::{Script, ScriptType};
use super::{Script, ScriptType, MAX_SCRIPT_ELEMENT_SIZE};
#[test]
fn test_is_pay_to_script_hash() {
@ -577,10 +539,46 @@ OP_ADD
#[test]
fn test_sigops_count() {
assert_eq!(1usize, Script::from("76a914aab76ba4877d696590d94ea3e02948b55294815188ac").sigop_count(false).unwrap());
assert_eq!(2usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigop_count(true).unwrap());
assert_eq!(20usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigop_count(false).unwrap());
assert_eq!(0usize, Script::from("a9146262b64aec1f4a4c1d21b32e9c2811dd2171fd7587").sigop_count(false).unwrap());
assert_eq!(1usize, Script::from("4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1baded5c72a704f7e6cd84cac").sigop_count(false).unwrap());
assert_eq!(1usize, Script::from("76a914aab76ba4877d696590d94ea3e02948b55294815188ac").sigops_count(false));
assert_eq!(2usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigops_count(true));
assert_eq!(20usize, Script::from("522102004525da5546e7603eefad5ef971e82f7dad2272b34e6b3036ab1fe3d299c22f21037d7f2227e6c646707d1c61ecceb821794124363a2cf2c1d2a6f28cf01e5d6abe52ae").sigops_count(false));
assert_eq!(0usize, Script::from("a9146262b64aec1f4a4c1d21b32e9c2811dd2171fd7587").sigops_count(false));
assert_eq!(1usize, Script::from("4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1baded5c72a704f7e6cd84cac").sigops_count(false));
}
#[test]
fn test_sigops_count_b73() {
let max_block_sigops = 20000;
let block_sigops = 0;
let mut script = vec![Opcode::OP_CHECKSIG as u8; max_block_sigops - block_sigops + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1];
script[max_block_sigops - block_sigops] = Opcode::OP_PUSHDATA4 as u8;
let overmax = MAX_SCRIPT_ELEMENT_SIZE + 1;
script[max_block_sigops - block_sigops + 1] = overmax as u8;
script[max_block_sigops - block_sigops + 2] = (overmax >> 8) as u8;
script[max_block_sigops - block_sigops + 3] = (overmax >> 16) as u8;
script[max_block_sigops - block_sigops + 4] = (overmax >> 24) as u8;
let script: Script = script.into();
assert_eq!(script.sigops_count(false), 20001);
}
#[test]
fn test_sigops_count_b74() {
let max_block_sigops = 20000;
let block_sigops = 0;
let mut script = vec![Opcode::OP_CHECKSIG as u8; max_block_sigops - block_sigops + MAX_SCRIPT_ELEMENT_SIZE + 42];
script[max_block_sigops - block_sigops + 1] = Opcode::OP_PUSHDATA4 as u8;
script[max_block_sigops - block_sigops + 2] = 0xfe;
script[max_block_sigops - block_sigops + 3] = 0xff;
script[max_block_sigops - block_sigops + 4] = 0xff;
script[max_block_sigops - block_sigops + 5] = 0xff;
let script: Script = script.into();
assert_eq!(script.sigops_count(false), 20001);
}
#[test]
fn test_script_empty_find_and_delete() {
let s: Script = vec![Opcode::OP_0 as u8].into();
let result = s.find_and_delete(&[]);
assert_eq!(s, result);
}
}

View File

@ -1,9 +1,9 @@
use keys::{Public, Signature};
use chain::{
SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG,
self, SEQUENCE_FINAL, SEQUENCE_LOCKTIME_DISABLE_FLAG,
SEQUENCE_LOCKTIME_MASK, SEQUENCE_LOCKTIME_TYPE_FLAG
};
use {script, SignatureVersion, Script, TransactionInputSigner, Num};
use {SignatureVersion, Script, TransactionInputSigner, Num};
pub trait SignatureChecker {
fn check_signature(
@ -64,8 +64,8 @@ impl SignatureChecker for TransactionSignatureChecker {
// the nLockTime in the transaction.
let lock_time_u32: u32 = lock_time.into();
if !(
(self.signer.lock_time < script::LOCKTIME_THRESHOLD && lock_time_u32 < script::LOCKTIME_THRESHOLD) ||
(self.signer.lock_time >= script::LOCKTIME_THRESHOLD && lock_time_u32 >= script::LOCKTIME_THRESHOLD)
(self.signer.lock_time < chain::LOCKTIME_THRESHOLD && lock_time_u32 < chain::LOCKTIME_THRESHOLD) ||
(self.signer.lock_time >= chain::LOCKTIME_THRESHOLD && lock_time_u32 >= chain::LOCKTIME_THRESHOLD)
) {
return false;
}

View File

@ -9,14 +9,16 @@ log = "0.3"
time = "0.1"
futures = "0.1"
futures-cpupool = "0.1"
tokio-core = { git = "https://github.com/debris/tokio-core" }
tokio-core = "0.1"
linked-hash-map = "0.3"
ethcore-devtools = { path = "../devtools" }
bit-vec = "0.4.3"
murmur3 = "0.3"
rand = "0.3"
byteorder = "0.5"
chain = { path = "../chain" }
bitcrypto = { path = "../crypto" }
db = { path = "../db" }
message = { path = "../message" }
miner = { path = "../miner" }
@ -26,6 +28,7 @@ script = { path = "../script" }
serialization = { path = "../serialization" }
test-data = { path = "../test-data" }
verification = { path = "../verification" }
network = { path = "../network" }
[features]
dev = []

View File

@ -1,8 +1,9 @@
use std::sync::Arc;
use chain;
use db;
use super::Error;
use network::Magic;
use verification::{Verify, ChainVerifier};
use super::Error;
pub struct BlocksWriter {
storage: Arc<db::Store>,
@ -10,40 +11,40 @@ pub struct BlocksWriter {
}
impl BlocksWriter {
pub fn new(storage: db::SharedStore) -> BlocksWriter {
pub fn new(storage: db::SharedStore, network: Magic) -> BlocksWriter {
BlocksWriter {
storage: storage.clone(),
verifier: ChainVerifier::new(storage),
verifier: ChainVerifier::new(storage, network),
}
}
pub fn append_block(&mut self, block: chain::Block) -> Result<(), Error> {
let indexed_block: db::IndexedBlock = block.into();
// TODO: share same verification code with synchronization_client
if self.storage.best_block().map_or(false, |bb| bb.hash != block.block_header.previous_header_hash) {
if self.storage.best_block().map_or(false, |bb| bb.hash != indexed_block.header().previous_header_hash) {
return Err(Error::OutOfOrderBlock);
}
match self.verifier.verify(&block) {
match self.verifier.verify(&indexed_block) {
Err(err) => Err(Error::Verification(err)),
Ok(_chain) => { try!(self.storage.insert_block(&block).map_err(Error::Database)); Ok(()) }
Ok(_chain) => { try!(self.storage.insert_indexed_block(&indexed_block).map_err(Error::Database)); Ok(()) }
}
}
}
#[cfg(test)]
mod tests {
use db;
use db::Store;
use std::sync::Arc;
use db::{self, Store};
use network::Magic;
use {test_data, verification};
use super::super::Error;
use super::BlocksWriter;
use test_data;
use verification;
#[test]
fn blocks_writer_appends_blocks() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let mut blocks_target = BlocksWriter::new(db.clone());
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
blocks_target.append_block(test_data::block_h1()).expect("Expecting no error");
assert_eq!(db.best_block().expect("Block is inserted").number, 1);
}
@ -51,7 +52,7 @@ mod tests {
#[test]
fn blocks_writer_verification_error() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let mut blocks_target = BlocksWriter::new(db.clone());
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
match blocks_target.append_block(test_data::block_h2()).unwrap_err() {
Error::OutOfOrderBlock => (),
_ => panic!("Unexpected error"),
@ -62,7 +63,7 @@ mod tests {
#[test]
fn blocks_writer_out_of_order_block() {
let db = Arc::new(db::TestStorage::with_genesis_block());
let mut blocks_target = BlocksWriter::new(db.clone());
let mut blocks_target = BlocksWriter::new(db.clone(), Magic::Testnet);
let wrong_block = test_data::block_builder()
.header().parent(test_data::genesis().hash()).build()

View File

@ -0,0 +1,76 @@
use std::collections::HashSet;
use rand::{thread_rng, Rng};
use bitcrypto::{sha256, siphash24};
use byteorder::{LittleEndian, ByteOrder};
use chain::{BlockHeader, ShortTransactionID};
use db::IndexedBlock;
use message::common::{BlockHeaderAndIDs, PrefilledTransaction};
use primitives::hash::H256;
use ser::{Stream, Serializable};
/// Maximum size of prefilled transactions in compact block
const MAX_COMPACT_BLOCK_PREFILLED_SIZE: usize = 10 * 1024;
pub fn build_compact_block(block: IndexedBlock, prefilled_transactions_indexes: HashSet<usize>) -> BlockHeaderAndIDs {
let nonce: u64 = thread_rng().gen();
let prefilled_transactions_len = prefilled_transactions_indexes.len();
let mut short_ids: Vec<ShortTransactionID> = Vec::with_capacity(block.transaction_count() - prefilled_transactions_len);
let mut prefilled_transactions: Vec<PrefilledTransaction> = Vec::with_capacity(prefilled_transactions_len);
let mut prefilled_transactions_size: usize = 0;
for (transaction_index, (transaction_hash, transaction)) in block.transactions().enumerate() {
let transaction_size = transaction.serialized_size();
if prefilled_transactions_size + transaction_size < MAX_COMPACT_BLOCK_PREFILLED_SIZE
&& prefilled_transactions_indexes.contains(&transaction_index) {
prefilled_transactions_size += transaction_size;
prefilled_transactions.push(PrefilledTransaction {
index: transaction_index,
transaction: transaction.clone(),
})
} else {
short_ids.push(short_transaction_id(nonce, block.header(), transaction_hash));
}
}
BlockHeaderAndIDs {
header: block.header().clone(),
nonce: nonce,
short_ids: short_ids,
prefilled_transactions: prefilled_transactions,
}
}
fn short_transaction_id(nonce: u64, block_header: &BlockHeader, transaction_hash: &H256) -> ShortTransactionID {
// Short transaction IDs are used to represent a transaction without sending a full 256-bit hash. They are calculated by:
// 1) single-SHA256 hashing the block header with the nonce appended (in little-endian)
let mut stream = Stream::new();
stream.append(block_header);
stream.append(&nonce);
let block_header_with_nonce_hash = sha256(&stream.out());
// 2) Running SipHash-2-4 with the input being the transaction ID and the keys (k0/k1) set to the first two little-endian
// 64-bit integers from the above hash, respectively.
let key0 = LittleEndian::read_u64(&block_header_with_nonce_hash[0..8]);
let key1 = LittleEndian::read_u64(&block_header_with_nonce_hash[8..16]);
let siphash_transaction_hash = siphash24(key0, key1, &**transaction_hash);
// 3) Dropping the 2 most significant bytes from the SipHash output to make it 6 bytes.
let mut siphash_transaction_hash_bytes = [0u8; 8];
LittleEndian::write_u64(&mut siphash_transaction_hash_bytes, siphash_transaction_hash);
siphash_transaction_hash_bytes[2..8].into()
}
#[cfg(test)]
mod tests {
#[test]
fn short_transaction_id_is_correct() {
// TODO
}
#[test]
fn compact_block_is_built_correctly() {
// TODO
}
}

View File

@ -27,6 +27,8 @@ pub struct ConnectionFilter {
last_blocks: LinkedHashMap<H256, ()>,
/// Last transactions from peer.
last_transactions: LinkedHashMap<H256, ()>,
/// Minimal fee in satoshis per 1000 bytes
fee_rate: Option<u64>,
}
/// Connection bloom filter
@ -70,6 +72,7 @@ impl Default for ConnectionFilter {
filter_flags: types::FilterFlags::None,
last_blocks: LinkedHashMap::new(),
last_transactions: LinkedHashMap::new(),
fee_rate: None,
}
}
}
@ -83,6 +86,7 @@ impl ConnectionFilter {
filter_flags: message.flags,
last_blocks: LinkedHashMap::new(),
last_transactions: LinkedHashMap::new(),
fee_rate: None,
}
}
@ -119,12 +123,106 @@ impl ConnectionFilter {
}
/// Check if transaction should be sent to this connection && optionally update filter
pub fn filter_transaction(&mut self, transaction_hash: &H256, transaction: &Transaction) -> bool {
pub fn filter_transaction(&mut self, transaction_hash: &H256, transaction: &Transaction, transaction_fee_rate: Option<u64>) -> bool {
// check if transaction is known
if self.last_transactions.contains_key(transaction_hash) {
return false;
}
// check if transaction fee rate is high enough for this peer
if let Some(fee_rate) = self.fee_rate {
if let Some(transaction_fee_rate) = transaction_fee_rate {
if transaction_fee_rate < fee_rate {
return false;
}
}
}
// check with bloom filter, if set
self.filter_transaction_with_bloom(transaction_hash, transaction)
}
/// Load filter
pub fn load(&mut self, message: &types::FilterLoad) {
self.bloom = Some(ConnectionBloom::new(message));
self.filter_flags = message.flags;
}
/// Add filter
pub fn add(&mut self, message: &types::FilterAdd) {
// ignore if filter is not currently set
if let Some(ref mut bloom) = self.bloom {
bloom.insert(&message.data);
}
}
/// Clear filter
pub fn clear(&mut self) {
self.bloom = None;
}
/// Limit transaction announcing by transaction fee
pub fn set_fee_rate(&mut self, fee_rate: u64) {
if fee_rate == 0 {
self.fee_rate = None;
}
else {
self.fee_rate = Some(fee_rate);
}
}
/// Convert `Block` to `MerkleBlock` using this filter
pub fn build_merkle_block(&mut self, block: Block) -> Option<MerkleBlockArtefacts> {
if self.bloom.is_none() {
return None;
}
// prepare result
let all_len = block.transactions.len();
let mut result = MerkleBlockArtefacts {
merkleblock: types::MerkleBlock {
block_header: block.block_header.clone(),
total_transactions: all_len as u32,
hashes: Vec::default(),
flags: Bytes::default(),
},
matching_transactions: Vec::new(),
};
// calculate hashes && match flags for all transactions
let (all_hashes, all_flags) = block.transactions.into_iter()
.fold((Vec::<H256>::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| {
let hash = t.hash();
let flag = self.filter_transaction_with_bloom(&hash, &t);
if flag {
result.matching_transactions.push((hash.clone(), t));
}
all_flags.push(flag);
all_hashes.push(hash);
(all_hashes, all_flags)
});
// build partial merkle tree
let (hashes, flags) = PartialMerkleTree::build(all_hashes, all_flags);
result.merkleblock.hashes.extend(hashes);
// to_bytes() converts [true, false, true] to 0b10100000
// while protocol requires [true, false, true] to be serialized as 0x00000101
result.merkleblock.flags = flags.to_bytes().into_iter()
.map(|b|
((b & 0b10000000) >> 7) |
((b & 0b01000000) >> 5) |
((b & 0b00100000) >> 3) |
((b & 0b00010000) >> 1) |
((b & 0b00001000) << 1) |
((b & 0b00000100) << 3) |
((b & 0b00000010) << 5) |
((b & 0b00000001) << 7)).collect::<Vec<u8>>().into();
Some(result)
}
/// Check if transaction should be sent to this connection using bloom filter && optionally update filter
fn filter_transaction_with_bloom(&mut self, transaction_hash: &H256, transaction: &Transaction) -> bool {
// check with bloom filter, if set
match self.bloom {
/// if no filter is set for the connection => match everything
@ -188,75 +286,6 @@ impl ConnectionFilter {
},
}
}
/// Load filter
pub fn load(&mut self, message: &types::FilterLoad) {
self.bloom = Some(ConnectionBloom::new(message));
self.filter_flags = message.flags;
}
/// Add filter
pub fn add(&mut self, message: &types::FilterAdd) {
// ignore if filter is not currently set
if let Some(ref mut bloom) = self.bloom {
bloom.insert(&message.data);
}
}
/// Clear filter
pub fn clear(&mut self) {
self.bloom = None;
}
/// Convert `Block` to `MerkleBlock` using this filter
pub fn build_merkle_block(&mut self, block: Block) -> Option<MerkleBlockArtefacts> {
if self.bloom.is_none() {
return None;
}
// prepare result
let all_len = block.transactions.len();
let mut result = MerkleBlockArtefacts {
merkleblock: types::MerkleBlock {
block_header: block.block_header.clone(),
total_transactions: all_len as u32,
hashes: Vec::default(),
flags: Bytes::default(),
},
matching_transactions: Vec::new(),
};
// calculate hashes && match flags for all transactions
let (all_hashes, all_flags) = block.transactions.into_iter()
.fold((Vec::<H256>::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| {
let hash = t.hash();
let flag = self.filter_transaction(&hash, &t);
if flag {
result.matching_transactions.push((hash.clone(), t));
}
all_flags.push(flag);
all_hashes.push(hash);
(all_hashes, all_flags)
});
// build partial merkle tree
let (hashes, flags) = PartialMerkleTree::build(all_hashes, all_flags);
result.merkleblock.hashes.extend(hashes);
// to_bytes() converts [true, false, true] to 0b10100000
// while protocol requires [true, false, true] to be serialized as 0x00000101
result.merkleblock.flags = flags.to_bytes().into_iter()
.map(|b|
((b & 0b10000000) >> 7) |
((b & 0b01000000) >> 5) |
((b & 0b00100000) >> 3) |
((b & 0b00010000) >> 1) |
((b & 0b00001000) << 1) |
((b & 0b00000100) << 3) |
((b & 0b00000010) << 5) |
((b & 0b00000001) << 7)).collect::<Vec<u8>>().into();
Some(result)
}
}
impl ConnectionBloom {
@ -493,13 +522,13 @@ pub mod tests {
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&*tx1.hash()));
assert!(filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
@ -512,13 +541,13 @@ pub mod tests {
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&tx1_out_data));
assert!(filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
@ -530,13 +559,13 @@ pub mod tests {
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&tx1_previous_output));
assert!(filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
@ -549,13 +578,39 @@ pub mod tests {
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&tx1_input_data));
assert!(filter.filter_transaction(&tx1.hash(), &tx1));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
fn connection_filter_matches_transaction_by_fee_rate() {
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
let tx2: Transaction = test_data::TransactionBuilder::with_version(2).into();
let mut filter = ConnectionFilter::default();
assert!(filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
filter.set_fee_rate(1500);
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
filter.set_fee_rate(3000);
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
filter.set_fee_rate(0);
assert!(filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
}
#[test]

View File

@ -1,3 +1,5 @@
extern crate bitcrypto;
extern crate byteorder;
extern crate chain;
extern crate db;
#[macro_use]
@ -21,9 +23,11 @@ extern crate serialization as ser;
#[cfg(test)]
extern crate ethcore_devtools as devtools;
extern crate rand;
extern crate network;
mod best_headers_chain;
mod blocks_writer;
mod compact_block_builder;
mod connection_filter;
mod hash_queue;
mod inbound_connection;
@ -42,7 +46,7 @@ mod synchronization_verifier;
use std::sync::Arc;
use parking_lot::RwLock;
use tokio_core::reactor::Handle;
use message::common::ConsensusParams;
use network::Magic;
/// Sync errors.
#[derive(Debug)]
@ -56,12 +60,12 @@ pub enum Error {
}
/// Create blocks writer.
pub fn create_sync_blocks_writer(db: db::SharedStore) -> blocks_writer::BlocksWriter {
blocks_writer::BlocksWriter::new(db)
pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic) -> blocks_writer::BlocksWriter {
blocks_writer::BlocksWriter::new(db, network)
}
/// Create inbound synchronization connections factory for given `db`.
pub fn create_sync_connection_factory(handle: &Handle, consensus_params: ConsensusParams, db: db::SharedStore) -> p2p::LocalSyncNodeRef {
pub fn create_sync_connection_factory(handle: &Handle, network: Magic, db: db::SharedStore) -> p2p::LocalSyncNodeRef {
use synchronization_chain::Chain as SyncChain;
use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;
use local_node::LocalNode as SyncNode;
@ -74,7 +78,7 @@ pub fn create_sync_connection_factory(handle: &Handle, consensus_params: Consens
let sync_executor = SyncExecutor::new(sync_chain.clone());
let sync_server = Arc::new(SynchronizationServer::new(sync_chain.clone(), sync_executor.clone()));
let sync_client_core = SynchronizationClientCore::new(SynchronizationConfig::new(), handle, sync_executor.clone(), sync_chain.clone());
let verifier = AsyncVerifier::new(consensus_params, sync_chain, sync_client_core.clone());
let verifier = AsyncVerifier::new(network, sync_chain, sync_client_core.clone());
let sync_client = SynchronizationClient::new(sync_client_core, verifier);
let sync_node = Arc::new(SyncNode::new(sync_server, sync_client, sync_executor));
SyncConnectionFactory::with_local_node(sync_node)

View File

@ -5,7 +5,7 @@ use db;
use p2p::OutboundSyncConnectionRef;
use message::common::{InventoryType, InventoryVector};
use message::types;
use synchronization_client::{Client, SynchronizationClient};
use synchronization_client::{Client, SynchronizationClient, BlockAnnouncementType};
use synchronization_executor::{Task as SynchronizationTask, TaskExecutor as SynchronizationTaskExecutor, LocalSynchronizationTaskExecutor};
use synchronization_server::{Server, SynchronizationServer};
use synchronization_verifier::AsyncVerifier;
@ -145,7 +145,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
trace!(target: "sync", "Got `block` message from peer#{}. Block hash: {}", peer_index, message.block.hash().to_reversed_str());
// try to process new block
self.client.lock().on_peer_block(peer_index, message.block);
self.client.lock().on_peer_block(peer_index, message.block.into());
}
pub fn on_peer_headers(&self, peer_index: usize, message: types::Headers) {
@ -183,23 +183,41 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
pub fn on_peer_sendheaders(&self, peer_index: usize, _message: types::SendHeaders) {
trace!(target: "sync", "Got `sendheaders` message from peer#{}", peer_index);
self.client.lock().on_peer_sendheaders(peer_index);
self.client.lock().on_peer_block_announcement_type(peer_index, BlockAnnouncementType::SendHeader);
}
pub fn on_peer_feefilter(&self, peer_index: usize, _message: types::FeeFilter) {
pub fn on_peer_feefilter(&self, peer_index: usize, message: types::FeeFilter) {
trace!(target: "sync", "Got `feefilter` message from peer#{}", peer_index);
self.client.lock().on_peer_feefilter(peer_index, &message);
}
pub fn on_peer_send_compact(&self, peer_index: usize, _message: types::SendCompact) {
pub fn on_peer_send_compact(&self, peer_index: usize, message: types::SendCompact) {
trace!(target: "sync", "Got `sendcmpct` message from peer#{}", peer_index);
// The second integer SHALL be interpreted as a little-endian version number. Nodes sending a sendcmpct message MUST currently set this value to 1.
// TODO: version 2 supports segregated witness transactions
if message.second != 1 {
return;
}
// Upon receipt of a "sendcmpct" message with the first and second integers set to 1, the node SHOULD announce new blocks by sending a cmpctblock message.
if message.first {
self.client.lock().on_peer_block_announcement_type(peer_index, BlockAnnouncementType::SendCompactBlock);
}
// else:
// Upon receipt of a "sendcmpct" message with the first integer set to 0, the node SHOULD NOT announce new blocks by sending a cmpctblock message,
// but SHOULD announce new blocks by sending invs or headers, as defined by BIP130.
// => work as before
}
pub fn on_peer_compact_block(&self, peer_index: usize, _message: types::CompactBlock) {
trace!(target: "sync", "Got `cmpctblock` message from peer#{}", peer_index);
}
pub fn on_peer_get_block_txn(&self, peer_index: usize, _message: types::GetBlockTxn) {
pub fn on_peer_get_block_txn(&self, peer_index: usize, message: types::GetBlockTxn) {
trace!(target: "sync", "Got `getblocktxn` message from peer#{}", peer_index);
self.server.serve_get_block_txn(peer_index, message.request.blockhash, message.request.indexes).map(|t| self.server.add_task(peer_index, t));
}
pub fn on_peer_block_txn(&self, peer_index: usize, _message: types::BlockTxn) {

View File

@ -2,15 +2,15 @@ use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::hash_map::Entry;
use linked_hash_map::LinkedHashMap;
use time;
use chain::Block;
use primitives::hash::H256;
use db::IndexedBlock;
#[derive(Debug)]
/// Storage for blocks, for which we have no parent yet.
/// Blocks from this storage are either moved to verification queue, or removed at all.
pub struct OrphanBlocksPool {
/// Blocks from requested_hashes, but received out-of-order.
orphaned_blocks: HashMap<H256, HashMap<H256, Block>>,
orphaned_blocks: HashMap<H256, HashMap<H256, IndexedBlock>>,
/// Blocks that we have received without requesting with receiving time.
unknown_blocks: LinkedHashMap<H256, f64>,
}
@ -41,15 +41,15 @@ impl OrphanBlocksPool {
}
/// Insert orphaned block, for which we have already requested its parent block
pub fn insert_orphaned_block(&mut self, hash: H256, block: Block) {
pub fn insert_orphaned_block(&mut self, hash: H256, block: IndexedBlock) {
self.orphaned_blocks
.entry(block.block_header.previous_header_hash.clone())
.entry(block.header().previous_header_hash.clone())
.or_insert_with(HashMap::new)
.insert(hash, block);
}
/// Insert unknown block, for which we know nothing about its parent block
pub fn insert_unknown_block(&mut self, hash: H256, block: Block) {
pub fn insert_unknown_block(&mut self, hash: H256, block: IndexedBlock) {
let previous_value = self.unknown_blocks.insert(hash.clone(), time::precise_time_s());
assert_eq!(previous_value, None);
@ -67,11 +67,11 @@ impl OrphanBlocksPool {
}
/// Remove all blocks, depending on this parent
pub fn remove_blocks_for_parent(&mut self, hash: &H256) -> Vec<(H256, Block)> {
pub fn remove_blocks_for_parent(&mut self, hash: &H256) -> Vec<(H256, IndexedBlock)> {
let mut queue: VecDeque<H256> = VecDeque::new();
queue.push_back(hash.clone());
let mut removed: Vec<(H256, Block)> = Vec::new();
let mut removed: Vec<(H256, IndexedBlock)> = Vec::new();
while let Some(parent_hash) = queue.pop_front() {
if let Entry::Occupied(entry) = self.orphaned_blocks.entry(parent_hash) {
let (_, orphaned) = entry.remove_entry();
@ -86,9 +86,9 @@ impl OrphanBlocksPool {
}
/// Remove blocks with given hashes + all dependent blocks
pub fn remove_blocks(&mut self, hashes: &HashSet<H256>) -> Vec<(H256, Block)> {
pub fn remove_blocks(&mut self, hashes: &HashSet<H256>) -> Vec<(H256, IndexedBlock)> {
// TODO: excess clone
let mut removed: Vec<(H256, Block)> = Vec::new();
let mut removed: Vec<(H256, IndexedBlock)> = Vec::new();
let parent_orphan_keys: Vec<_> = self.orphaned_blocks.keys().cloned().collect();
for parent_orphan_key in parent_orphan_keys {
if let Entry::Occupied(mut orphan_entry) = self.orphaned_blocks.entry(parent_orphan_key) {
@ -138,7 +138,7 @@ mod tests {
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
pool.insert_orphaned_block(b1_hash.clone(), b1);
pool.insert_orphaned_block(b1_hash.clone(), b1.into());
assert_eq!(pool.len(), 1);
assert!(!pool.contains_unknown_block(&b1_hash));
@ -151,7 +151,7 @@ mod tests {
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
pool.insert_unknown_block(b1_hash.clone(), b1);
pool.insert_unknown_block(b1_hash.clone(), b1.into());
assert_eq!(pool.len(), 1);
assert!(pool.contains_unknown_block(&b1_hash));
@ -166,8 +166,8 @@ mod tests {
let b2 = test_data::block_h169();
let b2_hash = b2.hash();
pool.insert_orphaned_block(b1_hash.clone(), b1);
pool.insert_unknown_block(b2_hash.clone(), b2);
pool.insert_orphaned_block(b1_hash.clone(), b1.into());
pool.insert_unknown_block(b2_hash.clone(), b2.into());
assert_eq!(pool.len(), 2);
assert!(!pool.contains_unknown_block(&b1_hash));
@ -192,9 +192,9 @@ mod tests {
let b3 = test_data::block_h2();
let b3_hash = b3.hash();
pool.insert_orphaned_block(b1_hash.clone(), b1);
pool.insert_unknown_block(b2_hash.clone(), b2);
pool.insert_orphaned_block(b3_hash.clone(), b3);
pool.insert_orphaned_block(b1_hash.clone(), b1.into());
pool.insert_unknown_block(b2_hash.clone(), b2.into());
pool.insert_orphaned_block(b3_hash.clone(), b3.into());
let removed = pool.remove_blocks_for_parent(&test_data::genesis().hash());
assert_eq!(removed.len(), 2);
@ -222,11 +222,11 @@ mod tests {
let b5 = test_data::block_h181();
let b5_hash = b5.hash();
pool.insert_orphaned_block(b1_hash.clone(), b1);
pool.insert_orphaned_block(b2_hash.clone(), b2);
pool.insert_orphaned_block(b3_hash.clone(), b3);
pool.insert_orphaned_block(b4_hash.clone(), b4);
pool.insert_orphaned_block(b5_hash.clone(), b5);
pool.insert_orphaned_block(b1_hash.clone(), b1.into());
pool.insert_orphaned_block(b2_hash.clone(), b2.into());
pool.insert_orphaned_block(b3_hash.clone(), b3.into());
pool.insert_orphaned_block(b4_hash.clone(), b4.into());
pool.insert_orphaned_block(b5_hash.clone(), b5.into());
let mut blocks_to_remove: HashSet<H256> = HashSet::new();
blocks_to_remove.insert(b1_hash.clone());

View File

@ -3,9 +3,10 @@ use std::sync::Arc;
use std::collections::VecDeque;
use linked_hash_map::LinkedHashMap;
use parking_lot::RwLock;
use chain::{Block, BlockHeader, Transaction};
use db;
use chain::{BlockHeader, Transaction};
use db::{self, IndexedBlock};
use best_headers_chain::{BestHeadersChain, Information as BestHeadersInformation};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use hash_queue::{HashQueueChain, HashPosition};
use miner::{MemoryPool, MemoryPoolOrderingStrategy, MemoryPoolInformation};
@ -227,8 +228,7 @@ impl Chain {
/// Get block header by number
pub fn block_header_by_number(&self, number: u32) -> Option<BlockHeader> {
if number <= self.best_storage_block.number {
// TODO: read block header only
self.storage.block(db::BlockRef::Number(number)).map(|b| b.block_header)
self.storage.block_header(db::BlockRef::Number(number))
} else {
self.headers_chain.at(number - self.best_storage_block.number)
}
@ -308,11 +308,11 @@ impl Chain {
}
/// Insert new best block to storage
pub fn insert_best_block(&mut self, hash: H256, block: &Block) -> Result<BlockInsertionResult, db::Error> {
let is_appending_to_main_branch = self.best_storage_block.hash == block.block_header.previous_header_hash;
pub fn insert_best_block(&mut self, hash: H256, block: &IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
let is_appending_to_main_branch = self.best_storage_block.hash == block.header().previous_header_hash;
// insert to storage
let storage_insertion = try!(self.storage.insert_block(&block));
let storage_insertion = try!(self.storage.insert_indexed_block(&block));
// remember new best block hash
self.best_storage_block = self.storage.best_block().expect("Inserted block above");
@ -327,7 +327,7 @@ impl Chain {
// all transactions from this block were accepted
// => delete accepted transactions from verification queue and from the memory pool
let this_block_transactions_hashes = block.transactions.iter().map(|tx| tx.hash());
let this_block_transactions_hashes: Vec<H256> = block.transaction_hashes().iter().cloned().collect();
for transaction_accepted in this_block_transactions_hashes {
self.memory_pool.remove_by_hash(&transaction_accepted);
self.verifying_transactions.remove(&transaction_accepted);
@ -352,7 +352,7 @@ impl Chain {
// all transactions from this block were accepted
// + all transactions from previous blocks of this fork were accepted
// => delete accepted transactions from verification queue and from the memory pool
let this_block_transactions_hashes = block.transactions.iter().map(|tx| tx.hash());
let this_block_transactions_hashes: Vec<H256> = block.transaction_hashes().iter().cloned().collect();
let mut canonized_blocks_hashes: Vec<H256> = Vec::new();
let mut new_main_blocks_transactions_hashes: Vec<H256> = Vec::new();
while let Some(canonized_block_hash) = reorganization.pop_canonized() {
@ -360,7 +360,7 @@ impl Chain {
new_main_blocks_transactions_hashes.extend(canonized_transactions_hashes);
canonized_blocks_hashes.push(canonized_block_hash);
}
for transaction_accepted in this_block_transactions_hashes.chain(new_main_blocks_transactions_hashes.into_iter()) {
for transaction_accepted in this_block_transactions_hashes.into_iter().chain(new_main_blocks_transactions_hashes.into_iter()) {
self.memory_pool.remove_by_hash(&transaction_accepted);
self.verifying_transactions.remove(&transaction_accepted);
}
@ -610,6 +610,12 @@ impl Chain {
}
}
/// Get transaction by hash (if it's in memory pool or verifying)
pub fn transaction_by_hash(&self, hash: &H256) -> Option<Transaction> {
self.verifying_transactions.get(hash).cloned()
.or_else(|| self.memory_pool.read_by_hash(hash).cloned())
}
/// Insert transaction to memory pool
pub fn insert_verified_transaction(&mut self, transaction: Transaction) {
self.memory_pool.insert_verified(transaction);
@ -661,6 +667,18 @@ impl Chain {
}
}
impl db::TransactionProvider for Chain {
fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
self.memory_pool.transaction_bytes(hash)
.or_else(|| self.storage.transaction_bytes(hash))
}
fn transaction(&self, hash: &H256) -> Option<Transaction> {
self.memory_pool.transaction(hash)
.or_else(|| self.storage.transaction(hash))
}
}
impl fmt::Debug for Information {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[sch:{} / bh:{} -> req:{} -> vfy:{} -> stored: {}]", self.scheduled, self.headers.best, self.requested, self.verifying, self.stored)
@ -772,7 +790,7 @@ mod tests {
assert!(chain.information().scheduled == 3 && chain.information().requested == 1
&& chain.information().verifying == 1 && chain.information().stored == 1);
// insert new best block to the chain
chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Db error");
chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Db error");
assert!(chain.information().scheduled == 3 && chain.information().requested == 1
&& chain.information().verifying == 1 && chain.information().stored == 2);
assert_eq!(db.best_block().expect("storage with genesis block is required").number, 1);
@ -787,13 +805,13 @@ mod tests {
let block1 = test_data::block_h1();
let block1_hash = block1.hash();
chain.insert_best_block(block1_hash.clone(), &block1).expect("Error inserting new block");
chain.insert_best_block(block1_hash.clone(), &block1.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block1_hash.clone(), genesis_hash.clone()]);
let block2 = test_data::block_h2();
let block2_hash = block2.hash();
chain.insert_best_block(block2_hash.clone(), &block2).expect("Error inserting new block");
chain.insert_best_block(block2_hash.clone(), &block2.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block2_hash.clone(), block1_hash.clone(), genesis_hash.clone()]);
let blocks0 = test_data::build_n_empty_blocks_from_genesis(11, 0);
@ -866,8 +884,8 @@ mod tests {
fn chain_intersect_with_inventory() {
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
// append 2 db blocks
chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Error inserting new block");
chain.insert_best_block(test_data::block_h2().hash(), &test_data::block_h2()).expect("Error inserting new block");
chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Error inserting new block");
chain.insert_best_block(test_data::block_h2().hash(), &test_data::block_h2().into()).expect("Error inserting new block");
// prepare blocks
let blocks0 = test_data::build_n_empty_blocks_from(9, 0, &test_data::block_h2().block_header);
@ -980,7 +998,7 @@ mod tests {
assert_eq!(chain.information().transactions.transactions_count, 1);
// when block is inserted to the database => all accepted transactions are removed from mempool && verifying queue
chain.insert_best_block(b1.hash(), &b1).expect("block accepted");
chain.insert_best_block(b1.hash(), &b1.into()).expect("block accepted");
assert_eq!(chain.information().transactions.transactions_count, 0);
assert!(!chain.forget_verifying_transaction(&tx1_hash));
@ -1050,15 +1068,15 @@ mod tests {
chain.insert_verified_transaction(tx2);
// no reorg
let result = chain.insert_best_block(b1.hash(), &b1).expect("no error");
let result = chain.insert_best_block(b1.hash(), &b1.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0);
// no reorg
let result = chain.insert_best_block(b2.hash(), &b2).expect("no error");
let result = chain.insert_best_block(b2.hash(), &b2.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0);
// reorg
let result = chain.insert_best_block(b3.hash(), &b3).expect("no error");
let result = chain.insert_best_block(b3.hash(), &b3.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 2);
assert!(result.transactions_to_reverify.iter().any(|&(ref h, _)| h == &tx1_hash));
assert!(result.transactions_to_reverify.iter().any(|&(ref h, _)| h == &tx2_hash));
@ -1102,18 +1120,18 @@ mod tests {
chain.insert_verified_transaction(tx4);
chain.insert_verified_transaction(tx5);
assert_eq!(chain.insert_best_block(b0.hash(), &b0).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
assert_eq!(chain.insert_best_block(b0.hash(), &b0.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b1.hash(), &b1).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
assert_eq!(chain.insert_best_block(b1.hash(), &b1.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b2.hash(), &b2).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
assert_eq!(chain.insert_best_block(b2.hash(), &b2.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b3.hash(), &b3).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.insert_best_block(b3.hash(), &b3.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b4.hash(), &b4).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.insert_best_block(b4.hash(), &b4.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3);
// order matters
let insert_result = chain.insert_best_block(b5.hash(), &b5).expect("block accepted");
let insert_result = chain.insert_best_block(b5.hash(), &b5.clone().into()).expect("block accepted");
let transactions_to_reverify_hashes: Vec<_> = insert_result
.transactions_to_reverify
.into_iter()

View File

@ -7,8 +7,8 @@ use futures::{BoxFuture, Future, finished};
use futures::stream::Stream;
use tokio_core::reactor::{Handle, Interval};
use futures_cpupool::CpuPool;
use db;
use chain::{Block, BlockHeader, Transaction};
use db::{self, IndexedBlock};
use chain::{BlockHeader, Transaction};
use message::types;
use message::common::{InventoryVector, InventoryType};
use primitives::hash::H256;
@ -25,7 +25,9 @@ use synchronization_manager::{manage_synchronization_peers_blocks, manage_synchr
manage_unknown_orphaned_blocks, manage_orphaned_transactions, MANAGEMENT_INTERVAL_MS,
ManagePeersConfig, ManageUnknownBlocksConfig, ManageOrphanTransactionsConfig};
use synchronization_verifier::{Verifier, VerificationSink};
use compact_block_builder::build_compact_block;
use hash_queue::HashPosition;
use miner::transaction_fee_rate;
use time;
use std::time::Duration;
@ -189,12 +191,13 @@ pub trait Client : Send + 'static {
fn on_new_transactions_inventory(&mut self, peer_index: usize, transactions_hashes: Vec<H256>);
fn on_new_blocks_headers(&mut self, peer_index: usize, blocks_headers: Vec<BlockHeader>);
fn on_peer_blocks_notfound(&mut self, peer_index: usize, blocks_hashes: Vec<H256>);
fn on_peer_block(&mut self, peer_index: usize, block: Block);
fn on_peer_block(&mut self, peer_index: usize, block: IndexedBlock);
fn on_peer_transaction(&mut self, peer_index: usize, transaction: Transaction);
fn on_peer_filterload(&mut self, peer_index: usize, message: &types::FilterLoad);
fn on_peer_filteradd(&mut self, peer_index: usize, message: &types::FilterAdd);
fn on_peer_filterclear(&mut self, peer_index: usize);
fn on_peer_sendheaders(&mut self, peer_index: usize);
fn on_peer_block_announcement_type(&mut self, peer_index: usize, announcement_type: BlockAnnouncementType);
fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter);
fn on_peer_disconnected(&mut self, peer_index: usize);
fn after_peer_nearly_blocks_verified(&mut self, peer_index: usize, future: BoxFuture<(), ()>);
}
@ -209,12 +212,13 @@ pub trait ClientCore : VerificationSink {
fn on_new_transactions_inventory(&mut self, peer_index: usize, transactions_hashes: Vec<H256>);
fn on_new_blocks_headers(&mut self, peer_index: usize, blocks_headers: Vec<BlockHeader>);
fn on_peer_blocks_notfound(&mut self, peer_index: usize, blocks_hashes: Vec<H256>);
fn on_peer_block(&mut self, peer_index: usize, block: Block) -> Option<VecDeque<(H256, Block)>>;
fn on_peer_block(&mut self, peer_index: usize, block: IndexedBlock) -> Option<VecDeque<(H256, IndexedBlock)>>;
fn on_peer_transaction(&mut self, peer_index: usize, transaction: Transaction) -> Option<VecDeque<(H256, Transaction)>>;
fn on_peer_filterload(&mut self, peer_index: usize, message: &types::FilterLoad);
fn on_peer_filteradd(&mut self, peer_index: usize, message: &types::FilterAdd);
fn on_peer_filterclear(&mut self, peer_index: usize);
fn on_peer_sendheaders(&mut self, peer_index: usize);
fn on_peer_block_announcement_type(&mut self, peer_index: usize, announcement_type: BlockAnnouncementType);
fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter);
fn on_peer_disconnected(&mut self, peer_index: usize);
fn after_peer_nearly_blocks_verified(&mut self, peer_index: usize, future: BoxFuture<(), ()>);
fn execute_synchronization_tasks(&mut self, forced_blocks_requests: Option<Vec<H256>>);
@ -240,6 +244,17 @@ pub struct FilteredInventory {
pub notfound: Vec<InventoryVector>,
}
#[derive(Debug, Clone, Copy)]
/// New block announcement type
pub enum BlockAnnouncementType {
/// Send inventory with block hash
SendInventory,
/// Send block header
SendHeader,
/// Send compact block
SendCompactBlock,
}
/// Synchronization client facade
pub struct SynchronizationClient<T: TaskExecutor, U: Verifier> {
/// Client core
@ -356,8 +371,8 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
self.core.lock().on_peer_blocks_notfound(peer_index, blocks_hashes);
}
fn on_peer_block(&mut self, peer_index: usize, block: Block) {
let blocks_to_verify = { self.core.lock().on_peer_block(peer_index, block) };
fn on_peer_block(&mut self, peer_index: usize, block: IndexedBlock) {
let blocks_to_verify = self.core.lock().on_peer_block(peer_index, block);
// verify selected blocks
if let Some(mut blocks_to_verify) = blocks_to_verify {
@ -397,8 +412,12 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
self.core.lock().on_peer_filterclear(peer_index);
}
fn on_peer_sendheaders(&mut self, peer_index: usize) {
self.core.lock().on_peer_sendheaders(peer_index);
fn on_peer_block_announcement_type(&mut self, peer_index: usize, announcement_type: BlockAnnouncementType) {
self.core.lock().on_peer_block_announcement_type(peer_index, announcement_type);
}
fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter) {
self.core.lock().on_peer_feefilter(peer_index, message);
}
fn on_peer_disconnected(&mut self, peer_index: usize) {
@ -590,8 +609,8 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
/// Process new block.
fn on_peer_block(&mut self, peer_index: usize, block: Block) -> Option<VecDeque<(H256, Block)>> {
let block_hash = block.hash();
fn on_peer_block(&mut self, peer_index: usize, block: IndexedBlock) -> Option<VecDeque<(H256, IndexedBlock)>> {
let block_hash = block.hash().clone();
// update peers to select next tasks
self.peers.on_block_received(peer_index, &block_hash);
@ -630,10 +649,17 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
}
/// Peer wants to get blocks headers instead of blocks hashes when announcing new blocks
fn on_peer_sendheaders(&mut self, peer_index: usize) {
/// Change the way peer is informed about new blocks
fn on_peer_block_announcement_type(&mut self, peer_index: usize, announcement_type: BlockAnnouncementType) {
if self.peers.is_known_peer(peer_index) {
self.peers.on_peer_sendheaders(peer_index);
self.peers.set_block_announcement_type(peer_index, announcement_type);
}
}
/// Peer wants to limit transaction announcing by transaction fee
fn on_peer_feefilter(&mut self, peer_index: usize, message: &types::FeeFilter) {
if self.peers.is_known_peer(peer_index) {
self.peers.on_peer_feefilter(peer_index, message.fee_rate);
}
}
@ -750,7 +776,7 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
impl<T> VerificationSink for SynchronizationClientCore<T> where T: TaskExecutor {
/// Process successful block verification
fn on_block_verification_success(&mut self, block: Block) {
fn on_block_verification_success(&mut self, block: IndexedBlock) {
let hash = block.hash();
// insert block to the storage
match {
@ -823,7 +849,7 @@ impl<T> VerificationSink for SynchronizationClientCore<T> where T: TaskExecutor
fn on_transaction_verification_success(&mut self, transaction: Transaction) {
let hash = transaction.hash();
{
let transaction_fee_rate = {
// insert transaction to the memory pool
let mut chain = self.chain.write();
@ -835,10 +861,13 @@ impl<T> VerificationSink for SynchronizationClientCore<T> where T: TaskExecutor
// transaction was in verification queue => insert to memory pool
chain.insert_verified_transaction(transaction.clone());
}
// calculate transaction fee rate
transaction_fee_rate(&*chain, &transaction)
};
// relay transaction to peers
self.relay_new_transactions(vec![(hash, &transaction)]);
self.relay_new_transactions(vec![(hash, &transaction, transaction_fee_rate)]);
}
/// Process failed transaction verification
@ -930,35 +959,62 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
let tasks: Vec<_> = {
self.peers.all_peers().into_iter()
.filter_map(|peer_index| {
let send_headers = self.peers.send_headers(peer_index);
let block_announcement_type = self.peers.block_announcement_type(peer_index);
if send_headers {
let filtered_blocks_hashes: Vec<_> = new_blocks_hashes.iter()
.filter(|h| self.peers.filter(peer_index).filter_block(h))
.collect();
let chain = self.chain.read();
let headers: Vec<_> = filtered_blocks_hashes.into_iter()
.filter_map(|h| chain.block_header_by_hash(&h))
.collect();
if !headers.is_empty() {
Some(Task::SendHeaders(peer_index, headers, ServerTaskIndex::None))
}
else {
None
}
} else {
let inventory: Vec<_> = new_blocks_hashes.iter()
.filter(|h| self.peers.filter(peer_index).filter_block(h))
.map(|h| InventoryVector {
inv_type: InventoryType::MessageBlock,
hash: h.clone(),
})
.collect();
if !inventory.is_empty() {
Some(Task::SendInventory(peer_index, inventory, ServerTaskIndex::None))
} else {
None
}
match block_announcement_type {
BlockAnnouncementType::SendHeader => {
let filtered_blocks_hashes: Vec<_> = new_blocks_hashes.iter()
.filter(|h| self.peers.filter(peer_index).filter_block(h))
.collect();
let chain = self.chain.read();
let headers: Vec<_> = filtered_blocks_hashes.into_iter()
.filter_map(|h| chain.block_header_by_hash(&h))
.collect();
if !headers.is_empty() {
Some(Task::SendHeaders(peer_index, headers, ServerTaskIndex::None))
}
else {
None
}
},
BlockAnnouncementType::SendCompactBlock => {
let indexed_blocks: Vec<db::IndexedBlock> = {
let chain = self.chain.read();
new_blocks_hashes.iter()
.filter_map(|h| chain.storage().block(db::BlockRef::Hash(h.clone())))
.map(|b| b.into())
.collect()
};
let block_header_and_ids: Vec<_> = indexed_blocks.into_iter()
.filter_map(|b| if self.peers.filter(peer_index).filter_block(&b.hash()) {
let prefilled_transactions_indexes = b.transactions().enumerate()
// we do not filter by fee rate here, because it only reasonable for non-mined transactions
.filter(|&(_, (h, t))| self.peers.filter_mut(peer_index).filter_transaction(h, t, None))
.map(|(idx, _)| idx)
.collect();
Some(build_compact_block(b, prefilled_transactions_indexes))
} else {
None
})
.collect();
Some(Task::SendCompactBlocks(peer_index, block_header_and_ids, ServerTaskIndex::None))
},
BlockAnnouncementType::SendInventory => {
let inventory: Vec<_> = new_blocks_hashes.iter()
.filter(|h| self.peers.filter(peer_index).filter_block(h))
.map(|h| InventoryVector {
inv_type: InventoryType::MessageBlock,
hash: h.clone(),
})
.collect();
if !inventory.is_empty() {
Some(Task::SendInventory(peer_index, inventory, ServerTaskIndex::None))
} else {
None
}
},
}
})
.collect()
@ -971,12 +1027,14 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
/// Relay new transactions
fn relay_new_transactions(&mut self, new_transactions: Vec<(H256, &Transaction)>) {
fn relay_new_transactions(&mut self, new_transactions: Vec<(H256, &Transaction, u64)>) {
let tasks: Vec<_> = self.peers.all_peers().into_iter()
.filter_map(|peer_index| {
let inventory: Vec<_> = new_transactions.iter()
.filter(|&&(ref h, tx)| self.peers.filter_mut(peer_index).filter_transaction(h, tx))
.map(|&(ref h, _)| InventoryVector {
.filter(|&&(ref h, tx, tx_fee_rate)| {
self.peers.filter_mut(peer_index).filter_transaction(h, tx, Some(tx_fee_rate))
})
.map(|&(ref h, _, _)| InventoryVector {
inv_type: InventoryType::MessageTx,
hash: h.clone(),
})
@ -1056,9 +1114,9 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
/// Process new peer block
fn process_peer_block(&mut self, peer_index: usize, block_hash: H256, block: Block) -> Option<VecDeque<(H256, Block)>> {
fn process_peer_block(&mut self, peer_index: usize, block_hash: H256, block: IndexedBlock) -> Option<VecDeque<(H256, IndexedBlock)>> {
// prepare list of blocks to verify + make all required changes to the chain
let mut result: Option<VecDeque<(H256, Block)>> = None;
let mut result: Option<VecDeque<(H256, IndexedBlock)>> = None;
let mut chain = self.chain.write();
match chain.block_state(&block_hash) {
BlockState::Verifying | BlockState::Stored => {
@ -1067,7 +1125,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
},
BlockState::Unknown | BlockState::Scheduled | BlockState::Requested => {
// check parent block state
match chain.block_state(&block.block_header.previous_header_hash) {
match chain.block_state(&block.header().previous_header_hash) {
BlockState::Unknown => {
if self.state.is_synchronizing() {
// when synchronizing, we tend to receive all blocks in-order
@ -1086,21 +1144,23 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// remove this block from the queue
chain.forget_block_leave_header(&block_hash);
// remember this block as unknown
self.orphaned_blocks_pool.insert_unknown_block(block_hash, block);
if !self.orphaned_blocks_pool.contains_unknown_block(&block_hash) {
self.orphaned_blocks_pool.insert_unknown_block(block_hash, block);
}
}
},
BlockState::Verifying | BlockState::Stored => {
// remember peer as useful
self.peers.useful_peer(peer_index);
// schedule verification
let mut blocks_to_verify: VecDeque<(H256, Block)> = VecDeque::new();
let mut blocks_to_verify: VecDeque<(H256, IndexedBlock)> = VecDeque::new();
blocks_to_verify.push_back((block_hash.clone(), block));
blocks_to_verify.extend(self.orphaned_blocks_pool.remove_blocks_for_parent(&block_hash));
// forget blocks we are going to process
let blocks_hashes_to_forget: Vec<_> = blocks_to_verify.iter().map(|t| t.0.clone()).collect();
chain.forget_blocks_leave_header(&blocks_hashes_to_forget);
// remember that we are verifying these blocks
let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|&(ref h, ref b)| (h.clone(), b.block_header.clone())).collect();
let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|&(ref h, ref b)| (h.clone(), b.header().clone())).collect();
chain.verify_blocks(blocks_headers_to_verify);
// remember that we are verifying block from this peer
for &(ref verifying_block_hash, _) in &blocks_to_verify {
@ -1285,7 +1345,8 @@ pub mod tests {
use tokio_core::reactor::{Core, Handle};
use chain::{Block, Transaction};
use message::common::{InventoryVector, InventoryType};
use super::{Client, Config, SynchronizationClient, SynchronizationClientCore};
use message::types;
use super::{Client, Config, SynchronizationClient, SynchronizationClientCore, BlockAnnouncementType};
use connection_filter::tests::*;
use synchronization_executor::Task;
use synchronization_chain::{Chain, ChainRef};
@ -1351,7 +1412,7 @@ pub mod tests {
assert_eq!(sync.information().peers.active, 1);
// push unknown block => will be queued as orphan
sync.on_peer_block(5, block2);
sync.on_peer_block(5, block2.into());
assert!(sync.information().state.is_nearly_saturated());
assert_eq!(sync.information().orphaned_blocks, 1);
assert_eq!(sync.information().chain.scheduled, 0);
@ -1361,7 +1422,7 @@ pub mod tests {
assert_eq!(sync.information().peers.active, 1);
// push requested block => should be moved to the test storage && orphan should be moved
sync.on_peer_block(5, block1);
sync.on_peer_block(5, block1.into());
assert!(sync.information().state.is_saturated());
assert_eq!(sync.information().orphaned_blocks, 0);
assert_eq!(sync.information().chain.scheduled, 0);
@ -1378,7 +1439,7 @@ pub mod tests {
let mut sync = sync.lock();
sync.on_new_blocks_headers(5, vec![test_data::block_h1().block_header.clone(), test_data::block_h2().block_header.clone()]);
sync.on_peer_block(5, test_data::block_h169());
sync.on_peer_block(5, test_data::block_h169().into());
// out-of-order block was presented by the peer
assert!(sync.information().state.is_synchronizing());
@ -1426,11 +1487,11 @@ pub mod tests {
{
let mut sync = sync.lock();
// receive block from peer#2
sync.on_peer_block(2, block2);
sync.on_peer_block(2, block2.into());
assert!(sync.information().chain.requested == 2
&& sync.information().orphaned_blocks == 1);
// receive block from peer#1
sync.on_peer_block(1, block1);
sync.on_peer_block(1, block1.into());
assert!(sync.information().chain.requested == 0
&& sync.information().orphaned_blocks == 0
@ -1478,7 +1539,7 @@ pub mod tests {
sync.on_new_blocks_headers(1, vec![block.block_header.clone()]);
sync.on_new_blocks_headers(2, vec![block.block_header.clone()]);
executor.lock().take_tasks();
sync.on_peer_block(2, block.clone());
sync.on_peer_block(2, block.clone().into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks.len(), 5);
@ -1511,7 +1572,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 2);
}
sync.on_peer_block(1, b1);
sync.on_peer_block(1, b1.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![]);
@ -1522,7 +1583,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 1);
}
sync.on_peer_block(1, b2);
sync.on_peer_block(1, b2.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![Task::RequestBlocksHeaders(1), Task::RequestMemoryPool(1)]);
@ -1554,7 +1615,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 2);
}
sync.on_peer_block(1, b2);
sync.on_peer_block(1, b2.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![]);
@ -1565,7 +1626,7 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 2);
}
sync.on_peer_block(1, b1);
sync.on_peer_block(1, b1.into());
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![Task::RequestBlocksHeaders(1), Task::RequestMemoryPool(1)]);
@ -1615,35 +1676,35 @@ pub mod tests {
Task::RequestBlocks(2, vec![fork2[0].hash(), fork2[1].hash(), fork2[2].hash()]),
]);
sync.on_peer_block(2, fork2[0].clone());
sync.on_peer_block(2, fork2[0].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork2[0].hash());
assert_eq!(chain.best_storage_block().number, 1);
}
sync.on_peer_block(1, fork1[0].clone());
sync.on_peer_block(1, fork1[0].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork2[0].hash());
assert_eq!(chain.best_storage_block().number, 1);
}
sync.on_peer_block(1, fork1[1].clone());
sync.on_peer_block(1, fork1[1].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork1[1].hash());
assert_eq!(chain.best_storage_block().number, 2);
}
sync.on_peer_block(2, fork2[1].clone());
sync.on_peer_block(2, fork2[1].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork1[1].hash());
assert_eq!(chain.best_storage_block().number, 2);
}
sync.on_peer_block(2, fork2[2].clone());
sync.on_peer_block(2, fork2[2].clone().into());
{
let chain = chain.read();
assert_eq!(chain.best_storage_block().hash, fork2[2].hash());
@ -1681,12 +1742,12 @@ pub mod tests {
assert_eq!(chain.information().headers.total, 3);
}
sync.on_peer_block(1, common_block.clone());
sync.on_peer_block(1, fork1[0].clone());
sync.on_peer_block(1, fork1[1].clone());
sync.on_peer_block(2, fork2[0].clone());
sync.on_peer_block(2, fork2[1].clone());
sync.on_peer_block(2, fork2[2].clone());
sync.on_peer_block(1, common_block.clone().into());
sync.on_peer_block(1, fork1[0].clone().into());
sync.on_peer_block(1, fork1[1].clone().into());
sync.on_peer_block(2, fork2[0].clone().into());
sync.on_peer_block(2, fork2[1].clone().into());
sync.on_peer_block(2, fork2[2].clone().into());
{
let chain = chain.read();
@ -1700,7 +1761,7 @@ pub mod tests {
let (_, _, _, chain, sync) = create_sync(None, None);
let mut sync = sync.lock();
sync.on_peer_block(1, test_data::block_h2());
sync.on_peer_block(1, test_data::block_h2().into());
assert_eq!(sync.information().orphaned_blocks, 1);
{
@ -1708,7 +1769,7 @@ pub mod tests {
assert_eq!(chain.best_storage_block().number, 0);
}
sync.on_peer_block(1, test_data::block_h1());
sync.on_peer_block(1, test_data::block_h1().into());
assert_eq!(sync.information().orphaned_blocks, 0);
{
@ -1722,7 +1783,7 @@ pub mod tests {
let (_, _, executor, _, sync) = create_sync(None, None);
let mut sync = sync.lock();
sync.on_peer_block(1, test_data::block_h2());
sync.on_peer_block(1, test_data::block_h2().into());
sync.on_new_blocks_inventory(1, vec![test_data::block_h1().hash(), test_data::block_h2().hash()]);
let tasks = executor.lock().take_tasks();
@ -1992,11 +2053,11 @@ pub mod tests {
sync.on_new_blocks_headers(1, vec![b10.block_header.clone(), b11.block_header.clone(), b12.block_header.clone()]);
sync.on_new_blocks_headers(2, vec![b10.block_header.clone(), b21.block_header.clone(), b22.block_header.clone()]);
sync.on_peer_block(1, b10.clone());
sync.on_peer_block(1, b11);
sync.on_peer_block(1, b12);
sync.on_peer_block(1, b10.clone().into());
sync.on_peer_block(1, b11.into());
sync.on_peer_block(1, b12.into());
sync.on_peer_block(2, b21.clone());
sync.on_peer_block(2, b21.clone().into());
// should not panic here
sync.on_new_blocks_headers(2, vec![b10.block_header.clone(), b21.block_header.clone(),
@ -2014,8 +2075,8 @@ pub mod tests {
let mut sync = sync.lock();
sync.on_new_blocks_headers(1, vec![b0.block_header.clone(), b1.block_header.clone()]);
sync.on_peer_block(1, b0.clone());
sync.on_peer_block(1, b1.clone());
sync.on_peer_block(1, b0.clone().into());
sync.on_peer_block(1, b1.clone().into());
// we were in synchronization state => block is not relayed
{
@ -2027,7 +2088,7 @@ pub mod tests {
]);
}
sync.on_peer_block(2, b2.clone());
sync.on_peer_block(2, b2.clone().into());
// we were in saturated state => block is relayed
{
@ -2037,7 +2098,7 @@ pub mod tests {
}
sync.on_new_blocks_headers(1, vec![b3.block_header.clone()]);
sync.on_peer_block(1, b3.clone());
sync.on_peer_block(1, b3.clone().into());
// we were in nearly saturated state => block is relayed
{
@ -2128,13 +2189,13 @@ pub mod tests {
let mut sync = sync.lock();
sync.on_peer_connected(1);
sync.on_peer_connected(2);
sync.on_peer_sendheaders(2);
sync.on_peer_block_announcement_type(2, BlockAnnouncementType::SendHeader);
sync.on_peer_connected(3);
// igonore tasks
{ executor.lock().take_tasks(); }
sync.on_peer_block(1, b0.clone());
sync.on_peer_block(1, b0.clone().into());
let tasks = executor.lock().take_tasks();
let inventory = vec![InventoryVector { inv_type: InventoryType::MessageBlock, hash: b0.hash() }];
@ -2144,4 +2205,93 @@ pub mod tests {
Task::SendInventory(3, inventory, ServerTaskIndex::None),
]);
}
#[test]
fn relay_new_transaction_with_feefilter() {
let (_, _, executor, chain, sync) = create_sync(None, None);
let b1 = test_data::block_builder().header().parent(test_data::genesis().hash()).build()
.transaction().output().value(1_000_000).build().build()
.build(); // genesis -> b1
let tx0 = b1.transactions[0].clone();
let tx1: Transaction = test_data::TransactionBuilder::with_output(800_000).add_input(&tx0, 0).into();
let tx1_hash = tx1.hash();
let mut sync = sync.lock();
sync.on_peer_connected(1);
sync.on_peer_connected(2);
sync.on_peer_connected(3);
sync.on_peer_connected(4);
sync.on_peer_block(1, b1.into());
{
use miner::transaction_fee_rate;
let chain = chain.read();
assert_eq!(transaction_fee_rate(&*chain, &tx1), 3333); // 200_000 / 60
}
sync.on_peer_feefilter(2, &types::FeeFilter { fee_rate: 3000, });
sync.on_peer_feefilter(3, &types::FeeFilter { fee_rate: 4000, });
// forget previous tasks
{ executor.lock().take_tasks(); }
sync.on_peer_transaction(1, tx1);
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![
Task::SendInventory(2, vec![
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: tx1_hash.clone(),
}
], ServerTaskIndex::None),
Task::SendInventory(4, vec![
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: tx1_hash.clone(),
}
], ServerTaskIndex::None),
]);
}
#[test]
fn receive_same_unknown_block_twice() {
let (_, _, _, _, sync) = create_sync(None, None);
let mut sync = sync.lock();
sync.on_peer_block(1, test_data::block_h2().into());
// should not panic here
sync.on_peer_block(2, test_data::block_h2().into());
}
#[test]
fn relay_new_block_after_sendcmpct() {
let (_, _, executor, _, sync) = create_sync(None, None);
let genesis = test_data::genesis();
let b0 = test_data::block_builder().header().parent(genesis.hash()).build().build();
let mut sync = sync.lock();
sync.on_peer_connected(1);
sync.on_peer_connected(2);
sync.on_peer_block_announcement_type(2, BlockAnnouncementType::SendCompactBlock);
sync.on_peer_connected(3);
// igonore tasks
{ executor.lock().take_tasks(); }
sync.on_peer_block(1, b0.clone().into());
let tasks = executor.lock().take_tasks();
let inventory = vec![InventoryVector { inv_type: InventoryType::MessageBlock, hash: b0.hash() }];
assert_eq!(tasks.len(), 3);
assert_eq!(tasks[0], Task::RequestBlocksHeaders(1));
match tasks[1] {
Task::SendCompactBlocks(2, _, _) => (),
_ => panic!("unexpected task"),
}
assert_eq!(tasks[2], Task::SendInventory(3, inventory, ServerTaskIndex::None));
}
}

View File

@ -2,7 +2,7 @@ use std::sync::Arc;
use std::collections::HashMap;
use parking_lot::Mutex;
use chain::{Block, BlockHeader, Transaction};
use message::common::{InventoryVector, InventoryType};
use message::common::{InventoryVector, InventoryType, BlockHeaderAndIDs, BlockTransactions};
use message::types;
use primitives::hash::H256;
use p2p::OutboundSyncConnectionRef;
@ -17,6 +17,7 @@ pub trait TaskExecutor : Send + 'static {
fn execute(&mut self, task: Task);
}
// TODO: get rid of unneeded ServerTaskIndex-es
/// Synchronization task for the peer.
#[derive(Debug, PartialEq)]
pub enum Task {
@ -34,12 +35,16 @@ pub enum Task {
SendMerkleBlock(usize, types::MerkleBlock),
/// Send transaction
SendTransaction(usize, Transaction),
/// Send block transactions
SendBlockTxn(usize, H256, Vec<Transaction>),
/// Send notfound
SendNotFound(usize, Vec<InventoryVector>, ServerTaskIndex),
/// Send inventory
SendInventory(usize, Vec<InventoryVector>, ServerTaskIndex),
/// Send headers
SendHeaders(usize, Vec<BlockHeader>, ServerTaskIndex),
/// Send compact blocks
SendCompactBlocks(usize, Vec<BlockHeaderAndIDs>, ServerTaskIndex),
/// Notify io about ignored request
Ignore(usize, u32),
}
@ -152,6 +157,19 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
connection.send_transaction(&transaction_message);
}
},
Task::SendBlockTxn(peer_index, block_hash, transactions) => {
let transactions_message = types::BlockTxn {
request: BlockTransactions {
blockhash: block_hash,
transactions: transactions,
}
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", transactions_message.request.transactions.len(), peer_index);
connection.send_block_txn(&transactions_message);
}
},
Task::SendNotFound(peer_index, unknown_inventory, id) => {
let notfound = types::NotFound {
inventory: unknown_inventory,
@ -187,6 +205,17 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
}
}
},
Task::SendCompactBlocks(peer_index, compact_blocks, id) => {
if let Some(connection) = self.peers.get_mut(&peer_index) {
assert_eq!(id.raw(), None);
for compact_block in compact_blocks {
trace!(target: "sync", "Sending compact_block {:?} to peer#{}", compact_block.header.hash(), peer_index);
connection.send_compact_block(&types::CompactBlock {
header: compact_block,
});
}
}
},
Task::Ignore(peer_index, id) => {
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Ignoring request from peer#{} with id {}", peer_index, id);

View File

@ -8,9 +8,9 @@ use primitives::hash::H256;
/// Management interval (in ms)
pub const MANAGEMENT_INTERVAL_MS: u64 = 10 * 1000;
/// Response time before getting block to decrease peer score
const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 5 * 1000;
const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 60 * 1000;
/// Response time before getting inventory to decrease peer score
const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 5 * 1000;
const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 60 * 1000;
/// Unknown orphan block removal time
const DEFAULT_UNKNOWN_BLOCK_REMOVAL_TIME_MS: u32 = 20 * 60 * 1000;
/// Maximal number of orphaned blocks
@ -225,7 +225,7 @@ mod tests {
let config = ManageUnknownBlocksConfig { removal_time_ms: 1000, max_number: 100 };
let mut pool = OrphanBlocksPool::new();
let block = test_data::genesis();
pool.insert_unknown_block(block.hash(), block);
pool.insert_unknown_block(block.hash(), block.into());
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), None);
assert_eq!(pool.len(), 1);
}
@ -238,7 +238,7 @@ mod tests {
let mut pool = OrphanBlocksPool::new();
let block = test_data::genesis();
let block_hash = block.hash();
pool.insert_unknown_block(block_hash.clone(), block);
pool.insert_unknown_block(block_hash.clone(), block.into());
sleep(Duration::from_millis(1));
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block_hash]));
@ -253,8 +253,8 @@ mod tests {
let block1_hash = block1.hash();
let block2 = test_data::block_h2();
let block2_hash = block2.hash();
pool.insert_unknown_block(block1_hash.clone(), block1);
pool.insert_unknown_block(block2_hash.clone(), block2);
pool.insert_unknown_block(block1_hash.clone(), block1.into());
pool.insert_unknown_block(block2_hash.clone(), block2.into());
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block1_hash]));
assert_eq!(pool.len(), 1);
}

View File

@ -4,6 +4,7 @@ use primitives::hash::H256;
use linked_hash_map::LinkedHashMap;
use time::precise_time_s;
use connection_filter::ConnectionFilter;
use synchronization_client::BlockAnnouncementType;
/// Max peer failures # before excluding from sync process
const MAX_PEER_FAILURES: usize = 2;
@ -27,8 +28,8 @@ pub struct Peers {
inventory_requests_order: LinkedHashMap<usize, f64>,
/// Peer connections filters.
filters: HashMap<usize, ConnectionFilter>,
/// Flags, informing that peer wants `headers` message instead of `inventory` when announcing new blocks
send_headers: HashSet<usize>,
/// The way peer is informed about new blocks
block_announcement_types: HashMap<usize, BlockAnnouncementType>,
}
/// Information on synchronization peers
@ -54,7 +55,7 @@ impl Peers {
inventory_requests: HashSet::new(),
inventory_requests_order: LinkedHashMap::new(),
filters: HashMap::new(),
send_headers: HashSet::new(),
block_announcement_types: HashMap::new(),
}
}
@ -160,10 +161,10 @@ impl Peers {
self.filters.entry(peer_index).or_insert_with(ConnectionFilter::default)
}
/// Does peer wants `headers` message instead of `inventory` when announcing new blocks
pub fn send_headers(&self, peer_index: usize) -> bool {
assert!(self.is_known_peer(peer_index));
self.send_headers.contains(&peer_index)
/// Get the way peer is informed about new blocks
pub fn block_announcement_type(&self, peer_index: usize) -> BlockAnnouncementType {
self.block_announcement_types.get(&peer_index).cloned()
.unwrap_or(BlockAnnouncementType::SendInventory)
}
/// Mark peer as useful.
@ -193,9 +194,14 @@ impl Peers {
self.inventory_requests_order.remove(&peer_index);
}
/// Peer wants `headers` message instead of `inventory` when announcing new blocks
pub fn on_peer_sendheaders(&mut self, peer_index: usize) {
self.send_headers.insert(peer_index);
/// Change the way peer is informed about new blocks
pub fn set_block_announcement_type(&mut self, peer_index: usize, announcement_type: BlockAnnouncementType) {
self.block_announcement_types.insert(peer_index, announcement_type);
}
/// Peer wants to limit transaction announcing by transaction fee
pub fn on_peer_feefilter(&mut self, peer_index: usize, fee_rate: u64) {
self.filter_mut(peer_index).set_fee_rate(fee_rate);
}
/// Peer has been disconnected
@ -209,7 +215,7 @@ impl Peers {
self.inventory_requests.remove(&peer_index);
self.inventory_requests_order.remove(&peer_index);
self.filters.remove(&peer_index);
self.send_headers.remove(&peer_index);
self.block_announcement_types.remove(&peer_index);
peer_blocks_requests
.map(|hs| hs.into_iter().collect())
}

View File

@ -1,7 +1,7 @@
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::collections::{VecDeque, HashMap};
use std::collections::{VecDeque, HashMap, HashSet};
use std::collections::hash_map::Entry;
use futures::{Future, BoxFuture, lazy, finished};
use parking_lot::{Mutex, Condvar};
@ -19,6 +19,7 @@ pub trait Server : Send + Sync + 'static {
fn serve_getdata(&self, peer_index: usize, inventory: FilteredInventory) -> Option<IndexedServerTask>;
fn serve_getblocks(&self, peer_index: usize, message: types::GetBlocks) -> Option<IndexedServerTask>;
fn serve_getheaders(&self, peer_index: usize, message: types::GetHeaders, id: Option<u32>) -> Option<IndexedServerTask>;
fn serve_get_block_txn(&self, peer_index: usize, block_hash: H256, indexes: Vec<usize>) -> Option<IndexedServerTask>;
fn serve_mempool(&self, peer_index: usize) -> Option<IndexedServerTask>;
fn add_task(&self, peer_index: usize, task: IndexedServerTask);
}
@ -103,6 +104,7 @@ pub enum ServerTask {
ServeGetData(FilteredInventory),
ServeGetBlocks(db::BestBlock, H256),
ServeGetHeaders(db::BestBlock, H256),
ServeGetBlockTxn(H256, Vec<usize>),
ServeMempool,
ReturnNotFound(Vec<InventoryVector>),
ReturnBlock(H256),
@ -187,6 +189,15 @@ impl SynchronizationServer {
None => unknown_items.push(item),
}
},
InventoryType::MessageTx => {
match chain.transaction_by_hash(&item.hash) {
Some(transaction) => {
let task = IndexedServerTask::new(ServerTask::ReturnTransaction(transaction), ServerTaskIndex::None);
new_tasks.push(task);
},
None => unknown_items.push(item),
}
},
_ => (), // TODO: process other inventory types
}
}
@ -235,6 +246,45 @@ impl SynchronizationServer {
// inform that we have processed task for peer
queue.lock().task_processed(peer_index);
},
// `getblocktxn` => `blocktxn`
ServerTask::ServeGetBlockTxn(block_hash, indexes) => {
let transactions = {
let chain = chain.read();
let storage = chain.storage();
if let Some(block) = storage.block(db::BlockRef::Hash(block_hash.clone())) {
let requested_len = indexes.len();
let transactions_len = block.transactions.len();
let mut read_indexes = HashSet::new();
let transactions: Vec<_> = indexes.into_iter()
.map(|index| {
if index >= transactions_len {
None
} else if !read_indexes.insert(index) {
None
} else {
Some(block.transactions[index].clone())
}
})
.take_while(Option::is_some)
.map(Option::unwrap) // take_while above
.collect();
if transactions.len() == requested_len {
Some(transactions)
} else {
// TODO: malformed
None
}
} else {
// TODO: else malformed
None
}
};
if let Some(transactions) = transactions {
trace!(target: "sync", "Going to respond with {} blocktxn transactions to peer#{}", transactions.len(), peer_index);
executor.lock().execute(Task::SendBlockTxn(peer_index, block_hash, transactions));
}
},
// `mempool` => `inventory`
ServerTask::ServeMempool => {
let inventory: Vec<_> = chain.read()
@ -399,6 +449,13 @@ impl Server for SynchronizationServer {
}
}
fn serve_get_block_txn(&self, _peer_index: usize, block_hash: H256, indexes: Vec<usize>) -> Option<IndexedServerTask> {
// TODO: Upon receipt of a properly-formatted getblocktxn message, nodes which *recently provided the sender
// of such a message a cmpctblock for the block hash identified in this message* MUST respond ...
let task = IndexedServerTask::new(ServerTask::ServeGetBlockTxn(block_hash, indexes), ServerTaskIndex::None);
Some(task)
}
fn serve_mempool(&self, _peer_index: usize) -> Option<IndexedServerTask> {
let task = IndexedServerTask::new(ServerTask::ServeMempool, ServerTaskIndex::None);
Some(task)
@ -539,6 +596,11 @@ pub mod tests {
None
}
fn serve_get_block_txn(&self, peer_index: usize, block_hash: H256, indexes: Vec<usize>) -> Option<IndexedServerTask> {
self.tasks.lock().push((peer_index, ServerTask::ServeGetBlockTxn(block_hash, indexes)));
None
}
fn serve_mempool(&self, peer_index: usize) -> Option<IndexedServerTask> {
self.tasks.lock().push((peer_index, ServerTask::ServeMempool));
None
@ -605,7 +667,7 @@ pub mod tests {
#[test]
fn server_getblocks_responds_inventory_when_have_unknown_blocks() {
let (chain, executor, server) = create_synchronization_server();
chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Db write error");
chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Db write error");
// when asking for blocks hashes
server.serve_getblocks(0, types::GetBlocks {
version: 0,
@ -640,7 +702,7 @@ pub mod tests {
#[test]
fn server_getheaders_responds_headers_when_have_unknown_blocks() {
let (chain, executor, server) = create_synchronization_server();
chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1()).expect("Db write error");
chain.write().insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Db write error");
// when asking for blocks hashes
let dummy_id = 0;
server.serve_getheaders(0, types::GetHeaders {
@ -683,4 +745,83 @@ pub mod tests {
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::SendInventory(0, inventory, ServerTaskIndex::None)]);
}
#[test]
fn server_get_block_txn_responds_when_good_request() {
let (_, executor, server) = create_synchronization_server();
// when asking for block_txns
server.serve_get_block_txn(0, test_data::genesis().hash(), vec![0]).map(|t| server.add_task(0, t));
// server responds with transactions
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::SendBlockTxn(0, test_data::genesis().hash(), vec![
test_data::genesis().transactions[0].clone()
])]);
}
#[test]
fn server_get_block_txn_do_not_responds_when_bad_request() {
let (_, executor, server) = create_synchronization_server();
// when asking for block_txns
server.serve_get_block_txn(0, test_data::genesis().hash(), vec![1]).map(|t| server.add_task(0, t));
// server responds with transactions
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![]);
}
#[test]
fn server_getdata_responds_notfound_when_transaction_is_inaccessible() {
let (_, executor, server) = create_synchronization_server();
// when asking for unknown transaction or transaction that is already in the storage
let inventory = vec![
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: H256::default(),
},
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: test_data::genesis().transactions[0].hash(),
},
];
server.serve_getdata(0, FilteredInventory::with_unfiltered(inventory.clone())).map(|t| server.add_task(0, t));
// => respond with notfound
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::SendNotFound(0, inventory, ServerTaskIndex::None)]);
}
#[test]
fn server_getdata_responds_transaction_when_transaction_is_in_memory() {
let (chain, executor, server) = create_synchronization_server();
let tx_verifying: Transaction = test_data::TransactionBuilder::with_output(10).into();
let tx_verifying_hash = tx_verifying.hash();
let tx_verified: Transaction = test_data::TransactionBuilder::with_output(20).into();
let tx_verified_hash = tx_verified.hash();
// given in-memory transaction
{
let mut chain = chain.write();
chain.verify_transaction(tx_verifying_hash.clone(), tx_verifying.clone());
chain.insert_verified_transaction(tx_verified.clone());
}
// when asking for known in-memory transaction
let inventory = vec![
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: tx_verifying_hash,
},
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: tx_verified_hash,
},
];
server.serve_getdata(0, FilteredInventory::with_unfiltered(inventory)).map(|t| server.add_task(0, t));
// => respond with transaction
let mut tasks = DummyTaskExecutor::wait_tasks(executor.clone());
// 2 tasks => can be situation when single task is ready
if tasks.len() != 2 {
tasks.extend(DummyTaskExecutor::wait_tasks_for(executor, 100));
}
assert_eq!(tasks, vec![
Task::SendTransaction(0, tx_verifying),
Task::SendTransaction(0, tx_verified),
]);
}
}

View File

@ -2,16 +2,17 @@ use std::thread;
use std::sync::Arc;
use std::sync::mpsc::{channel, Sender, Receiver};
use parking_lot::Mutex;
use chain::{Block, Transaction};
use message::common::ConsensusParams;
use chain::Transaction;
use network::{Magic, ConsensusParams};
use primitives::hash::H256;
use verification::{ChainVerifier, Verify as VerificationVerify};
use synchronization_chain::ChainRef;
use db::IndexedBlock;
/// Verification events sink
pub trait VerificationSink : Send + 'static {
/// When block verification has completed successfully.
fn on_block_verification_success(&mut self, block: Block);
fn on_block_verification_success(&mut self, block: IndexedBlock);
/// When block verification has failed.
fn on_block_verification_error(&mut self, err: &str, hash: &H256);
/// When transaction verification has completed successfully.
@ -23,7 +24,7 @@ pub trait VerificationSink : Send + 'static {
/// Verification thread tasks
enum VerificationTask {
/// Verify single block
VerifyBlock(Block),
VerifyBlock(IndexedBlock),
/// Verify single transaction
VerifyTransaction(Transaction),
/// Stop verification thread
@ -33,7 +34,7 @@ enum VerificationTask {
/// Synchronization verifier
pub trait Verifier : Send + 'static {
/// Verify block
fn verify_block(&self, block: Block);
fn verify_block(&self, block: IndexedBlock);
/// Verify transaction
fn verify_transaction(&self, transaction: Transaction);
}
@ -48,16 +49,16 @@ pub struct AsyncVerifier {
impl AsyncVerifier {
/// Create new async verifier
pub fn new<T: VerificationSink>(consensus_params: ConsensusParams, chain: ChainRef, sink: Arc<Mutex<T>>) -> Self {
pub fn new<T: VerificationSink>(network: Magic, chain: ChainRef, sink: Arc<Mutex<T>>) -> Self {
let (verification_work_sender, verification_work_receiver) = channel();
let storage = chain.read().storage();
let verifier = ChainVerifier::new(storage);
let verifier = ChainVerifier::new(storage, network);
AsyncVerifier {
verification_work_sender: verification_work_sender,
verification_worker_thread: Some(thread::Builder::new()
.name("Sync verification thread".to_string())
.spawn(move || {
AsyncVerifier::verification_worker_proc(sink, chain, consensus_params, verifier, verification_work_receiver)
AsyncVerifier::verification_worker_proc(sink, chain, network.consensus_params(), verifier, verification_work_receiver)
})
.expect("Error creating verification thread"))
}
@ -73,7 +74,7 @@ impl AsyncVerifier {
match task {
VerificationTask::VerifyBlock(block) => {
// for changes that are not relying on block#
let is_bip16_active_on_block = block.block_header.time >= bip16_time_border;
let is_bip16_active_on_block = block.header().time >= bip16_time_border;
let force_parameters_change = is_bip16_active_on_block != is_bip16_active;
if force_parameters_change {
parameters_change_steps = Some(0);
@ -132,7 +133,7 @@ impl Drop for AsyncVerifier {
impl Verifier for AsyncVerifier {
/// Verify block
fn verify_block(&self, block: Block) {
fn verify_block(&self, block: IndexedBlock) {
self.verification_work_sender
.send(VerificationTask::VerifyBlock(block))
.expect("Verification thread have the same lifetime as `AsyncVerifier`");
@ -151,11 +152,12 @@ pub mod tests {
use std::sync::Arc;
use std::collections::HashMap;
use parking_lot::Mutex;
use chain::{Block, Transaction};
use chain::Transaction;
use synchronization_client::SynchronizationClientCore;
use synchronization_executor::tests::DummyTaskExecutor;
use primitives::hash::H256;
use super::{Verifier, VerificationSink};
use db::IndexedBlock;
#[derive(Default)]
pub struct DummyVerifier {
@ -174,7 +176,7 @@ pub mod tests {
}
impl Verifier for DummyVerifier {
fn verify_block(&self, block: Block) {
fn verify_block(&self, block: IndexedBlock) {
match self.sink {
Some(ref sink) => match self.errors.get(&block.hash()) {
Some(err) => sink.lock().on_block_verification_error(&err, &block.hash()),

View File

@ -7,3 +7,4 @@ authors = ["Nikolay Volf <nikvolf@gmail.com>"]
chain = { path = "../chain" }
primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
time = "0.1"

View File

@ -5,6 +5,11 @@ use chain;
use primitives::hash::H256;
use primitives::bytes::Bytes;
use invoke::{Invoke, Identity};
use std::cell::Cell;
thread_local! {
pub static TIMESTAMP_COUNTER: Cell<u32> = Cell::new(0);
}
pub struct BlockHashBuilder<F=Identity> {
callback: F,
@ -182,7 +187,7 @@ impl<F> BlockHeaderBuilder<F> where F: Invoke<chain::BlockHeader> {
pub fn with_callback(callback: F) -> Self {
BlockHeaderBuilder {
callback: callback,
time: 0,
time: TIMESTAMP_COUNTER.with(|counter| { let val = counter.get(); counter.set(val+1); val }),
nonce: 0,
merkle_root: H256::from(0),
parent: H256::from(0),

View File

@ -3,6 +3,7 @@
extern crate chain;
extern crate primitives;
extern crate serialization as ser;
extern crate time;
use chain::Block;

View File

@ -22,6 +22,7 @@ cargo clippy -p import
cargo clippy -p keys
cargo clippy -p message
cargo clippy -p miner
cargo clippy -p network
cargo clippy -p p2p
cargo clippy -p primitives
cargo clippy -p script

View File

@ -1,93 +1,97 @@
digraph dependencies {
N0[label="pbtc",shape=box];
N1[label="app_dirs",shape=box];
N2[label="chain",shape=box];
N3[label="clap",shape=box];
N4[label="db",shape=box];
N5[label="env_logger",shape=box];
N6[label="import",shape=box];
N7[label="keys",shape=box];
N8[label="log",shape=box];
N9[label="message",shape=box];
N10[label="miner",shape=box];
N11[label="p2p",shape=box];
N12[label="script",shape=box];
N13[label="sync",shape=box];
N14[label="verification",shape=box];
N15[label="abstract-ns",shape=box];
N16[label="futures",shape=box];
N17[label="quick-error",shape=box];
N18[label="rand",shape=box];
N19[label="aho-corasick",shape=box];
N20[label="memchr",shape=box];
N21[label="ansi_term",shape=box];
N22[label="ole32-sys",shape=box];
N23[label="shell32-sys",shape=box];
N24[label="winapi",shape=box];
N25[label="xdg",shape=box];
N26[label="arrayvec",shape=box];
N27[label="nodrop",shape=box];
N28[label="odds",shape=box];
N29[label="base58",shape=box];
N30[label="bit-vec",shape=box];
N31[label="bitcrypto",shape=box];
N32[label="primitives",shape=box];
N33[label="rust-crypto",shape=box];
N34[label="bitflags v0.4.0",shape=box];
N35[label="bitflags v0.7.0",shape=box];
N36[label="byteorder",shape=box];
N37[label="cfg-if",shape=box];
N38[label="heapsize",shape=box];
N39[label="rustc-serialize",shape=box];
N40[label="serialization",shape=box];
N41[label="libc",shape=box];
N42[label="strsim",shape=box];
N43[label="term_size",shape=box];
N44[label="unicode-segmentation",shape=box];
N45[label="unicode-width",shape=box];
N46[label="vec_map",shape=box];
N47[label="yaml-rust",shape=box];
N48[label="crossbeam",shape=box];
N49[label="csv",shape=box];
N50[label="elastic-array",shape=box];
N51[label="ethcore-devtools",shape=box];
N52[label="parking_lot",shape=box];
N53[label="rocksdb",shape=box];
N54[label="test-data",shape=box];
N55[label="deque",shape=box];
N56[label="domain",shape=box];
N57[label="tokio-core",shape=box];
N58[label="void",shape=box];
N59[label="regex",shape=box];
N60[label="eth-secp256k1",shape=box];
N61[label="gcc",shape=box];
N62[label="futures-cpupool",shape=box];
N63[label="num_cpus",shape=box];
N64[label="rayon",shape=box];
N65[label="kernel32-sys",shape=box];
N66[label="winapi-build",shape=box];
N67[label="lazy_static",shape=box];
N68[label="lazycell",shape=box];
N69[label="linked-hash-map",shape=box];
N70[label="mio",shape=box];
N71[label="miow",shape=box];
N72[label="net2",shape=box];
N73[label="nix",shape=box];
N74[label="slab",shape=box];
N75[label="ws2_32-sys",shape=box];
N76[label="rustc_version",shape=box];
N77[label="semver",shape=box];
N78[label="ns-dns-tokio",shape=box];
N79[label="owning_ref",shape=box];
N80[label="time",shape=box];
N81[label="parking_lot_core",shape=box];
N82[label="smallvec",shape=box];
N83[label="regex-syntax",shape=box];
N84[label="thread_local",shape=box];
N85[label="utf8-ranges",shape=box];
N86[label="rocksdb-sys",shape=box];
N87[label="scoped-tls",shape=box];
N88[label="thread-id",shape=box];
N2[label="bencher",shape=box];
N3[label="chain",shape=box];
N4[label="clap",shape=box];
N5[label="db",shape=box];
N6[label="env_logger",shape=box];
N7[label="import",shape=box];
N8[label="keys",shape=box];
N9[label="log",shape=box];
N10[label="message",shape=box];
N11[label="miner",shape=box];
N12[label="network",shape=box];
N13[label="p2p",shape=box];
N14[label="script",shape=box];
N15[label="sync",shape=box];
N16[label="verification",shape=box];
N17[label="abstract-ns",shape=box];
N18[label="futures",shape=box];
N19[label="quick-error",shape=box];
N20[label="rand",shape=box];
N21[label="aho-corasick",shape=box];
N22[label="memchr",shape=box];
N23[label="ansi_term",shape=box];
N24[label="ole32-sys",shape=box];
N25[label="shell32-sys",shape=box];
N26[label="winapi",shape=box];
N27[label="xdg",shape=box];
N28[label="arrayvec",shape=box];
N29[label="nodrop",shape=box];
N30[label="odds",shape=box];
N31[label="base58",shape=box];
N32[label="ethcore-devtools",shape=box];
N33[label="primitives",shape=box];
N34[label="test-data",shape=box];
N35[label="time",shape=box];
N36[label="bit-vec",shape=box];
N37[label="bitcrypto",shape=box];
N38[label="rust-crypto",shape=box];
N39[label="siphasher",shape=box];
N40[label="bitflags v0.4.0",shape=box];
N41[label="bitflags v0.7.0",shape=box];
N42[label="byteorder",shape=box];
N43[label="cfg-if",shape=box];
N44[label="heapsize",shape=box];
N45[label="rustc-serialize",shape=box];
N46[label="serialization",shape=box];
N47[label="libc",shape=box];
N48[label="strsim",shape=box];
N49[label="term_size",shape=box];
N50[label="unicode-segmentation",shape=box];
N51[label="unicode-width",shape=box];
N52[label="vec_map",shape=box];
N53[label="yaml-rust",shape=box];
N54[label="crossbeam",shape=box];
N55[label="csv",shape=box];
N56[label="elastic-array",shape=box];
N57[label="parking_lot",shape=box];
N58[label="rocksdb",shape=box];
N59[label="deque",shape=box];
N60[label="domain",shape=box];
N61[label="tokio-core",shape=box];
N62[label="regex",shape=box];
N63[label="eth-secp256k1",shape=box];
N64[label="gcc",shape=box];
N65[label="futures-cpupool",shape=box];
N66[label="num_cpus",shape=box];
N67[label="rayon",shape=box];
N68[label="kernel32-sys",shape=box];
N69[label="winapi-build",shape=box];
N70[label="lazy_static",shape=box];
N71[label="lazycell",shape=box];
N72[label="linked-hash-map",shape=box];
N73[label="mio",shape=box];
N74[label="miow",shape=box];
N75[label="net2",shape=box];
N76[label="nix",shape=box];
N77[label="slab",shape=box];
N78[label="ws2_32-sys",shape=box];
N79[label="murmur3",shape=box];
N80[label="rustc_version",shape=box];
N81[label="semver",shape=box];
N82[label="void",shape=box];
N83[label="ns-dns-tokio",shape=box];
N84[label="owning_ref",shape=box];
N85[label="parking_lot_core",shape=box];
N86[label="smallvec",shape=box];
N87[label="regex-syntax",shape=box];
N88[label="thread_local",shape=box];
N89[label="utf8-ranges",shape=box];
N90[label="rocksdb-sys",shape=box];
N91[label="scoped-tls",shape=box];
N92[label="thread-id",shape=box];
N0 -> N1[label="",style=dashed];
N0 -> N2[label="",style=dashed];
N0 -> N3[label="",style=dashed];
@ -102,220 +106,246 @@ digraph dependencies {
N0 -> N12[label="",style=dashed];
N0 -> N13[label="",style=dashed];
N0 -> N14[label="",style=dashed];
N1 -> N22[label="",style=dashed];
N1 -> N23[label="",style=dashed];
N0 -> N15[label="",style=dashed];
N0 -> N16[label="",style=dashed];
N1 -> N24[label="",style=dashed];
N1 -> N25[label="",style=dashed];
N2 -> N31[label="",style=dashed];
N1 -> N26[label="",style=dashed];
N1 -> N27[label="",style=dashed];
N2 -> N3[label="",style=dashed];
N2 -> N5[label="",style=dashed];
N2 -> N16[label="",style=dashed];
N2 -> N32[label="",style=dashed];
N2 -> N38[label="",style=dashed];
N2 -> N39[label="",style=dashed];
N2 -> N40[label="",style=dashed];
N3 -> N21[label="",style=dashed];
N3 -> N35[label="",style=dashed];
N3 -> N41[label="",style=dashed];
N3 -> N42[label="",style=dashed];
N3 -> N43[label="",style=dashed];
N2 -> N33[label="",style=dashed];
N2 -> N34[label="",style=dashed];
N2 -> N35[label="",style=dashed];
N3 -> N33[label="",style=dashed];
N3 -> N37[label="",style=dashed];
N3 -> N44[label="",style=dashed];
N3 -> N45[label="",style=dashed];
N3 -> N46[label="",style=dashed];
N3 -> N47[label="",style=dashed];
N4 -> N2[label="",style=dashed];
N4 -> N8[label="",style=dashed];
N4 -> N30[label="",style=dashed];
N4 -> N32[label="",style=dashed];
N4 -> N36[label="",style=dashed];
N4 -> N40[label="",style=dashed];
N4 -> N23[label="",style=dashed];
N4 -> N41[label="",style=dashed];
N4 -> N47[label="",style=dashed];
N4 -> N48[label="",style=dashed];
N4 -> N49[label="",style=dashed];
N4 -> N50[label="",style=dashed];
N4 -> N51[label="",style=dashed];
N4 -> N52[label="",style=dashed];
N4 -> N53[label="",style=dashed];
N4 -> N54[label="",style=dashed];
N5 -> N8[label="",style=dashed];
N5 -> N59[label="",style=dashed];
N6 -> N2[label="",style=dashed];
N6 -> N8[label="",style=dashed];
N6 -> N32[label="",style=dashed];
N6 -> N40[label="",style=dashed];
N7 -> N18[label="",style=dashed];
N7 -> N29[label="",style=dashed];
N7 -> N31[label="",style=dashed];
N7 -> N32[label="",style=dashed];
N7 -> N39[label="",style=dashed];
N7 -> N60[label="",style=dashed];
N7 -> N67[label="",style=dashed];
N9 -> N2[label="",style=dashed];
N9 -> N31[label="",style=dashed];
N9 -> N32[label="",style=dashed];
N9 -> N36[label="",style=dashed];
N9 -> N40[label="",style=dashed];
N10 -> N2[label="",style=dashed];
N10 -> N32[label="",style=dashed];
N10 -> N38[label="",style=dashed];
N10 -> N40[label="",style=dashed];
N10 -> N54[label="",style=dashed];
N11 -> N8[label="",style=dashed];
N11 -> N9[label="",style=dashed];
N11 -> N15[label="",style=dashed];
N11 -> N16[label="",style=dashed];
N11 -> N18[label="",style=dashed];
N11 -> N31[label="",style=dashed];
N11 -> N32[label="",style=dashed];
N11 -> N40[label="",style=dashed];
N11 -> N49[label="",style=dashed];
N11 -> N52[label="",style=dashed];
N11 -> N57[label="",style=dashed];
N11 -> N62[label="",style=dashed];
N11 -> N78[label="",style=dashed];
N11 -> N80[label="",style=dashed];
N12 -> N2[label="",style=dashed];
N12 -> N7[label="",style=dashed];
N12 -> N31[label="",style=dashed];
N12 -> N32[label="",style=dashed];
N12 -> N40[label="",style=dashed];
N13 -> N2[label="",style=dashed];
N13 -> N4[label="",style=dashed];
N13 -> N8[label="",style=dashed];
N5 -> N3[label="",style=dashed];
N5 -> N9[label="",style=dashed];
N5 -> N32[label="",style=dashed];
N5 -> N33[label="",style=dashed];
N5 -> N34[label="",style=dashed];
N5 -> N36[label="",style=dashed];
N5 -> N42[label="",style=dashed];
N5 -> N46[label="",style=dashed];
N5 -> N56[label="",style=dashed];
N5 -> N57[label="",style=dashed];
N5 -> N58[label="",style=dashed];
N6 -> N9[label="",style=dashed];
N6 -> N62[label="",style=dashed];
N7 -> N3[label="",style=dashed];
N7 -> N9[label="",style=dashed];
N7 -> N33[label="",style=dashed];
N7 -> N46[label="",style=dashed];
N8 -> N20[label="",style=dashed];
N8 -> N31[label="",style=dashed];
N8 -> N33[label="",style=dashed];
N8 -> N37[label="",style=dashed];
N8 -> N45[label="",style=dashed];
N8 -> N63[label="",style=dashed];
N8 -> N70[label="",style=dashed];
N10 -> N3[label="",style=dashed];
N10 -> N12[label="",style=dashed];
N10 -> N33[label="",style=dashed];
N10 -> N37[label="",style=dashed];
N10 -> N42[label="",style=dashed];
N10 -> N46[label="",style=dashed];
N11 -> N3[label="",style=dashed];
N11 -> N5[label="",style=dashed];
N11 -> N33[label="",style=dashed];
N11 -> N34[label="",style=dashed];
N11 -> N44[label="",style=dashed];
N11 -> N46[label="",style=dashed];
N12 -> N3[label="",style=dashed];
N12 -> N33[label="",style=dashed];
N12 -> N46[label="",style=dashed];
N13 -> N9[label="",style=dashed];
N13 -> N10[label="",style=dashed];
N13 -> N11[label="",style=dashed];
N13 -> N14[label="",style=dashed];
N13 -> N16[label="",style=dashed];
N13 -> N32[label="",style=dashed];
N13 -> N51[label="",style=dashed];
N13 -> N52[label="",style=dashed];
N13 -> N54[label="",style=dashed];
N13 -> N12[label="",style=dashed];
N13 -> N17[label="",style=dashed];
N13 -> N18[label="",style=dashed];
N13 -> N20[label="",style=dashed];
N13 -> N33[label="",style=dashed];
N13 -> N35[label="",style=dashed];
N13 -> N37[label="",style=dashed];
N13 -> N46[label="",style=dashed];
N13 -> N55[label="",style=dashed];
N13 -> N57[label="",style=dashed];
N13 -> N62[label="",style=dashed];
N13 -> N69[label="",style=dashed];
N13 -> N80[label="",style=dashed];
N14 -> N2[label="",style=dashed];
N14 -> N4[label="",style=dashed];
N13 -> N61[label="",style=dashed];
N13 -> N65[label="",style=dashed];
N13 -> N83[label="",style=dashed];
N14 -> N3[label="",style=dashed];
N14 -> N8[label="",style=dashed];
N14 -> N12[label="",style=dashed];
N14 -> N32[label="",style=dashed];
N14 -> N36[label="",style=dashed];
N14 -> N40[label="",style=dashed];
N14 -> N51[label="",style=dashed];
N14 -> N52[label="",style=dashed];
N14 -> N54[label="",style=dashed];
N14 -> N69[label="",style=dashed];
N14 -> N80[label="",style=dashed];
N14 -> N9[label="",style=dashed];
N14 -> N33[label="",style=dashed];
N14 -> N37[label="",style=dashed];
N14 -> N46[label="",style=dashed];
N15 -> N3[label="",style=dashed];
N15 -> N5[label="",style=dashed];
N15 -> N9[label="",style=dashed];
N15 -> N10[label="",style=dashed];
N15 -> N11[label="",style=dashed];
N15 -> N12[label="",style=dashed];
N15 -> N13[label="",style=dashed];
N15 -> N14[label="",style=dashed];
N15 -> N16[label="",style=dashed];
N15 -> N17[label="",style=dashed];
N15 -> N18[label="",style=dashed];
N16 -> N8[label="",style=dashed];
N18 -> N41[label="",style=dashed];
N19 -> N20[label="",style=dashed];
N20 -> N41[label="",style=dashed];
N22 -> N24[label="",style=dashed];
N22 -> N66[label="",style=dashed];
N23 -> N24[label="",style=dashed];
N23 -> N66[label="",style=dashed];
N26 -> N27[label=""];
N26 -> N28[label=""];
N27 -> N28[label=""];
N31 -> N32[label="",style=dashed];
N31 -> N33[label="",style=dashed];
N32 -> N38[label="",style=dashed];
N32 -> N39[label="",style=dashed];
N33 -> N18[label="",style=dashed];
N33 -> N39[label="",style=dashed];
N33 -> N41[label="",style=dashed];
N33 -> N61[label="",style=dashed];
N15 -> N20[label="",style=dashed];
N15 -> N32[label="",style=dashed];
N15 -> N33[label="",style=dashed];
N15 -> N34[label="",style=dashed];
N15 -> N35[label="",style=dashed];
N15 -> N36[label="",style=dashed];
N15 -> N37[label="",style=dashed];
N15 -> N42[label="",style=dashed];
N15 -> N46[label="",style=dashed];
N15 -> N57[label="",style=dashed];
N15 -> N61[label="",style=dashed];
N15 -> N65[label="",style=dashed];
N15 -> N72[label="",style=dashed];
N15 -> N79[label="",style=dashed];
N16 -> N3[label="",style=dashed];
N16 -> N5[label="",style=dashed];
N16 -> N9[label="",style=dashed];
N16 -> N12[label="",style=dashed];
N16 -> N14[label="",style=dashed];
N16 -> N32[label="",style=dashed];
N16 -> N33[label="",style=dashed];
N16 -> N34[label="",style=dashed];
N16 -> N35[label="",style=dashed];
N16 -> N42[label="",style=dashed];
N16 -> N46[label="",style=dashed];
N16 -> N57[label="",style=dashed];
N16 -> N72[label="",style=dashed];
N17 -> N18[label="",style=dashed];
N17 -> N19[label="",style=dashed];
N17 -> N20[label="",style=dashed];
N18 -> N9[label="",style=dashed];
N20 -> N47[label="",style=dashed];
N21 -> N22[label="",style=dashed];
N22 -> N47[label="",style=dashed];
N24 -> N26[label="",style=dashed];
N24 -> N69[label="",style=dashed];
N25 -> N26[label="",style=dashed];
N25 -> N69[label="",style=dashed];
N28 -> N29[label=""];
N28 -> N30[label=""];
N29 -> N30[label=""];
N32 -> N20[label="",style=dashed];
N33 -> N44[label="",style=dashed];
N33 -> N45[label="",style=dashed];
N33 -> N80[label="",style=dashed];
N38 -> N65[label="",style=dashed];
N40 -> N32[label="",style=dashed];
N40 -> N36[label="",style=dashed];
N43 -> N24[label="",style=dashed];
N43 -> N41[label="",style=dashed];
N43 -> N65[label="",style=dashed];
N49 -> N36[label="",style=dashed];
N49 -> N39[label="",style=dashed];
N51 -> N8[label="",style=dashed];
N51 -> N18[label="",style=dashed];
N52 -> N79[label="",style=dashed];
N52 -> N81[label="",style=dashed];
N53 -> N41[label="",style=dashed];
N53 -> N86[label="",style=dashed];
N54 -> N2[label="",style=dashed];
N54 -> N32[label="",style=dashed];
N54 -> N40[label="",style=dashed];
N55 -> N18[label="",style=dashed];
N56 -> N16[label="",style=dashed];
N56 -> N18[label="",style=dashed];
N56 -> N36[label="",style=dashed];
N56 -> N48[label="",style=dashed];
N56 -> N57[label="",style=dashed];
N56 -> N58[label="",style=dashed];
N57 -> N8[label="",style=dashed];
N57 -> N16[label="",style=dashed];
N57 -> N70[label="",style=dashed];
N57 -> N74[label="",style=dashed];
N57 -> N87[label="",style=dashed];
N59 -> N19[label="",style=dashed];
N34 -> N3[label="",style=dashed];
N34 -> N33[label="",style=dashed];
N34 -> N35[label="",style=dashed];
N34 -> N46[label="",style=dashed];
N35 -> N26[label="",style=dashed];
N35 -> N47[label="",style=dashed];
N35 -> N68[label="",style=dashed];
N37 -> N33[label="",style=dashed];
N37 -> N38[label="",style=dashed];
N37 -> N39[label="",style=dashed];
N38 -> N20[label="",style=dashed];
N38 -> N35[label="",style=dashed];
N38 -> N45[label="",style=dashed];
N38 -> N47[label="",style=dashed];
N38 -> N64[label="",style=dashed];
N44 -> N68[label="",style=dashed];
N46 -> N33[label="",style=dashed];
N46 -> N42[label="",style=dashed];
N49 -> N26[label="",style=dashed];
N49 -> N47[label="",style=dashed];
N49 -> N68[label="",style=dashed];
N55 -> N42[label="",style=dashed];
N55 -> N45[label="",style=dashed];
N57 -> N84[label="",style=dashed];
N57 -> N85[label="",style=dashed];
N58 -> N47[label="",style=dashed];
N58 -> N90[label="",style=dashed];
N59 -> N20[label="",style=dashed];
N59 -> N83[label="",style=dashed];
N59 -> N84[label="",style=dashed];
N59 -> N85[label="",style=dashed];
N60 -> N18[label="",style=dashed];
N60 -> N26[label="",style=dashed];
N60 -> N39[label="",style=dashed];
N60 -> N41[label="",style=dashed];
N60 -> N20[label="",style=dashed];
N60 -> N42[label="",style=dashed];
N60 -> N61[label="",style=dashed];
N61 -> N64[label="",style=dashed];
N62 -> N16[label="",style=dashed];
N62 -> N48[label="",style=dashed];
N62 -> N63[label="",style=dashed];
N63 -> N41[label="",style=dashed];
N64 -> N18[label="",style=dashed];
N64 -> N41[label="",style=dashed];
N64 -> N55[label="",style=dashed];
N64 -> N63[label="",style=dashed];
N65 -> N24[label="",style=dashed];
N61 -> N9[label="",style=dashed];
N61 -> N18[label="",style=dashed];
N61 -> N73[label="",style=dashed];
N61 -> N77[label="",style=dashed];
N61 -> N91[label="",style=dashed];
N62 -> N21[label="",style=dashed];
N62 -> N22[label="",style=dashed];
N62 -> N87[label="",style=dashed];
N62 -> N88[label="",style=dashed];
N62 -> N89[label="",style=dashed];
N63 -> N20[label="",style=dashed];
N63 -> N28[label="",style=dashed];
N63 -> N45[label="",style=dashed];
N63 -> N47[label="",style=dashed];
N63 -> N64[label="",style=dashed];
N64 -> N67[label="",style=dashed];
N65 -> N18[label="",style=dashed];
N65 -> N54[label="",style=dashed];
N65 -> N66[label="",style=dashed];
N70 -> N8[label="",style=dashed];
N70 -> N24[label="",style=dashed];
N70 -> N41[label="",style=dashed];
N70 -> N65[label="",style=dashed];
N70 -> N68[label="",style=dashed];
N70 -> N71[label="",style=dashed];
N70 -> N72[label="",style=dashed];
N70 -> N73[label="",style=dashed];
N70 -> N74[label="",style=dashed];
N71 -> N24[label="",style=dashed];
N71 -> N65[label="",style=dashed];
N71 -> N72[label="",style=dashed];
N71 -> N75[label="",style=dashed];
N72 -> N24[label="",style=dashed];
N72 -> N37[label="",style=dashed];
N72 -> N41[label="",style=dashed];
N72 -> N65[label="",style=dashed];
N72 -> N75[label="",style=dashed];
N73 -> N34[label="",style=dashed];
N73 -> N37[label="",style=dashed];
N73 -> N41[label="",style=dashed];
N73 -> N58[label="",style=dashed];
N66 -> N47[label="",style=dashed];
N67 -> N20[label="",style=dashed];
N67 -> N47[label="",style=dashed];
N67 -> N59[label="",style=dashed];
N67 -> N66[label="",style=dashed];
N68 -> N26[label="",style=dashed];
N68 -> N69[label="",style=dashed];
N73 -> N9[label="",style=dashed];
N73 -> N26[label="",style=dashed];
N73 -> N47[label="",style=dashed];
N73 -> N68[label="",style=dashed];
N73 -> N71[label="",style=dashed];
N73 -> N74[label="",style=dashed];
N73 -> N75[label="",style=dashed];
N73 -> N76[label="",style=dashed];
N73 -> N77[label="",style=dashed];
N75 -> N24[label="",style=dashed];
N75 -> N66[label="",style=dashed];
N76 -> N77[label="",style=dashed];
N78 -> N15[label="",style=dashed];
N78 -> N16[label="",style=dashed];
N78 -> N56[label="",style=dashed];
N78 -> N57[label="",style=dashed];
N80 -> N24[label="",style=dashed];
N80 -> N41[label="",style=dashed];
N80 -> N65[label="",style=dashed];
N81 -> N18[label="",style=dashed];
N81 -> N24[label="",style=dashed];
N81 -> N41[label="",style=dashed];
N81 -> N65[label="",style=dashed];
N81 -> N82[label="",style=dashed];
N84 -> N88[label="",style=dashed];
N86 -> N41[label="",style=dashed];
N86 -> N61[label="",style=dashed];
N88 -> N41[label="",style=dashed];
N88 -> N65[label="",style=dashed];
N74 -> N26[label="",style=dashed];
N74 -> N68[label="",style=dashed];
N74 -> N75[label="",style=dashed];
N74 -> N78[label="",style=dashed];
N75 -> N26[label="",style=dashed];
N75 -> N43[label="",style=dashed];
N75 -> N47[label="",style=dashed];
N75 -> N68[label="",style=dashed];
N75 -> N78[label="",style=dashed];
N76 -> N40[label="",style=dashed];
N76 -> N43[label="",style=dashed];
N76 -> N47[label="",style=dashed];
N76 -> N80[label="",style=dashed];
N76 -> N81[label="",style=dashed];
N76 -> N82[label="",style=dashed];
N78 -> N26[label="",style=dashed];
N78 -> N69[label="",style=dashed];
N79 -> N42[label="",style=dashed];
N80 -> N81[label="",style=dashed];
N83 -> N17[label="",style=dashed];
N83 -> N18[label="",style=dashed];
N83 -> N60[label="",style=dashed];
N83 -> N61[label="",style=dashed];
N85 -> N20[label="",style=dashed];
N85 -> N26[label="",style=dashed];
N85 -> N47[label="",style=dashed];
N85 -> N68[label="",style=dashed];
N85 -> N86[label="",style=dashed];
N88 -> N92[label="",style=dashed];
N90 -> N47[label="",style=dashed];
N90 -> N64[label="",style=dashed];
N92 -> N47[label="",style=dashed];
N92 -> N68[label="",style=dashed];
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 858 KiB

After

Width:  |  Height:  |  Size: 1.0 MiB

View File

@ -9,6 +9,7 @@ cargo test\
-p keys\
-p message\
-p miner\
-p network\
-p pbtc\
-p p2p\
-p primitives\

View File

@ -4,18 +4,17 @@ version = "0.1.0"
authors = ["Nikolay Volf <nikvolf@gmail.com>"]
[dependencies]
byteorder = "0.5"
parking_lot = "0.3"
linked-hash-map = "0.3"
time = "0.1"
log = "0.3"
ethcore-devtools = { path = "../devtools" }
primitives = { path = "../primitives" }
chain = { path = "../chain" }
serialization = { path = "../serialization" }
parking_lot = "0.3"
linked-hash-map = "0.3"
test-data = { path = "../test-data" }
byteorder = "0.5"
time = "0.1"
script = { path = "../script" }
log = "0.3"
[dependencies.db]
path = "../db"
features = ["dev"]
network = { path = "../network" }
db = { path = "../db", features = ["dev"] }

View File

@ -1,33 +1,35 @@
//! Bitcoin chain verifier
use std::collections::BTreeSet;
use db::{self, BlockRef, BlockLocation};
use chain;
use network::Magic;
use script::Script;
use super::{Verify, VerificationResult, Chain, Error, TransactionError, ContinueVerify};
use utils;
use {chain, utils};
const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours
const COINBASE_MATURITY: u32 = 100; // 2 hours
const MAX_BLOCK_SIGOPS: usize = 20000;
const MAX_BLOCK_SIZE: usize = 1000000;
const BIP16_TIME: u32 = 1333238400;
pub struct ChainVerifier {
store: db::SharedStore,
verify_p2sh: bool,
verify_clocktimeverify: bool,
skip_pow: bool,
skip_sig: bool,
network: Magic,
}
impl ChainVerifier {
pub fn new(store: db::SharedStore) -> Self {
pub fn new(store: db::SharedStore, network: Magic) -> Self {
ChainVerifier {
store: store,
verify_p2sh: false,
verify_clocktimeverify: false,
skip_pow: false,
skip_sig: false
skip_sig: false,
network: network,
}
}
@ -53,45 +55,106 @@ impl ChainVerifier {
self
}
fn ordered_verify(&self, block: &chain::Block, at_height: u32) -> Result<(), Error> {
/// Returns previous transaction output.
/// NOTE: This function expects all previous blocks to be already in database.
fn previous_transaction_output(&self, block: &db::IndexedBlock, prevout: &chain::OutPoint) -> Option<chain::TransactionOutput> {
self.store.transaction(&prevout.hash)
.as_ref()
.or_else(|| block.transactions().find(|&(hash, _)| hash == &prevout.hash).and_then(|(_, tx)| Some(tx)))
.and_then(|tx| tx.outputs.iter().nth(prevout.index as usize).cloned())
}
let coinbase_spends = block.transactions()[0].total_spends();
/// Returns number of transaction signature operations.
/// NOTE: This function expects all previous blocks to be already in database.
fn transaction_sigops(&self, block: &db::IndexedBlock, transaction: &chain::Transaction, bip16_active: bool) -> usize {
let output_sigops: usize = transaction.outputs.iter().map(|output| {
let output_script: Script = output.script_pubkey.clone().into();
output_script.sigops_count(false)
}).sum();
if transaction.is_coinbase() {
return output_sigops;
}
let input_sigops: usize = transaction.inputs.iter().map(|input| {
let input_script: Script = input.script_sig.clone().into();
let mut sigops = input_script.sigops_count(false);
if bip16_active {
let previous_output = self.previous_transaction_output(block, &input.previous_output)
.expect("missing tx, out of order verification or malformed db");
let prevout_script: Script = previous_output.script_pubkey.into();
sigops += input_script.pay_to_script_hash_sigops(&prevout_script);
}
sigops
}).sum();
input_sigops + output_sigops
}
/// Returns number of block signature operations.
/// NOTE: This function expects all previous blocks to be already in database.
fn block_sigops(&self, block: &db::IndexedBlock) -> usize {
// strict pay-to-script-hash signature operations count toward block
// signature operations limit is enforced with BIP16
let bip16_active = block.header().time >= self.network.consensus_params().bip16_time;
block.transactions().map(|(_, tx)| self.transaction_sigops(block, tx, bip16_active)).sum()
}
fn ordered_verify(&self, block: &db::IndexedBlock, at_height: u32) -> Result<(), Error> {
if !block.is_final(at_height) {
return Err(Error::NonFinalBlock);
}
// transaction verification including number of signature operations checking
if self.block_sigops(block) > MAX_BLOCK_SIGOPS {
return Err(Error::MaximumSigops);
}
let block_hash = block.hash();
let consensus_params = self.network.consensus_params();
// check that difficulty matches the adjusted level
if let Some(work) = self.work_required(block, at_height) {
if !self.skip_pow && work != block.header().nbits {
trace!(target: "verification", "pow verification error at height: {}", at_height);
trace!(target: "verification", "expected work: {}, got {}", work, block.header().nbits);
return Err(Error::Difficulty);
}
}
let coinbase_spends = block.transactions()
.nth(0)
.expect("block emptyness should be checked at this point")
.1
.total_spends();
// bip30
for (tx_index, (tx_hash, _)) in block.transactions().enumerate() {
if let Some(meta) = self.store.transaction_meta(tx_hash) {
if !meta.is_fully_spent() && !consensus_params.is_bip30_exception(&block_hash, at_height) {
return Err(Error::Transaction(tx_index, TransactionError::UnspentTransactionWithTheSameHash));
}
}
}
let mut total_unspent = 0u64;
for (tx_index, tx) in block.transactions().iter().enumerate().skip(1) {
for (tx_index, (_, tx)) in block.transactions().enumerate().skip(1) {
let mut total_claimed: u64 = 0;
for (_, input) in tx.inputs.iter().enumerate() {
for input in &tx.inputs {
// Coinbase maturity check
if let Some(previous_meta) = self.store.transaction_meta(&input.previous_output.hash) {
// check if it exists only
// it will fail a little later if there is no transaction at all
if previous_meta.is_coinbase() &&
(at_height < COINBASE_MATURITY || at_height - COINBASE_MATURITY < previous_meta.height())
{
(at_height < COINBASE_MATURITY || at_height - COINBASE_MATURITY < previous_meta.height()) {
return Err(Error::Transaction(tx_index, TransactionError::Maturity));
}
}
let reference_tx = try!(
let previous_output = self.previous_transaction_output(block, &input.previous_output)
.expect("missing tx, out of order verification or malformed db");
self.store.transaction(&input.previous_output.hash)
// todo: optimize block decomposition vec<transaction> -> hashmap<h256, transaction>
.or(block.transactions().iter().find(|tx| !tx.is_coinbase() && tx.hash() == input.previous_output.hash).cloned())
.ok_or(
Error::Transaction(tx_index, TransactionError::UnknownReference(input.previous_output.hash.clone()))
)
);
let output = try!(reference_tx.outputs.get(input.previous_output.index as usize)
.ok_or(
Error::Transaction(tx_index, TransactionError::Input(input.previous_output.index as usize))
)
);
total_claimed += output.value;
total_claimed += previous_output.value;
}
let total_spends = tx.total_spends();
@ -113,10 +176,10 @@ impl ChainVerifier {
}
fn verify_transaction(&self,
block: &chain::Block,
block: &db::IndexedBlock,
transaction: &chain::Transaction,
sequence: usize,
) -> Result<usize, TransactionError> {
) -> Result<(), TransactionError> {
use script::{
TransactionInputSigner,
TransactionSignatureChecker,
@ -125,46 +188,27 @@ impl ChainVerifier {
verify_script,
};
let mut sigops = utils::transaction_sigops(transaction)
.map_err(|e| TransactionError::SignatureMallformed(e.to_string()))?;
if sequence == 0 { return Ok(sigops); }
if sequence == 0 {
return Ok(());
}
// must not be coinbase (sequence = 0 is returned above)
if transaction.is_coinbase() { return Err(TransactionError::MisplacedCoinbase(sequence)); }
if sigops >= MAX_BLOCK_SIGOPS { return Err(TransactionError::Sigops(sigops)); }
// strict pay-to-script-hash signature operations count toward block
// signature operations limit is enforced with BIP16
let is_strict_p2sh = block.header().time >= BIP16_TIME;
for (input_index, input) in transaction.inputs().iter().enumerate() {
let store_parent_transaction = self.store.transaction(&input.previous_output.hash);
let parent_transaction = store_parent_transaction
.as_ref()
.or_else(|| block.transactions.iter().find(|t| t.hash() == input.previous_output.hash))
.ok_or_else(|| TransactionError::Inconclusive(input.previous_output.hash.clone()))?;
if parent_transaction.outputs.len() <= input.previous_output.index as usize {
return Err(TransactionError::Input(input_index));
}
// signature verification
let signer: TransactionInputSigner = transaction.clone().into();
let paired_output = &parent_transaction.outputs[input.previous_output.index as usize];
let paired_output = match self.previous_transaction_output(block, &input.previous_output) {
Some(output) => output,
_ => return Err(TransactionError::Inconclusive(input.previous_output.hash.clone()))
};
let checker = TransactionSignatureChecker {
signer: signer,
input_index: input_index,
};
let input: Script = input.script_sig().to_vec().into();
let output: Script = paired_output.script_pubkey.to_vec().into();
if is_strict_p2sh && output.is_pay_to_script_hash() {
sigops += utils::p2sh_sigops(&output, &input);
if sigops >= MAX_BLOCK_SIGOPS { return Err(TransactionError::SigopsP2SH(sigops)); }
}
let input: Script = input.script_sig.clone().into();
let output: Script = paired_output.script_pubkey.into();
let flags = VerificationFlags::default()
.verify_p2sh(self.verify_p2sh)
@ -182,19 +226,19 @@ impl ChainVerifier {
}
}
Ok(sigops)
Ok(())
}
fn verify_block(&self, block: &chain::Block) -> VerificationResult {
fn verify_block(&self, block: &db::IndexedBlock) -> VerificationResult {
let hash = block.hash();
// There should be at least 1 transaction
if block.transactions().is_empty() {
if block.transaction_count() == 0 {
return Err(Error::Empty);
}
// target difficulty threshold
if !self.skip_pow && !utils::check_nbits(&hash, block.header().nbits) {
if !self.skip_pow && !utils::check_nbits(self.network.max_nbits(), &hash, block.header().nbits) {
return Err(Error::Pow);
}
@ -203,8 +247,19 @@ impl ChainVerifier {
return Err(Error::Timestamp);
}
if let Some(median_timestamp) = self.median_timestamp(block) {
if median_timestamp >= block.header().time {
trace!(
target: "verification", "median timestamp verification failed, median: {}, current: {}",
median_timestamp,
block.header().time
);
return Err(Error::Timestamp);
}
}
// todo: serialized_size function is at least suboptimal
let size = ::serialization::Serializable::serialized_size(block);
let size = block.size();
if size > MAX_BLOCK_SIZE {
return Err(Error::Size(size))
}
@ -214,33 +269,20 @@ impl ChainVerifier {
return Err(Error::MerkleRoot);
}
let first_tx = block.transactions().nth(0).expect("transaction count is checked above to be greater than 0").1;
// check first transaction is a coinbase transaction
if !block.transactions()[0].is_coinbase() {
if !first_tx.is_coinbase() {
return Err(Error::Coinbase)
}
// check that coinbase has a valid signature
let coinbase = &block.transactions()[0];
// is_coinbase() = true above guarantees that there is at least one input
let coinbase_script_len = coinbase.inputs[0].script_sig().len();
let coinbase_script_len = first_tx.inputs[0].script_sig.len();
if coinbase_script_len < 2 || coinbase_script_len > 100 {
return Err(Error::CoinbaseSignatureLength(coinbase_script_len));
}
// transaction verification including number of signature operations checking
let mut block_sigops = 0;
for (idx, transaction) in block.transactions().iter().enumerate() {
block_sigops += try!(
self.verify_transaction(
block,
transaction,
idx,
).map_err(|e| Error::Transaction(idx, e))
);
if block_sigops > MAX_BLOCK_SIGOPS {
return Err(Error::MaximumSigops);
}
for (idx, (_, transaction)) in block.transactions().enumerate() {
try!(self.verify_transaction(block, transaction, idx).map_err(|e| Error::Transaction(idx, e)))
}
// todo: pre-process projected block number once verification is parallel!
@ -258,15 +300,61 @@ impl ChainVerifier {
},
}
}
/// Median timestamp ("median time past") of up to 11 blocks preceding `block`.
///
/// Walks the header chain backwards from the parent of `block`, collecting
/// timestamps until 11 headers are gathered or the store runs out of headers
/// (e.g. near genesis, or when ancestors are unknown). Returns `None` when
/// fewer than 3 timestamps could be collected.
///
/// Timestamps are kept in a `Vec` and sorted with duplicates preserved.
/// The previous `BTreeSet`-based version deduplicated equal timestamps,
/// which shifts the median and diverges from Bitcoin Core's
/// `GetMedianTimePast` (which sorts a plain array, duplicates included).
// TODO: optimize it, so it does not make 11 redundant queries each time
fn median_timestamp(&self, block: &db::IndexedBlock) -> Option<u32> {
	let mut timestamps = Vec::new();
	let mut block_ref = block.header().previous_header_hash.clone().into();
	for _ in 0..11 {
		let previous_header = match self.store.block_header(block_ref) {
			Some(h) => h,
			None => { break; }
		};
		timestamps.push(previous_header.time);
		block_ref = previous_header.previous_header_hash.into();
	}
	if timestamps.len() > 2 {
		timestamps.sort();
		Some(timestamps[timestamps.len() / 2])
	}
	else { None }
}
/// Expected `nbits` (compact difficulty target) for the block at `height`.
///
/// Returns `None` for the genesis block (height 0), whose difficulty is fixed
/// by the network. On a retargeting boundary the new target is recomputed from
/// the timespan of the previous RETARGETING_INTERVAL-block window; otherwise
/// the parent's `nbits` is simply inherited.
fn work_required(&self, block: &db::IndexedBlock, height: u32) -> Option<u32> {
if height == 0 {
return None;
}
// the parent header must exist in the store for any non-genesis block
let previous_ref = block.header().previous_header_hash.clone().into();
let previous_header = self.store.block_header(previous_ref).expect("self.height != 0; qed");
if utils::is_retarget_height(height) {
// header that opened the window being retargeted
let retarget_ref = (height - utils::RETARGETING_INTERVAL).into();
let retarget_header = self.store.block_header(retarget_ref).expect("self.height != 0 && self.height % RETARGETING_INTERVAL == 0; qed");
// timestamp of block(height - RETARGETING_INTERVAL)
let retarget_timestamp = retarget_header.time;
// timestamp of parent block
let last_timestamp = previous_header.time;
// nbits of last block
let last_nbits = previous_header.nbits;
return Some(utils::work_required_retarget(self.network.max_nbits(), retarget_timestamp, last_timestamp, last_nbits));
}
// TODO: if.testnet
// NOTE(review): testnet's 20-minute min-difficulty rule is not applied here
// yet (see TODO above); on testnet this currently just inherits parent nbits.
Some(previous_header.nbits)
}
}
impl Verify for ChainVerifier {
fn verify(&self, block: &chain::Block) -> VerificationResult {
fn verify(&self, block: &db::IndexedBlock) -> VerificationResult {
let result = self.verify_block(block);
trace!(
target: "verification", "Block {} (transactions: {}) verification finished. Result {:?}",
block.hash().to_reversed_str(),
block.transactions().len(),
block.transaction_count(),
result,
);
result
@ -276,9 +364,9 @@ impl Verify for ChainVerifier {
impl ContinueVerify for ChainVerifier {
type State = usize;
fn continue_verify(&self, block: &chain::Block, state: usize) -> VerificationResult {
fn continue_verify(&self, block: &db::IndexedBlock, state: usize) -> VerificationResult {
// verify transactions (except coinbase)
for (idx, transaction) in block.transactions().iter().enumerate().skip(state - 1) {
for (idx, (_, transaction)) in block.transactions().enumerate().skip(state - 1) {
try!(self.verify_transaction(block, transaction, idx).map_err(|e| Error::Transaction(idx, e)));
}
@ -293,30 +381,29 @@ impl ContinueVerify for ChainVerifier {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use db::{TestStorage, Storage, Store, BlockStapler, IndexedBlock};
use network::Magic;
use devtools::RandomTempPath;
use {script, test_data};
use super::ChainVerifier;
use super::super::{Verify, Chain, Error, TransactionError};
use db::{TestStorage, Storage, Store, BlockStapler};
use test_data;
use std::sync::Arc;
use devtools::RandomTempPath;
use script;
#[test]
fn verify_orphan() {
let storage = TestStorage::with_blocks(&vec![test_data::genesis()]);
let b2 = test_data::block_h2();
let verifier = ChainVerifier::new(Arc::new(storage));
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet);
assert_eq!(Chain::Orphan, verifier.verify(&b2).unwrap());
assert_eq!(Chain::Orphan, verifier.verify(&b2.into()).unwrap());
}
#[test]
fn verify_smoky() {
let storage = TestStorage::with_blocks(&vec![test_data::genesis()]);
let b1 = test_data::block_h1();
let verifier = ChainVerifier::new(Arc::new(storage));
assert_eq!(Chain::Main, verifier.verify(&b1).unwrap());
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet);
assert_eq!(Chain::Main, verifier.verify(&b1.into()).unwrap());
}
#[test]
@ -328,8 +415,8 @@ mod tests {
]
);
let b1 = test_data::block_h170();
let verifier = ChainVerifier::new(Arc::new(storage));
assert_eq!(Chain::Main, verifier.verify(&b1).unwrap());
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet);
assert_eq!(Chain::Main, verifier.verify(&b1.into()).unwrap());
}
#[test]
@ -340,13 +427,13 @@ mod tests {
]
);
let b170 = test_data::block_h170();
let verifier = ChainVerifier::new(Arc::new(storage));
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet);
let should_be = Err(Error::Transaction(
1,
TransactionError::Inconclusive("c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704".into())
));
assert_eq!(should_be, verifier.verify(&b170));
assert_eq!(should_be, verifier.verify(&b170.into()));
}
#[test]
@ -374,14 +461,14 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Err(Error::Transaction(
1,
TransactionError::Maturity,
));
assert_eq!(expected, verifier.verify(&block));
assert_eq!(expected, verifier.verify(&block.into()));
}
#[test]
@ -392,6 +479,7 @@ mod tests {
let genesis = test_data::block_builder()
.transaction()
.coinbase()
.output().value(1).build()
.build()
.transaction()
.output().value(50).build()
@ -410,10 +498,10 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Ok(Chain::Main);
assert_eq!(expected, verifier.verify(&block));
assert_eq!(expected, verifier.verify(&block.into()));
}
@ -425,6 +513,7 @@ mod tests {
let genesis = test_data::block_builder()
.transaction()
.coinbase()
.output().value(1).build()
.build()
.transaction()
.output().value(50).build()
@ -448,10 +537,10 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Ok(Chain::Main);
assert_eq!(expected, verifier.verify(&block));
assert_eq!(expected, verifier.verify(&block.into()));
}
#[test]
@ -462,6 +551,7 @@ mod tests {
let genesis = test_data::block_builder()
.transaction()
.coinbase()
.output().value(1).build()
.build()
.transaction()
.output().value(50).build()
@ -485,13 +575,14 @@ mod tests {
.merkled_header().parent(genesis.hash()).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Err(Error::Transaction(2, TransactionError::Overspend));
assert_eq!(expected, verifier.verify(&block));
assert_eq!(expected, verifier.verify(&block.into()));
}
#[test]
#[ignore]
fn coinbase_happy() {
let path = RandomTempPath::create_dir();
@ -528,11 +619,11 @@ mod tests {
.merkled_header().parent(best_hash).build()
.build();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Ok(Chain::Main);
assert_eq!(expected, verifier.verify(&block))
assert_eq!(expected, verifier.verify(&block.into()))
}
#[test]
@ -563,7 +654,7 @@ mod tests {
builder_tx2 = builder_tx2.push_opcode(script::Opcode::OP_CHECKSIG)
}
let block = test_data::block_builder()
let block: IndexedBlock = test_data::block_builder()
.transaction().coinbase().build()
.transaction()
.input()
@ -578,12 +669,13 @@ mod tests {
.build()
.build()
.merkled_header().parent(genesis.hash()).build()
.build();
.build()
.into();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Err(Error::MaximumSigops);
assert_eq!(expected, verifier.verify(&block));
assert_eq!(expected, verifier.verify(&block.into()));
}
#[test]
@ -598,21 +690,22 @@ mod tests {
.build();
storage.insert_block(&genesis).unwrap();
let block = test_data::block_builder()
let block: IndexedBlock = test_data::block_builder()
.transaction()
.coinbase()
.output().value(5000000001).build()
.build()
.merkled_header().parent(genesis.hash()).build()
.build();
.build()
.into();
let verifier = ChainVerifier::new(Arc::new(storage)).pow_skip().signatures_skip();
let verifier = ChainVerifier::new(Arc::new(storage), Magic::Testnet).pow_skip().signatures_skip();
let expected = Err(Error::CoinbaseOverspend {
expected_max: 5000000000,
actual: 5000000001
});
assert_eq!(expected, verifier.verify(&block));
assert_eq!(expected, verifier.verify(&block.into()));
}
}

105
verification/src/compact.rs Normal file
View File

@ -0,0 +1,105 @@
use uint::U256;
/// "Compact" encoding of a 256-bit difficulty target — the `nBits` block
/// header field: one base-256 exponent byte followed by a 3-byte mantissa.
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct Compact(u32);
/// `Compact` is a newtype over the raw `nBits` word, so conversion
/// from `u32` is a direct wrap with no validation.
impl From<u32> for Compact {
	fn from(raw: u32) -> Self {
		Compact(raw)
	}
}
impl From<Compact> for u32 {
fn from(c: Compact) -> Self {
c.0
}
}
impl Compact {
/// Wraps a raw `nBits` value without validation.
pub fn new(u: u32) -> Self {
Compact(u)
}
/// Computes the target [0, T] that a blockhash must land in to be valid.
/// Decoding mirrors Bitcoin Core's `arith_uint256::SetCompact`: the top
/// byte is a base-256 exponent, the low 23 bits are the mantissa, and bit
/// 0x00800000 is a sign bit.
/// Returns `Err` carrying the decoded value if the encoding is negative or
/// overflows 256 bits; callers that want the raw value regardless can use
/// `unwrap_or_else(|x| x)`.
pub fn to_u256(&self) -> Result<U256, U256> {
let size = self.0 >> 24;
let mut word = self.0 & 0x007fffff;
let result = if size <= 3 {
word >>= 8 * (3 - size as usize);
word.into()
} else {
U256::from(word) << (8 * (size as usize - 3))
};
// `word` was shifted above when size <= 3; Bitcoin Core checks the
// shifted word too, so a mantissa that shifts down to zero counts as
// neither negative nor overflowing.
let is_negative = word != 0 && (self.0 & 0x00800000) != 0;
let is_overflow = (word != 0 && size > 34) ||
(word > 0xff && size > 33) ||
(word > 0xffff && size > 32);
if is_negative || is_overflow {
Err(result)
} else {
Ok(result)
}
}
/// Encodes `val` into compact form (Bitcoin Core's `GetCompact`).
/// If the mantissa's high bit would be set (and thus read back as a
/// negative number), the mantissa is shifted down one byte and the
/// exponent incremented instead.
pub fn from_u256(val: U256) -> Self {
let mut size = (val.bits() + 7) / 8;
let mut compact = if size <= 3 {
(val.low_u64() << (8 * (3 - size))) as u32
} else {
let bn = val >> (8 * (size - 3));
bn.low_u32()
};
if (compact & 0x00800000) != 0 {
compact >>= 8;
size += 1;
}
// invariants: mantissa fits in 23 bits, exponent fits in one byte
assert!((compact & !0x007fffff) == 0);
assert!(size < 256);
Compact(compact | (size << 24) as u32)
}
}
#[cfg(test)]
mod tests {
	use uint::U256;
	use super::Compact;

	#[test]
	fn test_compact_to_u256() {
		// small exponents: mantissa is shifted right
		assert_eq!(Compact::new(0x01003456).to_u256(), Ok(0.into()));
		assert_eq!(Compact::new(0x01123456).to_u256(), Ok(0x12.into()));
		assert_eq!(Compact::new(0x02008000).to_u256(), Ok(0x80.into()));
		assert_eq!(Compact::new(0x05009234).to_u256(), Ok(0x92340000u64.into()));
		// sign bit set => negative -0x12345600, must be rejected
		assert!(Compact::new(0x04923456).to_u256().is_err());
		assert_eq!(Compact::new(0x04123456).to_u256(), Ok(0x12345600u64.into()));
	}

	#[test]
	fn test_from_u256() {
		let small = U256::from(1000u64);
		assert_eq!(Compact::new(0x0203e800), Compact::from_u256(small));
		// maximum mainnet target: 2^224 - 1
		let max_target = U256::from(2).pow(U256::from(256-32)) - U256::from(1);
		assert_eq!(Compact::new(0x1d00ffff), Compact::from_u256(max_target));
	}

	#[test]
	fn test_compact_to_from_u256() {
		// TODO: it does not work both ways for small values... check why
		for nbits in &[0x1d00ffffu32, 0x05009234] {
			let compact = Compact::new(*nbits);
			let roundtrip = Compact::from_u256(compact.to_u256().unwrap());
			assert_eq!(compact, roundtrip);
		}
	}
}

View File

@ -1,27 +1,30 @@
//! Bitcoin blocks verification
extern crate db;
extern crate primitives;
extern crate chain;
extern crate serialization;
extern crate byteorder;
extern crate parking_lot;
extern crate linked_hash_map;
extern crate byteorder;
extern crate time;
extern crate script;
#[macro_use]
extern crate log;
extern crate db;
extern crate chain;
extern crate network;
extern crate primitives;
extern crate serialization;
extern crate script;
#[cfg(test)]
extern crate ethcore_devtools as devtools;
#[cfg(test)]
extern crate test_data;
mod queue;
mod utils;
mod chain_verifier;
mod compact;
mod utils;
pub use primitives::{uint, hash};
pub use queue::Queue;
pub use chain_verifier::ChainVerifier;
use primitives::hash::H256;
@ -54,6 +57,8 @@ pub enum Error {
CoinbaseSignatureLength(usize),
/// Block size is invalid
Size(usize),
/// Block transactions are not final.
NonFinalBlock,
}
#[derive(Debug, PartialEq)]
@ -79,6 +84,8 @@ pub enum TransactionError {
SigopsP2SH(usize),
/// Coinbase transaction is found at position that is not 0
MisplacedCoinbase(usize),
/// Not fully spent transaction with the same hash already exists, bip30.
UnspentTransactionWithTheSameHash,
}
#[derive(PartialEq, Debug)]
@ -107,11 +114,11 @@ pub type VerificationResult = Result<Chain, Error>;
/// Interface for block verification
pub trait Verify : Send + Sync {
fn verify(&self, block: &chain::Block) -> VerificationResult;
fn verify(&self, block: &db::IndexedBlock) -> VerificationResult;
}
/// Trait for verifier that can be interrupted and continue from the specific point
pub trait ContinueVerify : Verify + Send + Sync {
type State;
fn continue_verify(&self, block: &chain::Block, state: Self::State) -> VerificationResult;
fn continue_verify(&self, block: &db::IndexedBlock, state: Self::State) -> VerificationResult;
}

View File

@ -1,13 +1,62 @@
#![allow(dead_code)]
//! Verification utilities
use primitives::hash::H256;
use std::cmp;
use hash::H256;
use uint::U256;
use byteorder::{BigEndian, ByteOrder};
use chain;
use script::{self, Script};
use compact::Compact;
const MAX_NBITS: u32 = 0x207fffff;
// Timespan constants
const RETARGETING_FACTOR: u32 = 4;
const TARGET_SPACING_SECONDS: u32 = 10 * 60;
const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS;
const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60;
pub fn check_nbits(hash: &H256, n_bits: u32) -> bool {
if n_bits > MAX_NBITS { return false; }
// The upper and lower bounds for the actual retargeting timespan:
// the measured timespan is clamped to [TARGET/4, TARGET*4], as in Bitcoin Core
const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR;
const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR;
// Number of blocks between difficulty retargets: 2 weeks at target spacing = 2016 blocks
pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS;
/// True when `height` lies on a difficulty retargeting boundary
/// (a multiple of `RETARGETING_INTERVAL`, i.e. every 2016 blocks).
/// Note that height 0 (genesis) also satisfies this predicate.
pub fn is_retarget_height(height: u32) -> bool {
height % RETARGETING_INTERVAL == 0
}
fn retarget_timespan(retarget_timestamp: u32, last_timestamp: u32) -> u32 {
let timespan = last_timestamp - retarget_timestamp;
range_constrain(timespan as u32, MIN_TIMESPAN, MAX_TIMESPAN)
}
/// Recomputes the difficulty target after a retargeting window
/// (Bitcoin Core's `CalculateNextWorkRequired`).
///
/// `max_nbits` is the network's proof-of-work limit; the result is clamped
/// so it never exceeds it. Negative / overflowing compact encodings are
/// used as-is (`unwrap_or_else(|x| x)`) since the final clamp bounds the
/// result anyway.
pub fn work_required_retarget(max_nbits: u32, retarget_timestamp: u32, last_timestamp: u32, last_nbits: u32) -> u32 {
// ignore overflows here
let mut retarget = Compact::new(last_nbits).to_u256().unwrap_or_else(|x| x);
let maximum = Compact::new(max_nbits).to_u256().unwrap_or_else(|x| x);
// multiplication overflow potential
// new_target = old_target * actual_timespan / target_timespan
retarget = retarget * U256::from(retarget_timespan(retarget_timestamp, last_timestamp));
retarget = retarget / U256::from(TARGET_TIMESPAN_SECONDS);
if retarget > maximum {
Compact::from_u256(maximum).into()
} else {
Compact::from_u256(retarget).into()
}
}
/// Difficulty calculation for testnet (min-difficulty blocks rule).
/// Not implemented yet — calling this panics; presumably intended to be
/// wired in from `work_required` once testnet support lands (see its TODO).
pub fn work_required_testnet() -> u32 {
unimplemented!();
}
/// Clamps `value` into the range bounded by `min` and `max`
/// (raise to at least `min` first, then cap at `max`).
fn range_constrain(value: u32, min: u32, max: u32) -> u32 {
	let raised = cmp::max(value, min);
	cmp::min(raised, max)
}
/// Simple nbits check that does not require 256-bit arithmetic
pub fn check_nbits(max_nbits: u32, hash: &H256, n_bits: u32) -> bool {
if n_bits > max_nbits {
return false;
}
let hash_bytes: &[u8] = &**hash;
@ -58,35 +107,11 @@ pub fn block_reward_satoshi(block_height: u32) -> u64 {
res
}
pub fn transaction_sigops(transaction: &chain::Transaction) -> Result<usize, script::Error> {
let mut result = 0usize;
for output in &transaction.outputs {
let output_script: Script = output.script_pubkey.to_vec().into();
// todo: not always allow malformed output?
result += output_script.sigop_count(false).unwrap_or(0);
}
if transaction.is_coinbase() { return Ok(result); }
for input in &transaction.inputs {
let input_script: Script = input.script_sig().to_vec().into();
result += try!(input_script.sigop_count(false));
}
Ok(result)
}
pub fn p2sh_sigops(output: &Script, input_ref: &Script) -> usize {
// todo: not always skip malformed output?
output.sigop_count_p2sh(input_ref).unwrap_or(0)
}
#[cfg(test)]
mod tests {
use network::Magic;
use super::{block_reward_satoshi, check_nbits};
use primitives::hash::H256;
use hash::H256;
#[test]
fn reward() {
@ -102,29 +127,31 @@ mod tests {
#[test]
fn nbits() {
let max_nbits = Magic::Regtest.max_nbits();
// strictly equal
let hash = H256::from_reversed_str("00000000000000001bc330000000000000000000000000000000000000000000");
let nbits = 0x181bc330u32;
assert!(check_nbits(&hash, nbits));
assert!(check_nbits(max_nbits, &hash, nbits));
// nbits match but not equal (greater)
let hash = H256::from_reversed_str("00000000000000001bc330000000000000000000000000000000000000000001");
let nbits = 0x181bc330u32;
assert!(!check_nbits(&hash, nbits));
assert!(!check_nbits(max_nbits, &hash, nbits));
// greater
let hash = H256::from_reversed_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let nbits = 0x181bc330u32;
assert!(!check_nbits(&hash, nbits));
assert!(!check_nbits(max_nbits, &hash, nbits));
// some real examples
let hash = H256::from_reversed_str("000000000000000001f942eb4bfa0aeccb6a14c268f4c72d5fff17270da771b9");
let nbits = 404129525;
assert!(check_nbits(&hash, nbits));
assert!(check_nbits(max_nbits, &hash, nbits));
let hash = H256::from_reversed_str("00000000000000000e753ef636075711efd2cbf5a8473c7c5b67755a3701e0c2");
let nbits = 404129525;
assert!(check_nbits(&hash, nbits));
assert!(check_nbits(max_nbits, &hash, nbits));
}
}