Merge branch 'master' into reorg

NikVolf 2016-11-06 21:14:59 +03:00
commit 78de572dbf
22 changed files with 816 additions and 397 deletions

Cargo.lock generated (41 lines changed)
View File

@ -4,7 +4,7 @@ version = "0.1.0"
dependencies = [
"app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"chain 0.1.0",
"clap 2.16.2 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
"db 0.1.0",
"env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"import 0.1.0",
@ -58,7 +58,7 @@ version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"nodrop 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"odds 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
"odds 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -112,7 +112,7 @@ dependencies = [
[[package]]
name = "clap"
version = "2.16.2"
version = "2.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -131,6 +131,15 @@ name = "crossbeam"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "csv"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "db"
version = "0.1.0"
@ -324,7 +333,7 @@ dependencies = [
[[package]]
name = "mio"
version = "0.6.0"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -333,7 +342,7 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)",
"nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -363,7 +372,7 @@ dependencies = [
[[package]]
name = "nix"
version = "0.6.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -379,7 +388,7 @@ name = "nodrop"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"odds 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
"odds 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -403,7 +412,7 @@ dependencies = [
[[package]]
name = "odds"
version = "0.2.23"
version = "0.2.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -426,6 +435,7 @@ version = "0.1.0"
dependencies = [
"abstract-ns 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bitcrypto 0.1.0",
"csv 0.14.7 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -611,6 +621,9 @@ version = "0.1.0"
dependencies = [
"chain 0.1.0",
"db 0.1.0",
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"message 0.1.0",
"miner 0.1.0",
@ -619,6 +632,7 @@ dependencies = [
"primitives 0.1.0",
"test-data 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.0 (git+https://github.com/debris/tokio-core)",
"verification 0.1.0",
]
@ -675,7 +689,7 @@ source = "git+https://github.com/debris/tokio-core#623ce443d89cd9ffa2c1adae8d2eb
dependencies = [
"futures 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -763,8 +777,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
"checksum clap 2.16.2 (registry+https://github.com/rust-lang/crates.io-index)" = "08aac7b078ec0a58e1d4b43cfb11d47001f8eb7c6f6f2bda4f5eed43c82491f1"
"checksum clap 2.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "27dac76762fb56019b04aed3ccb43a770a18f80f9c2eb62ee1a18d9fb4ea2430"
"checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97"
"checksum csv 0.14.7 (registry+https://github.com/rust-lang/crates.io-index)" = "266c1815d7ca63a5bd86284043faf91e8c95e943e55ce05dc0ae08e952de18bc"
"checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf"
"checksum domain 0.1.0 (git+https://github.com/debris/domain)" = "<none>"
"checksum elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4bc9250a632e7c001b741eb0ec6cee93c9a5b6d5f1879696a4b94d62b012210a"
@ -781,14 +796,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
"checksum mio 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2dadd39d4b47343e10513ac2a731c979517a4761224ecb6bbd243602300c9537"
"checksum mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "410a1a0ff76f5a226f1e4e3ff1756128e65cd30166e39c3892283e2ac09d5b67"
"checksum miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d5bfc6782530ac8ace97af10a540054a37126b63b0702ddaaa243b73b5745b9a"
"checksum net2 0.2.26 (registry+https://github.com/rust-lang/crates.io-index)" = "5edf9cb6be97212423aed9413dd4729d62b370b5e1c571750e882cebbbc1e3e2"
"checksum nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a7bb1da2be7da3cbffda73fc681d509ffd9e665af478d2bee1907cee0bc64b2"
"checksum nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0d95c5fa8b641c10ad0b8887454ebaafa3c92b5cd5350f8fc693adafd178e7b"
"checksum nodrop 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0dbbadd3f4c98dea0bd3d9b4be4c0cdaf1ab57035cb2e41fce3983db5add7cc5"
"checksum ns-dns-tokio 0.1.0 (git+https://github.com/debris/abstract-ns)" = "<none>"
"checksum num_cpus 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8890e6084723d57d0df8d2720b0d60c6ee67d6c93e7169630e4371e88765dcad"
"checksum odds 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "e04630a62b3f1cc8c58b4d8f2555a40136f02b420e158242936ef286a72d33a0"
"checksum odds 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)" = "97b2d7c12734955740d14f7a6723d8dd8ed53cf16770ab38ca6a1aaf3124fc0d"
"checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c"
"checksum owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d91377085359426407a287ab16884a0111ba473aa6844ff01d4ec20ce3d75e7"
"checksum parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e1435e7a2a00dfebededd6c6bdbd54008001e94b4a2aadd6aef0dc4c56317621"

View File

@ -6,6 +6,7 @@ use Error;
const MAGIC_MAINNET: u32 = 0xD9B4BEF9;
const MAGIC_TESTNET: u32 = 0x0709110B;
const MAGIC_REGTEST: u32 = 0xDAB5BFFA;
/// Bitcoin network
/// https://bitcoin.org/en/glossary/mainnet
@ -14,6 +15,7 @@ pub enum Magic {
/// The original and main network for Bitcoin transactions, where satoshis have real economic value.
Mainnet,
Testnet,
Regtest,
}
impl From<Magic> for u32 {
@ -21,6 +23,7 @@ impl From<Magic> for u32 {
match m {
Magic::Mainnet => MAGIC_MAINNET,
Magic::Testnet => MAGIC_TESTNET,
Magic::Regtest => MAGIC_REGTEST,
}
}
}
@ -30,6 +33,7 @@ impl Magic {
match magic {
MAGIC_MAINNET => Ok(Magic::Mainnet),
MAGIC_TESTNET => Ok(Magic::Testnet),
MAGIC_REGTEST => Ok(Magic::Regtest),
_ => Err(Error::InvalidMagic),
}
}
@ -38,6 +42,7 @@ impl Magic {
match *self {
Magic::Mainnet => 8333,
Magic::Testnet => 18333,
Magic::Regtest => 18444,
}
}
@ -45,6 +50,7 @@ impl Magic {
match *self {
Magic::Mainnet => 8332,
Magic::Testnet => 18332,
Magic::Regtest => 18443,
}
}
}
@ -58,14 +64,16 @@ impl Serializable for Magic {
#[cfg(test)]
mod tests {
use Error;
use super::{Magic, MAGIC_MAINNET, MAGIC_TESTNET};
use super::{Magic, MAGIC_MAINNET, MAGIC_TESTNET, MAGIC_REGTEST};
#[test]
fn test_network_magic_number() {
assert_eq!(MAGIC_MAINNET, Magic::Mainnet.into());
assert_eq!(MAGIC_TESTNET, Magic::Testnet.into());
assert_eq!(MAGIC_REGTEST, Magic::Regtest.into());
assert_eq!(Magic::from_u32(MAGIC_MAINNET).unwrap(), Magic::Mainnet);
assert_eq!(Magic::from_u32(MAGIC_TESTNET).unwrap(), Magic::Testnet);
assert_eq!(Magic::from_u32(MAGIC_REGTEST).unwrap(), Magic::Regtest);
assert_eq!(Magic::from_u32(0).unwrap_err(), Error::InvalidMagic);
}
@ -73,11 +81,13 @@ mod tests {
fn test_network_port() {
assert_eq!(Magic::Mainnet.port(), 8333);
assert_eq!(Magic::Testnet.port(), 18333);
assert_eq!(Magic::Regtest.port(), 18444);
}
#[test]
fn test_network_rpc_port() {
assert_eq!(Magic::Mainnet.rpc_port(), 8332);
assert_eq!(Magic::Testnet.rpc_port(), 18332);
assert_eq!(Magic::Regtest.rpc_port(), 18443);
}
}
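For reference, a minimal sketch (not part of this commit) exercising the new Regtest variant through the API above; it assumes the `Magic` re-export from the `message` crate shown later in this diff:

extern crate message;

use message::Magic;

fn main() {
    // Round-trip the regtest magic through the conversions defined above.
    let raw: u32 = Magic::Regtest.into();
    assert_eq!(raw, 0xDAB5BFFA);
    assert_eq!(Magic::from_u32(raw).unwrap(), Magic::Regtest);

    // Default p2p and RPC ports for the regtest network.
    assert_eq!(Magic::Regtest.port(), 18444);
    assert_eq!(Magic::Regtest.rpc_port(), 18443);
}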

View File

@ -12,7 +12,7 @@ mod error;
pub use primitives::{hash, bytes};
pub use common::{Command, Magic};
pub use common::{Command, Magic, Services};
pub use message::{Message, MessageHeader, Payload, to_raw_message};
pub use serialization::{serialize_payload, deserialize_payload};
pub use error::{Error, MessageResult};

View File

@ -13,6 +13,7 @@ rand = "0.3"
log = "0.3"
abstract-ns = "0.2.1"
ns-dns-tokio = { git = "https://github.com/debris/abstract-ns", path = "ns-dns-tokio" }
csv = "0.14.7"
primitives = { path = "../primitives"}
bitcrypto = { path = "../crypto" }

View File

@ -1,4 +1,5 @@
use std::net::SocketAddr;
use std::path::PathBuf;
use net::Config as NetConfig;
#[derive(Debug)]
@ -19,4 +20,6 @@ pub struct Config {
pub peers: Vec<SocketAddr>,
/// Connect to these nodes to retrieve peer addresses, and disconnect.
pub seeds: Vec<String>,
/// Path to the p2p node table file.
pub node_table_path: PathBuf,
}

View File

@ -9,6 +9,7 @@ extern crate parking_lot;
extern crate log;
extern crate abstract_ns;
extern crate ns_dns_tokio;
extern crate csv;
extern crate bitcrypto as crypto;
extern crate message;
@ -24,7 +25,7 @@ mod config;
mod event_loop;
mod p2p;
pub const VERSION: u32 = 70_001;
pub const VERSION: u32 = 70_014;
pub const USER_AGENT: &'static str = "pbtc";
pub use primitives::{hash, bytes};

View File

@ -1,4 +1,4 @@
use std::{io, net, error, time};
use std::{io, net, error, time, path};
use std::sync::Arc;
use parking_lot::RwLock;
use futures::{Future, finished, failed, BoxFuture};
@ -34,19 +34,24 @@ pub struct Context {
remote: Remote,
/// Local synchronization node.
local_sync_node: LocalSyncNodeRef,
/// Node table path.
node_table_path: path::PathBuf,
}
impl Context {
/// Creates new context with reference to local sync node, thread pool and event loop.
pub fn new(local_sync_node: LocalSyncNodeRef, pool_handle: CpuPool, remote: Remote, config: &Config) -> Self {
Context {
pub fn new(local_sync_node: LocalSyncNodeRef, pool_handle: CpuPool, remote: Remote, config: &Config) -> Result<Self, Box<error::Error>> {
let context = Context {
connections: Default::default(),
connection_counter: ConnectionCounter::new(config.inbound_connections, config.outbound_connections),
node_table: Default::default(),
node_table: RwLock::new(try!(NodeTable::from_file(&config.node_table_path))),
pool: pool_handle,
remote: remote,
local_sync_node: local_sync_node,
}
node_table_path: config.node_table_path.clone(),
};
Ok(context)
}
/// Spawns a future using thread pool and schedules execution of it with event loop handle.
@ -111,6 +116,10 @@ impl Context {
Context::connect::<NormalSessionFactory>(context.clone(), address, config.clone());
}
if let Err(_err) = context.node_table.read().save_to_file(&context.node_table_path) {
error!("Saving node table to disk failed");
}
Ok(())
})
.for_each(|_| Ok(()))
@ -185,8 +194,9 @@ impl Context {
channel.session().initialize(channel.clone());
Context::on_message(context.clone(), channel)
},
Ok(DeadlineStatus::Meet(Err(_))) => {
Ok(DeadlineStatus::Meet(Err(err))) => {
// protocol error
trace!("Accepting handshake from {} failed with error: {}", socket, err);
// TODO: close socket
context.node_table.write().note_failure(&socket);
context.connection_counter.note_close_inbound_connection();
@ -194,7 +204,7 @@ impl Context {
},
Ok(DeadlineStatus::Timeout) => {
// connection time out
trace!("Handshake with {} timedout", socket);
trace!("Accepting handshake from {} timed out", socket);
// TODO: close socket
context.node_table.write().note_failure(&socket);
context.connection_counter.note_close_inbound_connection();
@ -202,6 +212,7 @@ impl Context {
},
Err(_) => {
// network error
trace!("Accepting handshake from {} failed with network error", socket);
context.node_table.write().note_failure(&socket);
context.connection_counter.note_close_inbound_connection();
finished(Ok(())).boxed()
@ -369,15 +380,19 @@ impl Drop for P2P {
}
impl P2P {
pub fn new(config: Config, local_sync_node: LocalSyncNodeRef, handle: Handle) -> Self {
pub fn new(config: Config, local_sync_node: LocalSyncNodeRef, handle: Handle) -> Result<Self, Box<error::Error>> {
let pool = CpuPool::new(config.threads);
P2P {
let context = try!(Context::new(local_sync_node, pool.clone(), handle.remote().clone(), &config));
let p2p = P2P {
event_loop_handle: handle.clone(),
pool: pool.clone(),
context: Arc::new(Context::new(local_sync_node, pool, handle.remote().clone(), &config)),
pool: pool,
context: Arc::new(context),
config: config,
}
};
Ok(p2p)
}
pub fn run(&self) -> Result<(), Box<error::Error>> {

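Since `Context::new` and `P2P::new` are now fallible (the node table is loaded from disk at construction), call sites must propagate the error. A hedged sketch of the call site, mirroring the `commands::start` change later in this commit; `p2p_cfg`, `sync_connection_factory` and the event loop `el` are assumed to be set up as in pbtc, inside a function returning `Result<(), String>`:

let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle())
    .map_err(|e| e.to_string()));
try!(p2p.run().map_err(|_| "Failed to start p2p module"));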
View File

@ -1,12 +1,14 @@
use std::{io, path, fs};
use std::collections::{HashMap, BTreeSet};
use std::collections::hash_map::Entry;
use std::net::SocketAddr;
use std::cmp::{PartialOrd, Ord, Ordering};
use csv;
use message::common::{Services, NetAddress};
use message::types::addr::AddressEntry;
use util::time::{Time, RealTime};
#[derive(PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Node {
/// Node address.
addr: SocketAddr,
@ -48,7 +50,7 @@ impl From<Node> for AddressEntry {
}
}
#[derive(PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone)]
struct NodeByScore(Node);
impl From<Node> for NodeByScore {
@ -87,7 +89,7 @@ impl Ord for NodeByScore {
}
}
#[derive(PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone)]
struct NodeByTime(Node);
impl From<Node> for NodeByTime {
@ -158,7 +160,7 @@ impl PartialOrd for Node {
}
}
#[derive(Default)]
#[derive(Default, Debug)]
pub struct NodeTable<T = RealTime> where T: Time {
/// Time source.
time: T,
@ -170,6 +172,25 @@ pub struct NodeTable<T = RealTime> where T: Time {
by_time: BTreeSet<NodeByTime>,
}
impl NodeTable {
/// Opens a file and loads the node table from it.
pub fn from_file<P>(path: P) -> Result<Self, io::Error> where P: AsRef<path::Path> {
let res = fs::OpenOptions::new()
.create(true)
.read(true)
// without opening for write, mac os returns os error 22
.write(true)
.open(path)
.and_then(Self::load);
res
}
/// Saves node table to file
pub fn save_to_file<P>(&self, path: P) -> Result<(), io::Error> where P: AsRef<path::Path> {
fs::File::create(path).and_then(|file| self.save(file))
}
}
impl<T> NodeTable<T> where T: Time {
/// Inserts new address and services pair into NodeTable.
pub fn insert(&mut self, addr: SocketAddr, services: Services) {
@ -278,6 +299,52 @@ impl<T> NodeTable<T> where T: Time {
self.by_time.insert(node.clone().into());
}
}
/// Save node table in csv format.
pub fn save<W>(&self, write: W) -> Result<(), io::Error> where W: io::Write {
let mut writer = csv::Writer::from_writer(write)
.delimiter(b' ');
let iter = self.by_score.iter()
.map(|node| &node.0)
.take(1000);
let err = || io::Error::new(io::ErrorKind::Other, "Write csv error");
for n in iter {
let record = (n.addr.to_string(), n.time, u64::from(n.services), n.failures);
try!(writer.encode(record).map_err(|_| err()));
}
Ok(())
}
/// Loads table from a csv source.
pub fn load<R>(read: R) -> Result<Self, io::Error> where R: io::Read, T: Default {
let mut rdr = csv::Reader::from_reader(read)
.has_headers(false)
.delimiter(b' ');
let mut node_table = NodeTable::default();
let err = || io::Error::new(io::ErrorKind::Other, "Load csv error");
for row in rdr.decode() {
let (addr, time, services, failures): (String, i64, u64, u32) = try!(row.map_err(|_| err()));
let node = Node {
addr: try!(addr.parse().map_err(|_| err())),
time: time,
services: services.into(),
failures: failures,
};
node_table.by_score.insert(node.clone().into());
node_table.by_time.insert(node.clone().into());
node_table.by_addr.insert(node.addr, node);
}
Ok(node_table)
}
}
#[cfg(test)]
@ -381,4 +448,40 @@ mod tests {
table.note_failure(&s0);
table.note_failure(&s1);
}
#[test]
fn test_save_and_load() {
let s0: SocketAddr = "127.0.0.1:8000".parse().unwrap();
let s1: SocketAddr = "127.0.0.1:8001".parse().unwrap();
let s2: SocketAddr = "127.0.0.1:8002".parse().unwrap();
let s3: SocketAddr = "127.0.0.1:8003".parse().unwrap();
let s4: SocketAddr = "127.0.0.1:8004".parse().unwrap();
let mut table = NodeTable::<IncrementalTime>::default();
table.insert(s0, Services::default());
table.insert(s1, Services::default());
table.insert(s2, Services::default());
table.insert(s3, Services::default());
table.insert(s4, Services::default());
table.note_used(&s2);
table.note_used(&s4);
table.note_used(&s1);
table.note_failure(&s2);
table.note_failure(&s3);
let mut db = Vec::new();
assert_eq!(table.save(&mut db).unwrap(), ());
let loaded_table = NodeTable::<IncrementalTime>::load(&db as &[u8]).unwrap();
assert_eq!(table.by_addr, loaded_table.by_addr);
assert_eq!(table.by_score, loaded_table.by_score);
assert_eq!(table.by_time, loaded_table.by_time);
let s = String::from_utf8(db).unwrap();
assert_eq!(
"127.0.0.1:8001 7 0 0
127.0.0.1:8004 6 0 0
127.0.0.1:8000 0 0 0
127.0.0.1:8002 5 0 1
127.0.0.1:8003 3 0 1
".to_string(), s);
}
}
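The new persistence API can also be driven from a path directly. A small sketch, assuming it lives inside the p2p crate and the target file is writable (the helper name is hypothetical):

use std::io;
use std::net::SocketAddr;
use std::path::Path;

// Hypothetical helper: reload the table, record one observation, write it back.
fn touch_node_table(path: &Path, addr: &SocketAddr) -> Result<(), io::Error> {
    // from_file creates the file if it does not exist yet (see above).
    let mut table = try!(NodeTable::from_file(path));
    table.note_used(addr);
    // save_to_file writes at most the 1000 best-scored nodes in csv format.
    table.save_to_file(path)
}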

View File

@ -5,7 +5,7 @@ pub trait Time {
fn get(&self) -> time::Timespec;
}
#[derive(Default)]
#[derive(Default, Debug)]
pub struct RealTime;
impl Time for RealTime {

View File

@ -5,7 +5,10 @@ about: Parity bitcoin client
args:
- testnet:
long: testnet
help: Use the test chain
help: Use the test network
- regtest:
long: regtest
help: Use private network for regtest
- connect:
short: c
long: connect

View File

@ -1,6 +1,7 @@
use std::net::SocketAddr;
use sync::create_sync_connection_factory;
use util::{open_db, init_db};
use message::Services;
use util::{open_db, init_db, node_table_path};
use {config, p2p};
pub fn start(cfg: config::Config) -> Result<(), String> {
@ -15,21 +16,23 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
connection: p2p::NetConfig {
magic: cfg.magic,
local_address: SocketAddr::new("127.0.0.1".parse().unwrap(), cfg.port),
services: Default::default(),
services: Services::default().with_network(true),
user_agent: "pbtc".into(),
start_height: 0,
relay: false,
},
peers: cfg.connect.map_or_else(|| vec![], |x| vec![x]),
seeds: cfg.seednode.map_or_else(|| vec![], |x| vec![x]),
node_table_path: node_table_path(),
};
let db = open_db(cfg.use_disk_database);
init_db(&db);
let sync_connection_factory = create_sync_connection_factory(db);
let sync_handle = el.handle();
let sync_connection_factory = create_sync_connection_factory(&sync_handle, db);
let p2p = p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle());
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
try!(p2p.run().map_err(|_| "Failed to start p2p module"));
el.run(p2p::forever()).unwrap();
Ok(())

View File

@ -14,9 +14,11 @@ pub struct Config {
pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let print_to_console = matches.is_present("printtoconsole");
let use_disk_database = matches.is_present("diskdb");
let magic = match matches.is_present("testnet") {
true => Magic::Testnet,
false => Magic::Mainnet,
let magic = match (matches.is_present("testnet"), matches.is_present("regtest")) {
(true, false) => Magic::Testnet,
(false, true) => Magic::Regtest,
(false, false) => Magic::Mainnet,
(true, true) => return Err("Only one testnet option can be used".into()),
};
let port = match matches.value_of("port") {

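For illustration only, the flag-to-network mapping above written as a standalone helper (hypothetical function; the real code matches on `clap::ArgMatches` directly):

use message::Magic;

fn select_magic(testnet: bool, regtest: bool) -> Result<Magic, String> {
    match (testnet, regtest) {
        (true, false) => Ok(Magic::Testnet),
        (false, true) => Ok(Magic::Regtest),
        (false, false) => Ok(Magic::Mainnet),
        (true, true) => Err("Only one testnet option can be used".into()),
    }
}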
View File

@ -43,5 +43,3 @@ fn run() -> Result<(), String> {
_ => commands::start(cfg),
}
}

View File

@ -1,4 +1,5 @@
use std::sync::Arc;
use std::path::PathBuf;
use app_dirs::{app_dir, AppDataType};
use chain::Block;
use {db, APP_INFO};
@ -15,6 +16,12 @@ pub fn open_db(use_disk_database: bool) -> Arc<db::Store> {
}
}
pub fn node_table_path() -> PathBuf {
let mut node_table = app_dir(AppDataType::UserData, &APP_INFO, "p2p").expect("Failed to get app dir");
node_table.push("nodes.csv");
node_table
}
pub fn init_db(db: &Arc<db::Store>) {
// insert genesis block if db is empty
if db.best_block().is_none() {

View File

@ -7,6 +7,10 @@ authors = ["Ethcore <admin@ethcore.io>"]
parking_lot = "0.3"
log = "0.3"
time = "0.1"
futures = "0.1"
futures-cpupool = "0.1"
tokio-core = { git = "https://github.com/debris/tokio-core" }
linked-hash-map = "0.3"
chain = { path = "../chain" }
db = { path = "../db" }

View File

@ -2,9 +2,13 @@ extern crate chain;
extern crate db;
#[macro_use]
extern crate log;
extern crate futures;
extern crate futures_cpupool;
extern crate tokio_core;
extern crate message;
extern crate p2p;
extern crate parking_lot;
extern crate linked_hash_map;
extern crate primitives;
extern crate test_data;
extern crate time;
@ -19,11 +23,13 @@ mod local_node;
mod synchronization_chain;
mod synchronization_client;
mod synchronization_executor;
mod synchronization_manager;
mod synchronization_peers;
mod synchronization_server;
use std::sync::Arc;
use parking_lot::{Mutex, RwLock};
use tokio_core::reactor::Handle;
/// Sync errors.
#[derive(Debug)]
@ -42,7 +48,7 @@ pub fn create_sync_blocks_writer(db: Arc<db::Store>) -> blocks_writer::BlocksWri
}
/// Create inbound synchronization connections factory for given `db`.
pub fn create_sync_connection_factory(db: Arc<db::Store>) -> p2p::LocalSyncNodeRef {
pub fn create_sync_connection_factory(handle: &Handle, db: Arc<db::Store>) -> p2p::LocalSyncNodeRef {
use synchronization_chain::Chain as SyncChain;
use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;
use local_node::LocalNode as SyncNode;
@ -53,7 +59,7 @@ pub fn create_sync_connection_factory(db: Arc<db::Store>) -> p2p::LocalSyncNodeR
let sync_chain = Arc::new(RwLock::new(SyncChain::new(db)));
let sync_executor = SyncExecutor::new(sync_chain.clone());
let sync_server = Arc::new(Mutex::new(SynchronizationServer::new(sync_chain.clone(), sync_executor.clone())));
let sync_client = SynchronizationClient::new(SynchronizationConfig::default(), sync_executor.clone(), sync_chain);
let sync_client = SynchronizationClient::new(SynchronizationConfig::default(), handle, sync_executor.clone(), sync_chain);
let sync_node = Arc::new(SyncNode::new(sync_server, sync_client, sync_executor));
SyncConnectionFactory::with_local_node(sync_node)
}

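`create_sync_connection_factory` now takes a reactor handle so the client can spawn its management worker. A hedged wiring sketch, following the `commands::start` changes elsewhere in this commit (`open_db` and `init_db` are pbtc's util helpers):

let el = p2p::event_loop();
let db = open_db(false);
init_db(&db);
let sync_handle = el.handle();
let sync_connection_factory = create_sync_connection_factory(&sync_handle, db);
// The factory is then handed to p2p::P2P::new(..) together with el.handle().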
View File

@ -188,7 +188,7 @@ mod tests {
use synchronization_executor::tests::DummyTaskExecutor;
use synchronization_client::{Config, SynchronizationClient};
use synchronization_chain::Chain;
use p2p::{OutboundSyncConnection, OutboundSyncConnectionRef};
use p2p::{event_loop, OutboundSyncConnection, OutboundSyncConnectionRef};
use message::types;
use message::common::{InventoryVector, InventoryType};
use db;
@ -196,6 +196,7 @@ mod tests {
use test_data;
use synchronization_server::ServerTask;
use synchronization_server::tests::DummyServer;
use tokio_core::reactor::{Core, Handle};
struct DummyOutboundSyncConnection;
@ -227,19 +228,21 @@ mod tests {
fn send_notfound(&self, _message: &types::NotFound) {}
}
fn create_local_node() -> (Arc<Mutex<DummyTaskExecutor>>, Arc<Mutex<DummyServer>>, LocalNode<DummyTaskExecutor, DummyServer, SynchronizationClient<DummyTaskExecutor>>) {
fn create_local_node() -> (Core, Handle, Arc<Mutex<DummyTaskExecutor>>, Arc<Mutex<DummyServer>>, LocalNode<DummyTaskExecutor, DummyServer, SynchronizationClient<DummyTaskExecutor>>) {
let event_loop = event_loop();
let handle = event_loop.handle();
let chain = Arc::new(RwLock::new(Chain::new(Arc::new(db::TestStorage::with_genesis_block()))));
let executor = DummyTaskExecutor::new();
let server = Arc::new(Mutex::new(DummyServer::new()));
let config = Config { skip_verification: true };
let client = SynchronizationClient::new(config, executor.clone(), chain);
let config = Config { threads_num: 1, skip_verification: true };
let client = SynchronizationClient::new(config, &handle, executor.clone(), chain);
let local_node = LocalNode::new(server.clone(), client, executor.clone());
(executor, server, local_node)
(event_loop, handle, executor, server, local_node)
}
#[test]
fn local_node_request_inventory_on_sync_start() {
let (executor, _, local_node) = create_local_node();
let (_, _, executor, _, local_node) = create_local_node();
let peer_index = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
// start sync session
local_node.start_sync_session(peer_index, 0);
@ -250,7 +253,7 @@ mod tests {
#[test]
fn local_node_serves_block() {
let (_, server, local_node) = create_local_node();
let (_, _, _, server, local_node) = create_local_node();
let peer_index = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
// peer requests genesis block
let genesis_block_hash = test_data::genesis().hash();

View File

@ -5,6 +5,10 @@ use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::mpsc::{channel, Sender, Receiver};
use parking_lot::Mutex;
use futures::{BoxFuture, Future, finished};
use futures::stream::Stream;
use tokio_core::reactor::{Handle, Interval};
use futures_cpupool::CpuPool;
use db;
use chain::{Block, RepresentH256};
use primitives::hash::H256;
@ -16,67 +20,112 @@ use synchronization_chain::{ChainRef, BlockState};
use synchronization_chain::{Information as ChainInformation};
use verification::{ChainVerifier, Error as VerificationError, Verify};
use synchronization_executor::{Task, TaskExecutor};
use synchronization_manager::{manage_synchronization_peers, MANAGEMENT_INTERVAL_MS};
use time;
use std::time::Duration;
///! Blocks synchronization process:
///!
///! TODO: Current assumptions:
///! 1) unknown blocks in `inventory` messages are returned as a consequent range, sorted from oldest to newest
///! 2) no forks support
///!
///! When new peer is connected:
///! 1) send `inventory` message with full block locator hashes
///! 1) send `inventory` message with full block locator hashes (see `LocalNode`)
///!
///! When `inventory` message is received from peer:
///! 1) if synchronization queue is empty:
///! 1.1) append all unknown blocks hashes to the `queued_hashes`
///! 1.2) mark peer as 'useful' for current synchronization stage (TODO)
///! 1.3) stop
///! 2) if intersection(`queued_hashes`, unknown blocks) is not empty && there are new unknown blocks:
///! 2.1) append new unknown blocks to the queued_hashes
///! 2.2) mark peer as 'useful' for current synchronization stage (TODO)
///! 2.3) stop
///! 3) if intersection(`queued_hashes`, unknown blocks) is not empty && there are no new unknown blocks:
///! 3.1) looks like peer is behind us in the blockchain (or these are blocks for the future)
///! 3.2) mark peer as 'suspicious' for current synchronization stage (TODO)
///! 3.3) stop
///! on_new_blocks_inventory: When `inventory` message is received from peer:
///! 1) queue_intersection = intersect(queue, inventory)
///! 2) if !queue_intersection.is_empty(): ===> responded with blocks within sync window
///! 2.1) remember peer as useful
///! 2.2) inventory_rest = inventory - queue_intersection
///! 2.3) if inventory_rest.is_empty(): ===> no new unknown blocks in inventory
///! 2.3.1) stop (2.3)
///! 2.4) if !inventory_rest.is_empty(): ===> has new unknown blocks in inventory
///! 2.4.1) queue_rest = queue after intersection
///! 2.4.2) if queue_rest.is_empty(): ===> has new unknown blocks in inventory, no fork
///! 2.4.2.1) scheduled_blocks.append(inventory_rest)
///! 2.4.2.2) stop (2.4.2)
///! 2.4.3) if !queue_rest.is_empty(): ===> has new unknown blocks in inventory, fork
///! 2.4.3.1) scheduled_blocks.append(inventory_rest)
///! 2.4.3.2) stop (2.4.3)
///! 2.4.4) stop (2.4)
///! 2.5) stop (2)
///! 3) if queue_intersection.is_empty(): ===> responded with out-of-sync-window blocks
///! 3.1) last_known_block = inventory.last(b => b.is_known())
///! 3.2) if last_known_block == None: ===> we know nothing about these blocks & we haven't asked for these
///! 3.2.1) peer will be excluded later by management thread
///! 3.2.2) stop (3.2)
///! 3.3) if last_known_block == last(inventory): ===> responded with all-known-blocks
///! 3.3.1) remember peer as useful (possibly had failures before && have been excluded from sync)
///! 3.3.2) stop (3.3)
///! 3.4) if last_known_block in the middle of inventory: ===> responded with forked blocks
///! 3.4.1) remember peer as useful
///! 3.4.2) inventory_rest = inventory after last_known_block
///! 3.4.3) scheduled_blocks.append(inventory_rest)
///! 3.4.4) stop (3.4)
///! 3.5) stop (3)
///!
///! After receiving `block` message:
///! 1) if any basic verification is failed (TODO):
///! 1.1) penalize peer
///! 1.2) stop
///! 1) if not(remove block) [i.e. block was not requested]:
///! 1.1) ignore it (TODO: try to append to the chain)
///! 1.2) stop
///! 2) if this block is first block in the `requested_hashes`:
///! 2.1) append to the verification queue (+ append to `verifying_hashes`) (TODO)
///! 2.2) for all children (from `orphaned_blocks`): append to the verification queue (TODO)
///! 2.3) stop
///! 3) remember in `orphaned_blocks`
///! on_peer_block: After receiving `block` message:
///! 1) if block_state(block) in (Scheduled, Verifying, Stored): ===> late delivery
///! 1.1) remember peer as useful
///! 1.2) stop (1)
///! 2) if block_state(block) == Requested: ===> on-time delivery
///! 2.1) remember peer as useful
///! 2.2) move block from requested to verifying queue
///! 2.3) queue verification().and_then(insert).or_else(reset_sync)
///! 2.4) stop (2)
///! 3) if block_state(block) == Unknown: ===> maybe we are on-top of chain && new block is announced?
///! 3.1) if block_state(block.parent_hash) == Unknown: ===> we do not know parent
///! 3.1.1) ignore this block
///! 3.1.2) stop (3.1)
///! 3.2) if block_state(block.parent_hash) != Unknown: ===> fork found
///! 3.2.1) ask peer for best inventory (after this block)
///! 3.2.2) append block to verifying queue
///! 3.2.3) queue verification().and_then(insert).or_else(reset_sync)
///! 3.2.4) stop (3.2)
///! 3.3) stop (3)
///!
///! After receiving `inventory` message OR receiving `block` message:
///! 1) if there are blocks hashes in `queued_hashes`:
///! execute_synchronization_tasks: After receiving `inventory` message OR receiving `block` message OR when management thread schedules tasks:
///! 1) if there are blocks in `scheduled` queue AND we can fit more blocks into memory: ===> ask for blocks
///! 1.1) select idle peers
///! 1.2) for each idle peer: query blocks from `queued_hashes`
///! 1.3) move requested blocks hashes from `queued_hashes` to `requested_hashes`
///! 1.2) for each idle peer: query chunk of blocks from `scheduled` queue
///! 1.3) move requested blocks from `scheduled` to `requested` queue
///! 1.4) mark idle peers as active
///! 2) if `queued_hashes` queue is not yet saturated:
///! 1.5) stop (1)
///! 2) if `scheduled` queue is not yet saturated: ===> ask for new blocks hashes
///! 2.1) for each idle peer: send shortened `getblocks` message
///! 2.2) 'forget' idle peers (mark them as not useful for synchronization) (TODO)
///! 2.2) 'forget' idle peers => they will be added again if they respond with inventory
///! 2.3) stop (2)
///!
///! manage_synchronization_peers: When management thread awakes:
///! 1) for peer in active_peers.where(p => now() - p.last_request_time() > failure_interval):
///! 1.1) return all of the peer's tasks to the task pool (TODO: not implemented currently!!!)
///! 1.2) increase # of failures for this peer
///! 1.3) if # of failures > max_failures: ===> super-bad peer
///! 1.3.1) forget peer
///! 1.3.2) stop (1.3)
///! 1.4) if # of failures <= max_failures: ===> bad peer
///! 1.4.1) move peer to idle pool
///! 1.4.2) stop (1.4)
///! 2) schedule tasks from pool (if any)
///!
///! on_block_verification_success: When verification completes successfully:
///! 1) if block_state(block) != Verifying: ===> parent verification failed
///! 1.1) stop (1)
///! 2) remove from verifying queue
///! 3) insert to the db
///!
///! on_block_verification_error: When verification completes with an error:
///! 1) remove block from verification queue
///! 2) remove all known children from all queues [so that new `block` messages will be ignored in on_peer_block.3.1.1]
///!
///! TODO: spawn management thread [watch for not-stalling sync]
///! TODO: check + optimize algorithm for Saturated state
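The queue-intersection step of `on_new_blocks_inventory` above boils down to splitting the peer's inventory at the last hash we already track. A hypothetical, self-contained illustration (not the client's actual code), relying on assumption (1) that unknown blocks form a sorted suffix of the inventory:

use std::collections::HashSet;
use primitives::hash::H256;

// Hypothetical helper: split an inventory into the prefix we already track
// and the new tail that would be appended to the `scheduled` queue.
fn split_inventory(queue: &HashSet<H256>, inventory: &[H256]) -> (usize, Vec<H256>) {
    let known_prefix = inventory.iter().take_while(|h| queue.contains(*h)).count();
    let new_tail = inventory[known_prefix..].to_vec();
    (known_prefix, new_tail)
}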
/// Approximate maximal number of blocks hashes in scheduled queue.
const MAX_SCHEDULED_HASHES: u32 = 4 * 1024;
/// Approximate maximal number of blocks hashes in requested queue.
const MAX_REQUESTED_BLOCKS: u32 = 512;
const MAX_REQUESTED_BLOCKS: u32 = 256;
/// Approximate maximal number of blocks in verifying queue.
const MAX_VERIFYING_BLOCKS: u32 = 512;
const MAX_VERIFYING_BLOCKS: u32 = 256;
/// Minimum number of blocks to request from peer
const MIN_BLOCKS_IN_REQUEST: u32 = 32;
/// Maximum number of blocks to request from peer
const MAX_BLOCKS_IN_REQUEST: u32 = 512;
const MAX_BLOCKS_IN_REQUEST: u32 = 128;
/// Synchronization state
#[derive(Debug, Clone, Copy)]
@ -119,8 +168,9 @@ pub trait Client : Send + 'static {
}
/// Synchronization client configuration options.
#[derive(Default)]
pub struct Config {
/// Number of threads to allocate in synchronization CpuPool.
pub threads_num: usize,
/// Do not verify incoming blocks before inserting to db.
pub skip_verification: bool,
}
@ -129,6 +179,10 @@ pub struct Config {
pub struct SynchronizationClient<T: TaskExecutor> {
/// Synchronization state.
state: State,
/// Cpu pool.
pool: CpuPool,
/// Sync management worker.
management_worker: Option<BoxFuture<(), ()>>,
/// Synchronization peers.
peers: Peers,
/// Task executor.
@ -143,6 +197,15 @@ pub struct SynchronizationClient<T: TaskExecutor> {
verification_worker_thread: Option<thread::JoinHandle<()>>,
}
impl Default for Config {
fn default() -> Self {
Config {
threads_num: 4,
skip_verification: false,
}
}
}
impl State {
pub fn is_synchronizing(&self) -> bool {
match self {
@ -246,11 +309,13 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
impl<T> SynchronizationClient<T> where T: TaskExecutor {
/// Create new synchronization window
pub fn new(config: Config, executor: Arc<Mutex<T>>, chain: ChainRef) -> Arc<Mutex<Self>> {
pub fn new(config: Config, handle: &Handle, executor: Arc<Mutex<T>>, chain: ChainRef) -> Arc<Mutex<Self>> {
let sync = Arc::new(Mutex::new(
SynchronizationClient {
state: State::Saturated,
peers: Peers::new(),
pool: CpuPool::new(config.threads_num),
management_worker: None,
executor: executor,
chain: chain.clone(),
orphaned_blocks: HashMap::new(),
@ -273,6 +338,29 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
.expect("Error creating verification thread"));
}
// TODO: start management worker only when synchronization is started
// currently impossible because there is no way to call Interval::new with Remote && Handle is not-Send
{
let csync = Arc::downgrade(&sync);
let mut sync = sync.lock();
let management_worker = Interval::new(Duration::from_millis(MANAGEMENT_INTERVAL_MS), handle)
.expect("Failed to create interval")
.and_then(move |_| {
let client = match csync.upgrade() {
Some(client) => client,
None => return Ok(()),
};
let mut client = client.lock();
manage_synchronization_peers(&mut client.peers);
client.execute_synchronization_tasks();
Ok(())
})
.for_each(|_| Ok(()))
.then(|_| finished::<(), ()>(()))
.boxed();
sync.management_worker = Some(sync.pool.spawn(management_worker).boxed());
}
sync
}
@ -303,7 +391,7 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
let mut chain = self.chain.write();
loop {
'outer: loop {
// when synchronization is idling
// => request full inventory
if !chain.has_blocks_of_state(BlockState::Scheduled)
@ -344,7 +432,7 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
chain.schedule_blocks_hashes(unknown_peer_hashes);
self.peers.insert(peer_index);
break;
break 'outer;
}
if last_known_peer_hash_index == 0 {
@ -444,7 +532,7 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
let new_num_of_blocks = chain.best_block().number;
let blocks_diff = if new_num_of_blocks > num_of_blocks { new_num_of_blocks - num_of_blocks} else { 0 };
if timestamp_diff >= 60.0 || blocks_diff > 1000 {
self.state = State::Synchronizing(new_timestamp, new_num_of_blocks);
self.state = State::Synchronizing(time::precise_time_s(), chain.best_block().number);
info!(target: "sync", "Processed {} blocks in {} seconds. Chain information: {:?}"
, blocks_diff, timestamp_diff
@ -515,25 +603,30 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
pub mod tests {
use std::sync::Arc;
use parking_lot::{Mutex, RwLock};
use tokio_core::reactor::{Core, Handle};
use chain::{Block, RepresentH256};
use super::{Client, Config, SynchronizationClient};
use synchronization_executor::Task;
use synchronization_chain::{Chain, ChainRef};
use synchronization_executor::tests::DummyTaskExecutor;
use p2p::event_loop;
use test_data;
use db;
fn create_sync() -> (Arc<Mutex<DummyTaskExecutor>>, Arc<Mutex<SynchronizationClient<DummyTaskExecutor>>>) {
fn create_sync() -> (Core, Handle, Arc<Mutex<DummyTaskExecutor>>, Arc<Mutex<SynchronizationClient<DummyTaskExecutor>>>) {
let event_loop = event_loop();
let handle = event_loop.handle();
let storage = Arc::new(db::TestStorage::with_genesis_block());
let chain = ChainRef::new(RwLock::new(Chain::new(storage.clone())));
let executor = DummyTaskExecutor::new();
let config = Config { skip_verification: true };
(executor.clone(), SynchronizationClient::new(config, executor, chain))
let config = Config { threads_num: 1, skip_verification: true };
let client = SynchronizationClient::new(config, &handle, executor.clone(), chain);
(event_loop, handle, executor, client)
}
#[test]
fn synchronization_saturated_on_start() {
let (_, sync) = create_sync();
let (_, _, _, sync) = create_sync();
let sync = sync.lock();
let info = sync.information();
assert!(!info.state.is_synchronizing());
@ -542,7 +635,7 @@ pub mod tests {
#[test]
fn synchronization_in_order_block_path() {
let (executor, sync) = create_sync();
let (_, _, executor, sync) = create_sync();
let mut sync = sync.lock();
let block1: Block = "010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e362990101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000".into();
@ -585,7 +678,7 @@ pub mod tests {
#[test]
fn synchronization_out_of_order_block_path() {
let (_, sync) = create_sync();
let (_, _, _, sync) = create_sync();
let mut sync = sync.lock();
let block2: Block = "010000004860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9bb0bc6649ffff001d08d2bd610101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010bffffffff0100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac00000000".into();
@ -607,7 +700,7 @@ pub mod tests {
#[test]
fn synchronization_parallel_peers() {
let (executor, sync) = create_sync();
let (_, _, executor, sync) = create_sync();
let block1: Block = "010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e362990101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000".into();
let block2: Block = "010000004860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9bb0bc6649ffff001d08d2bd610101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010bffffffff0100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac00000000".into();
@ -652,7 +745,7 @@ pub mod tests {
#[test]
fn synchronization_reset_when_peer_is_disconnected() {
let (_, sync) = create_sync();
let (_, _, _, sync) = create_sync();
// request new blocks
{
@ -671,7 +764,7 @@ pub mod tests {
#[test]
fn synchronization_not_starting_when_receiving_known_blocks() {
let (executor, sync) = create_sync();
let (_, _, executor, sync) = create_sync();
let mut sync = sync.lock();
// saturated => receive inventory with known blocks only
sync.on_new_blocks_inventory(1, vec![test_data::genesis().hash()]);

View File

@ -0,0 +1,26 @@
use time::precise_time_s;
use synchronization_peers::Peers;
/// Management interval (in ms)
pub const MANAGEMENT_INTERVAL_MS: u64 = 10 * 1000;
/// Time without a response (in seconds) after which a peer's score is decreased
const FAILURE_INTERVAL_S: f64 = 5f64;
/// Management worker
pub fn manage_synchronization_peers(peers: &mut Peers) {
// reset tasks for peers which have not responded during the given period
for (worst_peer_index, worst_peer_time) in peers.worst_peers() {
// check if the peer has not responded within the given time
let time_diff = precise_time_s() - worst_peer_time;
if time_diff <= FAILURE_INTERVAL_S {
break;
}
// decrease score && move to the idle queue
trace!(target: "sync", "Failed to get response from peer#{} in {} seconds", worst_peer_index, time_diff);
peers.reset_tasks(worst_peer_index);
if peers.on_peer_failure(worst_peer_index) {
trace!(target: "sync", "Too many failures for peer#{}. Excluding from synchronization", worst_peer_index);
}
}
}

View File

@ -1,16 +1,23 @@
use std::collections::{HashMap, HashSet};
use std::collections::hash_map::Entry;
use primitives::hash::H256;
use linked_hash_map::LinkedHashMap;
use time::precise_time_s;
// TODO: sync score for peers + choose peers based on their score
/// Max peer failures # before excluding from sync process
const MAX_PEER_FAILURES: usize = 8;
/// Set of peers selected for synchronization.
#[derive(Debug)]
pub struct Peers {
/// Peers that have not pending blocks requests.
idle_peers: HashSet<usize>,
/// Pending block requests by peer.
blocks_requests: HashMap<usize, HashSet<H256>>,
/// Peers that have no pending requests.
idle: HashSet<usize>,
/// Pending requests by peer.
requests: HashMap<usize, HashSet<H256>>,
/// Peers failures.
failures: HashMap<usize, usize>,
/// Last message time from peer
times: LinkedHashMap<usize, f64>,
}
/// Information on synchronization peers
@ -26,8 +33,10 @@ pub struct Information {
impl Peers {
pub fn new() -> Peers {
Peers {
idle_peers: HashSet::new(),
blocks_requests: HashMap::new(),
idle: HashSet::new(),
requests: HashMap::new(),
failures: HashMap::new(),
times: LinkedHashMap::new(),
}
}
@ -35,75 +44,122 @@ impl Peers {
#[cfg(test)]
pub fn information(&self) -> Information {
Information {
idle: self.idle_peers.len(),
active: self.blocks_requests.len(),
idle: self.idle.len(),
active: self.requests.len(),
}
}
/// Get idle peer.
#[cfg(test)]
pub fn idle_peer(&self) -> Option<usize> {
self.idle_peers.iter().cloned().next()
self.idle.iter().cloned().next()
}
/// Get idle peers.
pub fn idle_peers(&self) -> Vec<usize> {
self.idle_peers.iter().cloned().collect()
self.idle.iter().cloned().collect()
}
/// Get worst peer.
pub fn worst_peers(&self) -> Vec<(usize, f64)> {
self.times.iter().map(|(&pi, &t)| (pi, t)).collect()
}
/// Insert new synchronization peer.
pub fn insert(&mut self, peer_index: usize) {
if !self.idle_peers.contains(&peer_index) && !self.blocks_requests.contains_key(&peer_index) {
self.idle_peers.insert(peer_index);
}
}
/// Block is received from peer.
pub fn on_block_received(&mut self, peer_index: usize, block_hash: &H256) {
if let Entry::Occupied(mut entry) = self.blocks_requests.entry(peer_index) {
entry.get_mut().remove(block_hash);
if entry.get().is_empty() {
self.idle_peers.insert(peer_index);
entry.remove_entry();
}
if !self.idle.contains(&peer_index) && !self.requests.contains_key(&peer_index) {
self.idle.insert(peer_index);
}
}
/// Peer has been disconnected
pub fn on_peer_disconnected(&mut self, peer_index: usize) {
self.idle_peers.remove(&peer_index);
self.blocks_requests.remove(&peer_index);
self.idle.remove(&peer_index);
self.requests.remove(&peer_index);
self.failures.remove(&peer_index);
self.times.remove(&peer_index);
}
/// Block is received from peer.
pub fn on_block_received(&mut self, peer_index: usize, block_hash: &H256) {
if let Entry::Occupied(mut entry) = self.requests.entry(peer_index) {
entry.get_mut().remove(block_hash);
if entry.get().is_empty() {
self.idle.insert(peer_index);
entry.remove_entry();
}
}
self.on_peer_message(peer_index);
}
/// Blocks have been requested from peer.
pub fn on_blocks_requested(&mut self, peer_index: usize, blocks_hashes: &Vec<H256>) {
// inventory can only be requested from idle peers
assert!(!self.blocks_requests.contains_key(&peer_index));
assert!(!self.requests.contains_key(&peer_index));
self.idle_peers.remove(&peer_index);
self.blocks_requests.entry(peer_index).or_insert(HashSet::new()).extend(blocks_hashes.iter().cloned());
self.idle.remove(&peer_index);
self.requests.entry(peer_index).or_insert(HashSet::new()).extend(blocks_hashes.iter().cloned());
self.times.insert(peer_index, precise_time_s());
}
/// Inventory has been requested from peer.
pub fn on_inventory_requested(&mut self, peer_index: usize) {
// inventory can only be requested from idle peers
assert!(!self.blocks_requests.contains_key(&peer_index));
self.idle_peers.remove(&peer_index);
assert!(!self.requests.contains_key(&peer_index));
self.idle.remove(&peer_index);
// peer is now out-of-synchronization process, because:
// 1) if it has new blocks, it will respond with `inventory` message && will be insrted back here
// 1) if it has new blocks, it will respond with `inventory` message && will be inserted back here
// 2) if it has no new blocks => either synchronization is completed, or it is behind us in sync
}
/// We have failed to get response from peer during given period
pub fn on_peer_failure(&mut self, peer_index: usize) -> bool {
let peer_failures = match self.failures.entry(peer_index) {
Entry::Occupied(mut entry) => {
let failures = entry.get() + 1;
entry.insert(failures);
failures
},
Entry::Vacant(entry) => *entry.insert(1),
};
let too_much_failures = peer_failures >= MAX_PEER_FAILURES;
if too_much_failures {
self.failures.remove(&peer_index);
self.requests.remove(&peer_index);
self.times.remove(&peer_index);
}
too_much_failures
}
/// Reset peers state
pub fn reset(&mut self) {
self.idle_peers.extend(self.blocks_requests.drain().map(|(k, _)| k));
self.idle.extend(self.requests.drain().map(|(k, _)| k));
self.failures.clear();
self.times.clear();
}
/// Reset peer tasks
pub fn reset_tasks(&mut self, peer_index: usize) {
self.requests.remove(&peer_index);
self.times.remove(&peer_index);
self.idle.insert(peer_index);
}
/// When sync message is received from peer
fn on_peer_message(&mut self, peer_index: usize) {
self.failures.remove(&peer_index);
self.times.remove(&peer_index);
if self.requests.contains_key(&peer_index) {
self.times.insert(peer_index, precise_time_s());
}
}
}
#[cfg(test)]
mod tests {
use super::Peers;
use super::{Peers, MAX_PEER_FAILURES};
use primitives::hash::H256;
#[test]
@ -201,4 +257,41 @@ mod tests {
assert_eq!(peers.information().idle, 3);
assert_eq!(peers.information().active, 0);
}
}
#[test]
fn peers_worst() {
let mut peers = Peers::new();
peers.insert(1);
peers.insert(2);
assert_eq!(peers.worst_peers(), vec![]);
peers.on_blocks_requested(1, &vec![H256::default()]);
assert_eq!(peers.worst_peers().len(), 1);
assert_eq!(peers.worst_peers()[0].0, 1);
peers.on_blocks_requested(2, &vec![H256::default()]);
assert_eq!(peers.worst_peers().len(), 2);
assert_eq!(peers.worst_peers()[0].0, 1);
assert_eq!(peers.worst_peers()[1].0, 2);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().active, 2);
peers.reset_tasks(1);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().active, 1);
assert_eq!(peers.worst_peers().len(), 1);
assert_eq!(peers.worst_peers()[0].0, 2);
for _ in 0..MAX_PEER_FAILURES {
peers.on_peer_failure(2);
}
assert_eq!(peers.worst_peers().len(), 0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().active, 0);
}
}

View File

@ -1,87 +1,93 @@
digraph dependencies {
N0[label="pbtc",shape=box];
N1[label="chain",shape=box];
N2[label="clap",shape=box];
N3[label="db",shape=box];
N4[label="env_logger",shape=box];
N5[label="keys",shape=box];
N6[label="message",shape=box];
N7[label="miner",shape=box];
N8[label="p2p",shape=box];
N9[label="script",shape=box];
N10[label="sync",shape=box];
N11[label="verification",shape=box];
N12[label="abstract-ns",shape=box];
N13[label="futures",shape=box];
N14[label="quick-error",shape=box];
N15[label="rand",shape=box];
N16[label="aho-corasick",shape=box];
N17[label="memchr",shape=box];
N18[label="ansi_term",shape=box];
N19[label="arrayvec",shape=box];
N20[label="nodrop",shape=box];
N21[label="odds",shape=box];
N22[label="base58",shape=box];
N23[label="bit-vec",shape=box];
N24[label="bitcrypto",shape=box];
N25[label="primitives",shape=box];
N26[label="rust-crypto",shape=box];
N27[label="bitflags v0.4.0",shape=box];
N28[label="bitflags v0.7.0",shape=box];
N29[label="byteorder",shape=box];
N30[label="cfg-if",shape=box];
N31[label="heapsize",shape=box];
N32[label="rustc-serialize",shape=box];
N33[label="serialization",shape=box];
N34[label="libc",shape=box];
N35[label="strsim",shape=box];
N36[label="term_size",shape=box];
N37[label="unicode-segmentation",shape=box];
N38[label="unicode-width",shape=box];
N39[label="vec_map",shape=box];
N40[label="yaml-rust",shape=box];
N41[label="crossbeam",shape=box];
N42[label="elastic-array",shape=box];
N43[label="ethcore-devtools",shape=box];
N44[label="parking_lot",shape=box];
N45[label="rocksdb",shape=box];
N46[label="test-data",shape=box];
N47[label="deque",shape=box];
N48[label="domain",shape=box];
N49[label="tokio-core",shape=box];
N50[label="log",shape=box];
N51[label="regex",shape=box];
N52[label="eth-secp256k1",shape=box];
N53[label="gcc",shape=box];
N54[label="futures-cpupool",shape=box];
N55[label="num_cpus",shape=box];
N56[label="rayon",shape=box];
N57[label="kernel32-sys",shape=box];
N58[label="winapi",shape=box];
N59[label="winapi-build",shape=box];
N60[label="lazy_static",shape=box];
N61[label="lazycell",shape=box];
N62[label="linked-hash-map",shape=box];
N63[label="mio",shape=box];
N64[label="miow",shape=box];
N65[label="net2",shape=box];
N66[label="nix",shape=box];
N67[label="slab",shape=box];
N68[label="ws2_32-sys",shape=box];
N69[label="rustc_version",shape=box];
N70[label="semver",shape=box];
N71[label="void",shape=box];
N72[label="ns-dns-tokio",shape=box];
N73[label="owning_ref",shape=box];
N74[label="time",shape=box];
N75[label="parking_lot_core",shape=box];
N76[label="smallvec",shape=box];
N77[label="regex-syntax",shape=box];
N78[label="thread_local",shape=box];
N79[label="utf8-ranges",shape=box];
N80[label="rocksdb-sys",shape=box];
N81[label="scoped-tls",shape=box];
N82[label="thread-id",shape=box];
N1[label="app_dirs",shape=box];
N2[label="chain",shape=box];
N3[label="clap",shape=box];
N4[label="db",shape=box];
N5[label="env_logger",shape=box];
N6[label="import",shape=box];
N7[label="keys",shape=box];
N8[label="log",shape=box];
N9[label="message",shape=box];
N10[label="miner",shape=box];
N11[label="p2p",shape=box];
N12[label="script",shape=box];
N13[label="sync",shape=box];
N14[label="verification",shape=box];
N15[label="abstract-ns",shape=box];
N16[label="futures",shape=box];
N17[label="quick-error",shape=box];
N18[label="rand",shape=box];
N19[label="aho-corasick",shape=box];
N20[label="memchr",shape=box];
N21[label="ansi_term",shape=box];
N22[label="ole32-sys",shape=box];
N23[label="shell32-sys",shape=box];
N24[label="winapi",shape=box];
N25[label="xdg",shape=box];
N26[label="arrayvec",shape=box];
N27[label="nodrop",shape=box];
N28[label="odds",shape=box];
N29[label="base58",shape=box];
N30[label="bit-vec",shape=box];
N31[label="bitcrypto",shape=box];
N32[label="primitives",shape=box];
N33[label="rust-crypto",shape=box];
N34[label="bitflags v0.4.0",shape=box];
N35[label="bitflags v0.7.0",shape=box];
N36[label="byteorder",shape=box];
N37[label="cfg-if",shape=box];
N38[label="heapsize",shape=box];
N39[label="rustc-serialize",shape=box];
N40[label="serialization",shape=box];
N41[label="libc",shape=box];
N42[label="strsim",shape=box];
N43[label="term_size",shape=box];
N44[label="unicode-segmentation",shape=box];
N45[label="unicode-width",shape=box];
N46[label="vec_map",shape=box];
N47[label="yaml-rust",shape=box];
N48[label="crossbeam",shape=box];
N49[label="csv",shape=box];
N50[label="elastic-array",shape=box];
N51[label="ethcore-devtools",shape=box];
N52[label="parking_lot",shape=box];
N53[label="rocksdb",shape=box];
N54[label="test-data",shape=box];
N55[label="deque",shape=box];
N56[label="domain",shape=box];
N57[label="tokio-core",shape=box];
N58[label="void",shape=box];
N59[label="regex",shape=box];
N60[label="eth-secp256k1",shape=box];
N61[label="gcc",shape=box];
N62[label="futures-cpupool",shape=box];
N63[label="num_cpus",shape=box];
N64[label="rayon",shape=box];
N65[label="kernel32-sys",shape=box];
N66[label="winapi-build",shape=box];
N67[label="lazy_static",shape=box];
N68[label="lazycell",shape=box];
N69[label="linked-hash-map",shape=box];
N70[label="mio",shape=box];
N71[label="miow",shape=box];
N72[label="net2",shape=box];
N73[label="nix",shape=box];
N74[label="slab",shape=box];
N75[label="ws2_32-sys",shape=box];
N76[label="rustc_version",shape=box];
N77[label="semver",shape=box];
N78[label="ns-dns-tokio",shape=box];
N79[label="owning_ref",shape=box];
N80[label="time",shape=box];
N81[label="parking_lot_core",shape=box];
N82[label="smallvec",shape=box];
N83[label="regex-syntax",shape=box];
N84[label="thread_local",shape=box];
N85[label="utf8-ranges",shape=box];
N86[label="rocksdb-sys",shape=box];
N87[label="scoped-tls",shape=box];
N88[label="thread-id",shape=box];
N0 -> N1[label="",style=dashed];
N0 -> N2[label="",style=dashed];
N0 -> N3[label="",style=dashed];
@@ -93,192 +99,219 @@ digraph dependencies {
N0 -> N9[label="",style=dashed];
N0 -> N10[label="",style=dashed];
N0 -> N11[label="",style=dashed];
N0 -> N12[label="",style=dashed];
N0 -> N13[label="",style=dashed];
N0 -> N14[label="",style=dashed];
N1 -> N22[label="",style=dashed];
N1 -> N23[label="",style=dashed];
N1 -> N24[label="",style=dashed];
N1 -> N25[label="",style=dashed];
N1 -> N31[label="",style=dashed];
N1 -> N32[label="",style=dashed];
N1 -> N33[label="",style=dashed];
N2 -> N18[label="",style=dashed];
N2 -> N28[label="",style=dashed];
N2 -> N34[label="",style=dashed];
N2 -> N35[label="",style=dashed];
N2 -> N36[label="",style=dashed];
N2 -> N37[label="",style=dashed];
N2 -> N31[label="",style=dashed];
N2 -> N32[label="",style=dashed];
N2 -> N38[label="",style=dashed];
N2 -> N39[label="",style=dashed];
N2 -> N40[label="",style=dashed];
N3 -> N1[label="",style=dashed];
N3 -> N23[label="",style=dashed];
N3 -> N25[label="",style=dashed];
N3 -> N29[label="",style=dashed];
N3 -> N33[label="",style=dashed];
N3 -> N21[label="",style=dashed];
N3 -> N35[label="",style=dashed];
N3 -> N41[label="",style=dashed];
N3 -> N42[label="",style=dashed];
N3 -> N43[label="",style=dashed];
N3 -> N44[label="",style=dashed];
N3 -> N45[label="",style=dashed];
N3 -> N46[label="",style=dashed];
N3 -> N47[label="",style=dashed];
N4 -> N2[label="",style=dashed];
N4 -> N30[label="",style=dashed];
N4 -> N32[label="",style=dashed];
N4 -> N36[label="",style=dashed];
N4 -> N40[label="",style=dashed];
N4 -> N50[label="",style=dashed];
N4 -> N51[label="",style=dashed];
N5 -> N15[label="",style=dashed];
N5 -> N22[label="",style=dashed];
N5 -> N24[label="",style=dashed];
N5 -> N25[label="",style=dashed];
N5 -> N32[label="",style=dashed];
N5 -> N52[label="",style=dashed];
N5 -> N60[label="",style=dashed];
N6 -> N1[label="",style=dashed];
N6 -> N24[label="",style=dashed];
N6 -> N25[label="",style=dashed];
N6 -> N29[label="",style=dashed];
N6 -> N33[label="",style=dashed];
N7 -> N1[label="",style=dashed];
N7 -> N25[label="",style=dashed];
N4 -> N52[label="",style=dashed];
N4 -> N53[label="",style=dashed];
N4 -> N54[label="",style=dashed];
N5 -> N8[label="",style=dashed];
N5 -> N59[label="",style=dashed];
N6 -> N2[label="",style=dashed];
N6 -> N8[label="",style=dashed];
N6 -> N32[label="",style=dashed];
N6 -> N40[label="",style=dashed];
N7 -> N18[label="",style=dashed];
N7 -> N29[label="",style=dashed];
N7 -> N31[label="",style=dashed];
N7 -> N33[label="",style=dashed];
N7 -> N46[label="",style=dashed];
N8 -> N6[label="",style=dashed];
N8 -> N12[label="",style=dashed];
N8 -> N13[label="",style=dashed];
N8 -> N15[label="",style=dashed];
N8 -> N24[label="",style=dashed];
N8 -> N25[label="",style=dashed];
N8 -> N44[label="",style=dashed];
N8 -> N49[label="",style=dashed];
N8 -> N50[label="",style=dashed];
N8 -> N54[label="",style=dashed];
N8 -> N72[label="",style=dashed];
N8 -> N74[label="",style=dashed];
N9 -> N1[label="",style=dashed];
N9 -> N5[label="",style=dashed];
N9 -> N24[label="",style=dashed];
N9 -> N25[label="",style=dashed];
N9 -> N33[label="",style=dashed];
N10 -> N1[label="",style=dashed];
N10 -> N3[label="",style=dashed];
N10 -> N6[label="",style=dashed];
N10 -> N8[label="",style=dashed];
N10 -> N11[label="",style=dashed];
N10 -> N25[label="",style=dashed];
N10 -> N44[label="",style=dashed];
N10 -> N50[label="",style=dashed];
N10 -> N62[label="",style=dashed];
N11 -> N1[label="",style=dashed];
N11 -> N3[label="",style=dashed];
N7 -> N32[label="",style=dashed];
N7 -> N39[label="",style=dashed];
N7 -> N60[label="",style=dashed];
N7 -> N67[label="",style=dashed];
N9 -> N2[label="",style=dashed];
N9 -> N31[label="",style=dashed];
N9 -> N32[label="",style=dashed];
N9 -> N36[label="",style=dashed];
N9 -> N40[label="",style=dashed];
N10 -> N2[label="",style=dashed];
N10 -> N32[label="",style=dashed];
N10 -> N38[label="",style=dashed];
N10 -> N40[label="",style=dashed];
N10 -> N54[label="",style=dashed];
N11 -> N8[label="",style=dashed];
N11 -> N9[label="",style=dashed];
N11 -> N25[label="",style=dashed];
N11 -> N29[label="",style=dashed];
N11 -> N33[label="",style=dashed];
N11 -> N43[label="",style=dashed];
N11 -> N44[label="",style=dashed];
N11 -> N46[label="",style=dashed];
N11 -> N15[label="",style=dashed];
N11 -> N16[label="",style=dashed];
N11 -> N18[label="",style=dashed];
N11 -> N31[label="",style=dashed];
N11 -> N32[label="",style=dashed];
N11 -> N40[label="",style=dashed];
N11 -> N49[label="",style=dashed];
N11 -> N52[label="",style=dashed];
N11 -> N57[label="",style=dashed];
N11 -> N62[label="",style=dashed];
N11 -> N74[label="",style=dashed];
N12 -> N13[label="",style=dashed];
N12 -> N14[label="",style=dashed];
N12 -> N15[label="",style=dashed];
N13 -> N50[label="",style=dashed];
N15 -> N34[label="",style=dashed];
N16 -> N17[label="",style=dashed];
N17 -> N34[label="",style=dashed];
N19 -> N20[label=""];
N19 -> N21[label=""];
N20 -> N21[label=""];
N24 -> N25[label="",style=dashed];
N24 -> N26[label="",style=dashed];
N25 -> N31[label="",style=dashed];
N25 -> N32[label="",style=dashed];
N26 -> N15[label="",style=dashed];
N26 -> N32[label="",style=dashed];
N26 -> N34[label="",style=dashed];
N26 -> N53[label="",style=dashed];
N26 -> N74[label="",style=dashed];
N31 -> N57[label="",style=dashed];
N33 -> N25[label="",style=dashed];
N33 -> N29[label="",style=dashed];
N36 -> N34[label="",style=dashed];
N36 -> N57[label="",style=dashed];
N36 -> N58[label="",style=dashed];
N43 -> N15[label="",style=dashed];
N44 -> N73[label="",style=dashed];
N44 -> N75[label="",style=dashed];
N45 -> N34[label="",style=dashed];
N45 -> N80[label="",style=dashed];
N46 -> N1[label="",style=dashed];
N46 -> N25[label="",style=dashed];
N46 -> N33[label="",style=dashed];
N47 -> N15[label="",style=dashed];
N48 -> N13[label="",style=dashed];
N48 -> N15[label="",style=dashed];
N48 -> N29[label="",style=dashed];
N48 -> N49[label="",style=dashed];
N49 -> N13[label="",style=dashed];
N49 -> N50[label="",style=dashed];
N49 -> N63[label="",style=dashed];
N49 -> N67[label="",style=dashed];
N49 -> N81[label="",style=dashed];
N51 -> N16[label="",style=dashed];
N51 -> N17[label="",style=dashed];
N51 -> N77[label="",style=dashed];
N51 -> N78[label="",style=dashed];
N51 -> N79[label="",style=dashed];
N52 -> N15[label="",style=dashed];
N52 -> N19[label="",style=dashed];
N52 -> N32[label="",style=dashed];
N52 -> N34[label="",style=dashed];
N52 -> N53[label="",style=dashed];
N53 -> N56[label="",style=dashed];
N54 -> N13[label="",style=dashed];
N54 -> N41[label="",style=dashed];
N54 -> N55[label="",style=dashed];
N55 -> N34[label="",style=dashed];
N56 -> N15[label="",style=dashed];
N56 -> N34[label="",style=dashed];
N56 -> N47[label="",style=dashed];
N56 -> N55[label="",style=dashed];
N57 -> N58[label="",style=dashed];
N57 -> N59[label="",style=dashed];
N63 -> N34[label="",style=dashed];
N63 -> N50[label="",style=dashed];
N63 -> N57[label="",style=dashed];
N63 -> N58[label="",style=dashed];
N63 -> N61[label="",style=dashed];
N63 -> N64[label="",style=dashed];
N63 -> N65[label="",style=dashed];
N63 -> N66[label="",style=dashed];
N63 -> N67[label="",style=dashed];
N64 -> N57[label="",style=dashed];
N64 -> N58[label="",style=dashed];
N64 -> N65[label="",style=dashed];
N64 -> N68[label="",style=dashed];
N65 -> N30[label="",style=dashed];
N65 -> N34[label="",style=dashed];
N65 -> N57[label="",style=dashed];
N65 -> N58[label="",style=dashed];
N65 -> N68[label="",style=dashed];
N66 -> N27[label="",style=dashed];
N66 -> N30[label="",style=dashed];
N66 -> N34[label="",style=dashed];
N66 -> N69[label="",style=dashed];
N66 -> N70[label="",style=dashed];
N66 -> N71[label="",style=dashed];
N68 -> N58[label="",style=dashed];
N68 -> N59[label="",style=dashed];
N69 -> N70[label="",style=dashed];
N72 -> N12[label="",style=dashed];
N72 -> N13[label="",style=dashed];
N72 -> N48[label="",style=dashed];
N72 -> N49[label="",style=dashed];
N74 -> N34[label="",style=dashed];
N74 -> N57[label="",style=dashed];
N74 -> N58[label="",style=dashed];
N75 -> N15[label="",style=dashed];
N75 -> N34[label="",style=dashed];
N75 -> N57[label="",style=dashed];
N75 -> N58[label="",style=dashed];
N75 -> N76[label="",style=dashed];
N78 -> N82[label="",style=dashed];
N80 -> N34[label="",style=dashed];
N80 -> N53[label="",style=dashed];
N82 -> N34[label="",style=dashed];
N82 -> N57[label="",style=dashed];
N11 -> N78[label="",style=dashed];
N11 -> N80[label="",style=dashed];
N12 -> N2[label="",style=dashed];
N12 -> N7[label="",style=dashed];
N12 -> N31[label="",style=dashed];
N12 -> N32[label="",style=dashed];
N12 -> N40[label="",style=dashed];
N13 -> N2[label="",style=dashed];
N13 -> N4[label="",style=dashed];
N13 -> N8[label="",style=dashed];
N13 -> N9[label="",style=dashed];
N13 -> N10[label="",style=dashed];
N13 -> N11[label="",style=dashed];
N13 -> N14[label="",style=dashed];
N13 -> N16[label="",style=dashed];
N13 -> N32[label="",style=dashed];
N13 -> N52[label="",style=dashed];
N13 -> N54[label="",style=dashed];
N13 -> N57[label="",style=dashed];
N13 -> N62[label="",style=dashed];
N13 -> N69[label="",style=dashed];
N13 -> N80[label="",style=dashed];
N14 -> N2[label="",style=dashed];
N14 -> N4[label="",style=dashed];
N14 -> N12[label="",style=dashed];
N14 -> N32[label="",style=dashed];
N14 -> N36[label="",style=dashed];
N14 -> N40[label="",style=dashed];
N14 -> N51[label="",style=dashed];
N14 -> N52[label="",style=dashed];
N14 -> N54[label="",style=dashed];
N14 -> N69[label="",style=dashed];
N14 -> N80[label="",style=dashed];
N15 -> N16[label="",style=dashed];
N15 -> N17[label="",style=dashed];
N15 -> N18[label="",style=dashed];
N16 -> N8[label="",style=dashed];
N18 -> N41[label="",style=dashed];
N19 -> N20[label="",style=dashed];
N20 -> N41[label="",style=dashed];
N22 -> N24[label="",style=dashed];
N22 -> N66[label="",style=dashed];
N23 -> N24[label="",style=dashed];
N23 -> N66[label="",style=dashed];
N26 -> N27[label=""];
N26 -> N28[label=""];
N27 -> N28[label=""];
N31 -> N32[label="",style=dashed];
N31 -> N33[label="",style=dashed];
N32 -> N38[label="",style=dashed];
N32 -> N39[label="",style=dashed];
N33 -> N18[label="",style=dashed];
N33 -> N39[label="",style=dashed];
N33 -> N41[label="",style=dashed];
N33 -> N61[label="",style=dashed];
N33 -> N80[label="",style=dashed];
N38 -> N65[label="",style=dashed];
N40 -> N32[label="",style=dashed];
N40 -> N36[label="",style=dashed];
N43 -> N24[label="",style=dashed];
N43 -> N41[label="",style=dashed];
N43 -> N65[label="",style=dashed];
N49 -> N36[label="",style=dashed];
N49 -> N39[label="",style=dashed];
N51 -> N18[label="",style=dashed];
N52 -> N79[label="",style=dashed];
N52 -> N81[label="",style=dashed];
N53 -> N41[label="",style=dashed];
N53 -> N86[label="",style=dashed];
N54 -> N2[label="",style=dashed];
N54 -> N32[label="",style=dashed];
N54 -> N40[label="",style=dashed];
N55 -> N18[label="",style=dashed];
N56 -> N16[label="",style=dashed];
N56 -> N18[label="",style=dashed];
N56 -> N36[label="",style=dashed];
N56 -> N48[label="",style=dashed];
N56 -> N57[label="",style=dashed];
N56 -> N58[label="",style=dashed];
N57 -> N8[label="",style=dashed];
N57 -> N16[label="",style=dashed];
N57 -> N70[label="",style=dashed];
N57 -> N74[label="",style=dashed];
N57 -> N87[label="",style=dashed];
N59 -> N19[label="",style=dashed];
N59 -> N20[label="",style=dashed];
N59 -> N83[label="",style=dashed];
N59 -> N84[label="",style=dashed];
N59 -> N85[label="",style=dashed];
N60 -> N18[label="",style=dashed];
N60 -> N26[label="",style=dashed];
N60 -> N39[label="",style=dashed];
N60 -> N41[label="",style=dashed];
N60 -> N61[label="",style=dashed];
N61 -> N64[label="",style=dashed];
N62 -> N16[label="",style=dashed];
N62 -> N48[label="",style=dashed];
N62 -> N63[label="",style=dashed];
N63 -> N41[label="",style=dashed];
N64 -> N18[label="",style=dashed];
N64 -> N41[label="",style=dashed];
N64 -> N55[label="",style=dashed];
N64 -> N63[label="",style=dashed];
N65 -> N24[label="",style=dashed];
N65 -> N66[label="",style=dashed];
N70 -> N8[label="",style=dashed];
N70 -> N24[label="",style=dashed];
N70 -> N41[label="",style=dashed];
N70 -> N65[label="",style=dashed];
N70 -> N68[label="",style=dashed];
N70 -> N71[label="",style=dashed];
N70 -> N72[label="",style=dashed];
N70 -> N73[label="",style=dashed];
N70 -> N74[label="",style=dashed];
N71 -> N24[label="",style=dashed];
N71 -> N65[label="",style=dashed];
N71 -> N72[label="",style=dashed];
N71 -> N75[label="",style=dashed];
N72 -> N24[label="",style=dashed];
N72 -> N37[label="",style=dashed];
N72 -> N41[label="",style=dashed];
N72 -> N65[label="",style=dashed];
N72 -> N75[label="",style=dashed];
N73 -> N34[label="",style=dashed];
N73 -> N37[label="",style=dashed];
N73 -> N41[label="",style=dashed];
N73 -> N58[label="",style=dashed];
N73 -> N76[label="",style=dashed];
N73 -> N77[label="",style=dashed];
N75 -> N24[label="",style=dashed];
N75 -> N66[label="",style=dashed];
N76 -> N77[label="",style=dashed];
N78 -> N15[label="",style=dashed];
N78 -> N16[label="",style=dashed];
N78 -> N56[label="",style=dashed];
N78 -> N57[label="",style=dashed];
N80 -> N24[label="",style=dashed];
N80 -> N41[label="",style=dashed];
N80 -> N65[label="",style=dashed];
N81 -> N18[label="",style=dashed];
N81 -> N24[label="",style=dashed];
N81 -> N41[label="",style=dashed];
N81 -> N65[label="",style=dashed];
N81 -> N82[label="",style=dashed];
N84 -> N88[label="",style=dashed];
N86 -> N41[label="",style=dashed];
N86 -> N61[label="",style=dashed];
N88 -> N41[label="",style=dashed];
N88 -> N65[label="",style=dashed];
}
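The graph above is plain Graphviz DOT: each crate is declared once as a box-shaped node (N<i>[label="<crate>",shape=box]) and each dependency is drawn as a dashed edge between two node ids. A minimal sketch in the same notation, for readers unfamiliar with DOT; the N0 label below is a placeholder (N0 is defined in an earlier hunk), while the chain -> serialization edge is taken directly from the graph above:

digraph dependencies {
    // root node; "root-crate" is a hypothetical label, not taken from this diff
    N0[label="root-crate",shape=box];
    N2[label="chain",shape=box];
    N40[label="serialization",shape=box];
    // dashed edges mark dependency relations, matching the style used above
    N0 -> N2[label="",style=dashed];
    N2 -> N40[label="",style=dashed];
}

Rendering such a file with Graphviz (for example: dot -Tpng deps.dot -o deps.png, where the file name is illustrative) produces an image like the binary one updated in this commit.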

Binary image file not shown (before: 722 KiB, after: 848 KiB).