Merge pull request #347 from ethcore/sync_v2

Sync refactoring
Svyatoslav Nikolsky 2016-12-27 14:33:42 +03:00 committed by GitHub
commit a0020bbda0
56 changed files with 6361 additions and 6158 deletions

View File

@ -4,14 +4,15 @@ use ser::{Deserializable, Reader, Error as ReaderError};
use transaction::Transaction; use transaction::Transaction;
use read_and_hash::ReadAndHash; use read_and_hash::ReadAndHash;
#[derive(Debug, Clone)] #[derive(Debug, Default, Clone)]
pub struct IndexedTransaction { pub struct IndexedTransaction {
pub hash: H256, pub hash: H256,
pub raw: Transaction, pub raw: Transaction,
} }
impl From<Transaction> for IndexedTransaction { impl<T> From<T> for IndexedTransaction where Transaction: From<T> {
fn from(tx: Transaction) -> Self { fn from(other: T) -> Self {
let tx = Transaction::from(other);
IndexedTransaction { IndexedTransaction {
hash: tx.hash(), hash: tx.hash(),
raw: tx, raw: tx,
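A usage sketch (not from the diff): with the blanket impl above, anything convertible to Transaction now converts to IndexedTransaction in one step; Transaction implements Default, as the tests later in this diff rely on.
let indexed: IndexedTransaction = Transaction::default().into(); // hash is computed once, up front
assert_eq!(indexed.hash, indexed.raw.hash());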

View File

@ -6,7 +6,7 @@ use ser::{
}; };
use common::{Port, IpAddress, Services}; use common::{Port, IpAddress, Services};
#[derive(Debug, PartialEq)] #[derive(Debug, Default, PartialEq)]
pub struct NetAddress { pub struct NetAddress {
pub services: Services, pub services: Services,
pub address: IpAddress, pub address: IpAddress,

View File

@ -50,6 +50,22 @@ pub struct InventoryVector {
pub hash: H256, pub hash: H256,
} }
impl InventoryVector {
pub fn tx(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageTx,
hash: hash,
}
}
pub fn block(hash: H256) -> Self {
InventoryVector {
inv_type: InventoryType::MessageBlock,
hash: hash,
}
}
}
impl Serializable for InventoryVector { impl Serializable for InventoryVector {
fn serialize(&self, stream: &mut Stream) { fn serialize(&self, stream: &mut Stream) {
stream stream
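A usage sketch for the new constructors (not from the diff; H256::default() stands in for real hashes):
let items = vec![
	InventoryVector::tx(H256::default()),
	InventoryVector::block(H256::default()),
];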

View File

@ -5,6 +5,12 @@ use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError};
#[derive(Debug, PartialEq, Clone, Copy)] #[derive(Debug, PartialEq, Clone, Copy)]
pub struct IpAddress(net::IpAddr); pub struct IpAddress(net::IpAddr);
impl Default for IpAddress {
fn default() -> Self {
IpAddress(net::IpAddr::V4(net::Ipv4Addr::new(0, 0, 0, 0)))
}
}
impl From<net::IpAddr> for IpAddress { impl From<net::IpAddr> for IpAddress {
fn from(ip: net::IpAddr) -> Self { fn from(ip: net::IpAddr) -> Self {
IpAddress(ip) IpAddress(ip)

View File

@ -2,7 +2,7 @@ use std::io;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError}; use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError};
#[derive(Debug, PartialEq, Clone, Copy)] #[derive(Debug, Default, PartialEq, Clone, Copy)]
pub struct Port(u16); pub struct Port(u16);
impl From<u16> for Port { impl From<u16> for Port {

View File

@ -8,6 +8,14 @@ pub struct Block {
pub block: ChainBlock, pub block: ChainBlock,
} }
impl Block {
pub fn with_block(block: ChainBlock) -> Self {
Block {
block: block,
}
}
}
impl Payload for Block { impl Payload for Block {
fn version() -> u32 { fn version() -> u32 {
0 0

View File

@ -7,13 +7,21 @@ pub struct FeeFilter {
pub fee_rate: u64, pub fee_rate: u64,
} }
impl FeeFilter {
pub fn with_fee_rate(fee_rate: u64) -> Self {
FeeFilter {
fee_rate: fee_rate,
}
}
}
impl Payload for FeeFilter { impl Payload for FeeFilter {
fn version() -> u32 { fn version() -> u32 {
70013 70013
} }
fn command() -> &'static str { fn command() -> &'static str {
"cmpctblock" "feefilter"
} }
fn deserialize_payload<T>(reader: &mut Reader<T>, _version: u32) -> MessageResult<Self> where T: io::Read { fn deserialize_payload<T>(reader: &mut Reader<T>, _version: u32) -> MessageResult<Self> where T: io::Read {
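A usage sketch for the constructor added above (not from the diff; the feefilter rate is in satoshis per 1000 bytes, per BIP 133):
let filter = FeeFilter::with_fee_rate(1_000);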

View File

@ -3,6 +3,8 @@ use bytes::Bytes;
use ser::{Stream, Reader}; use ser::{Stream, Reader};
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const FILTERADD_MAX_DATA_LEN: usize = 520;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct FilterAdd { pub struct FilterAdd {
// TODO: check how this should be serialized // TODO: check how this should be serialized

View File

@ -3,6 +3,9 @@ use bytes::Bytes;
use ser::{Serializable, Deserializable, Stream, Reader, Error as ReaderError}; use ser::{Serializable, Deserializable, Stream, Reader, Error as ReaderError};
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const FILTERLOAD_MAX_FILTER_LEN: usize = 36_000;
pub const FILTERLOAD_MAX_HASH_FUNCS: usize = 50;
#[derive(Debug, PartialEq, Clone, Copy)] #[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u8)] #[repr(u8)]
/// Controls how the filter is updated after match is found. /// Controls how the filter is updated after match is found.

View File

@ -3,6 +3,8 @@ use hash::H256;
use ser::{Stream, Reader}; use ser::{Stream, Reader};
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const GETBLOCKS_MAX_RESPONSE_HASHES: usize = 500;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct GetBlocks { pub struct GetBlocks {
pub version: u32, pub version: u32,

View File

@ -3,11 +3,21 @@ use ser::{Stream, Reader};
use common::InventoryVector; use common::InventoryVector;
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const GETDATA_MAX_INVENTORY_LEN: usize = 50_000;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct GetData { pub struct GetData {
pub inventory: Vec<InventoryVector>, pub inventory: Vec<InventoryVector>,
} }
impl GetData {
pub fn with_inventory(inventory: Vec<InventoryVector>) -> Self {
GetData {
inventory: inventory,
}
}
}
impl Payload for GetData { impl Payload for GetData {
fn version() -> u32 { fn version() -> u32 {
0 0

View File

@ -3,6 +3,8 @@ use hash::H256;
use ser::{Stream, Reader}; use ser::{Stream, Reader};
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const GETHEADERS_MAX_RESPONSE_HEADERS: usize = 2_000;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct GetHeaders { pub struct GetHeaders {
pub version: u32, pub version: u32,
@ -10,6 +12,16 @@ pub struct GetHeaders {
pub hash_stop: H256, pub hash_stop: H256,
} }
impl GetHeaders {
pub fn with_block_locator_hashes(block_locator_hashes: Vec<H256>) -> Self {
GetHeaders {
version: 0, // this field is ignored by implementations
block_locator_hashes: block_locator_hashes,
hash_stop: H256::default(),
}
}
}
impl Payload for GetHeaders { impl Payload for GetHeaders {
fn version() -> u32 { fn version() -> u32 {
0 0
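A usage sketch for the new helper (not from the diff; best_block_hash is an assumed H256). hash_stop stays H256::default(), i.e. no stop hash, so peers reply with up to GETHEADERS_MAX_RESPONSE_HEADERS headers:
let request = GetHeaders::with_block_locator_hashes(vec![best_block_hash]);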

View File

@ -3,11 +3,21 @@ use chain::BlockHeader;
use ser::{Stream, Reader, Serializable, Deserializable, CompactInteger, Error as ReaderError}; use ser::{Stream, Reader, Serializable, Deserializable, CompactInteger, Error as ReaderError};
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const HEADERS_MAX_HEADERS_LEN: usize = 2000;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct Headers { pub struct Headers {
pub headers: Vec<BlockHeader>, pub headers: Vec<BlockHeader>,
} }
impl Headers {
pub fn with_headers(headers: Vec<BlockHeader>) -> Self {
Headers {
headers: headers,
}
}
}
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
struct HeaderWithTxnCount { struct HeaderWithTxnCount {
header: BlockHeader, header: BlockHeader,

View File

@ -3,11 +3,21 @@ use ser::{Stream, Reader};
use common::InventoryVector; use common::InventoryVector;
use {Payload, MessageResult}; use {Payload, MessageResult};
pub const INV_MAX_INVENTORY_LEN: usize = 50_000;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct Inv { pub struct Inv {
pub inventory: Vec<InventoryVector>, pub inventory: Vec<InventoryVector>,
} }
impl Inv {
pub fn with_inventory(inventory: Vec<InventoryVector>) -> Self {
Inv {
inventory: inventory,
}
}
}
impl Payload for Inv { impl Payload for Inv {
fn version() -> u32 { fn version() -> u32 {
0 0
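A sketch combining the new constructor with the limit above (not from the diff):
let inv = Inv::with_inventory(vec![InventoryVector::tx(H256::default())]);
assert!(inv.inventory.len() <= INV_MAX_INVENTORY_LEN);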

View File

@ -30,17 +30,17 @@ pub use self::block::Block;
pub use self::blocktxn::BlockTxn; pub use self::blocktxn::BlockTxn;
pub use self::compactblock::CompactBlock; pub use self::compactblock::CompactBlock;
pub use self::feefilter::FeeFilter; pub use self::feefilter::FeeFilter;
pub use self::filterload::FilterLoad; pub use self::filterload::{FilterLoad, FILTERLOAD_MAX_FILTER_LEN, FILTERLOAD_MAX_HASH_FUNCS};
pub use self::filterload::FilterFlags; pub use self::filterload::FilterFlags;
pub use self::filterclear::FilterClear; pub use self::filterclear::FilterClear;
pub use self::filteradd::FilterAdd; pub use self::filteradd::{FilterAdd, FILTERADD_MAX_DATA_LEN};
pub use self::getaddr::GetAddr; pub use self::getaddr::GetAddr;
pub use self::getblocks::GetBlocks; pub use self::getblocks::{GetBlocks, GETBLOCKS_MAX_RESPONSE_HASHES};
pub use self::getblocktxn::GetBlockTxn; pub use self::getblocktxn::GetBlockTxn;
pub use self::getdata::GetData; pub use self::getdata::{GetData, GETDATA_MAX_INVENTORY_LEN};
pub use self::getheaders::GetHeaders; pub use self::getheaders::{GetHeaders, GETHEADERS_MAX_RESPONSE_HEADERS};
pub use self::headers::Headers; pub use self::headers::{Headers, HEADERS_MAX_HEADERS_LEN};
pub use self::inv::Inv; pub use self::inv::{Inv, INV_MAX_INVENTORY_LEN};
pub use self::mempool::MemPool; pub use self::mempool::MemPool;
pub use self::merkle_block::MerkleBlock; pub use self::merkle_block::MerkleBlock;
pub use self::notfound::NotFound; pub use self::notfound::NotFound;

View File

@ -8,6 +8,14 @@ pub struct NotFound {
pub inventory: Vec<InventoryVector>, pub inventory: Vec<InventoryVector>,
} }
impl NotFound {
pub fn with_inventory(inventory: Vec<InventoryVector>) -> Self {
NotFound {
inventory: inventory,
}
}
}
impl Payload for NotFound { impl Payload for NotFound {
fn version() -> u32 { fn version() -> u32 {
0 0

View File

@ -8,6 +8,14 @@ pub struct Tx {
pub transaction: Transaction, pub transaction: Transaction,
} }
impl Tx {
pub fn with_transaction(transaction: Transaction) -> Self {
Tx {
transaction: transaction,
}
}
}
impl Payload for Tx { impl Payload for Tx {
fn version() -> u32 { fn version() -> u32 {
0 0

View File

@ -15,6 +15,12 @@ pub enum Version {
V70001(V0, V106, V70001), V70001(V0, V106, V70001),
} }
impl Default for Version {
fn default() -> Version {
Version::V0(V0::default())
}
}
impl Payload for Version { impl Payload for Version {
fn version() -> u32 { fn version() -> u32 {
0 0
@ -78,9 +84,17 @@ impl Version {
Version::V70001(ref s, _, _) => s.services, Version::V70001(ref s, _, _) => s.services,
} }
} }
pub fn relay_transactions(&self) -> bool {
match *self {
Version::V0(_) => true,
Version::V106(_, _) => true,
Version::V70001(_, _, ref v) => v.relay,
}
}
} }
#[derive(Debug, PartialEq)] #[derive(Debug, Default, PartialEq)]
pub struct V0 { pub struct V0 {
pub version: u32, pub version: u32,
pub services: Services, pub services: Services,
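A sketch of the new accessor (not from the diff; peer_version is an assumed types::Version value):
if peer_version.relay_transactions() {
	// pre-70001 peers default to relaying; V70001 peers opt in via the BIP 37 relay flag
}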

View File

@ -57,7 +57,7 @@ mod benchmarks {
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
let mut transactions = prepare_independent_transactions(iterations); let mut transactions = prepare_independent_transactions(iterations);
b.bench_n(iterations as u64, |b| b.iter(|| { b.bench_n(iterations as u64, |b| b.iter(|| {
pool.insert_verified(transactions.pop_front().unwrap()) pool.insert_verified(transactions.pop_front().unwrap().into())
})); }));
} }
@ -67,10 +67,10 @@ mod benchmarks {
let iterations = 100usize; let iterations = 100usize;
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
let mut transactions = prepare_dependent_transactions(iterations); let mut transactions = prepare_dependent_transactions(iterations);
pool.insert_verified(transactions.pop_front().unwrap()); pool.insert_verified(transactions.pop_front().unwrap().into());
b.bench_n(iterations as u64, |b| b.iter(|| { b.bench_n(iterations as u64, |b| b.iter(|| {
pool.insert_verified(transactions.pop_front().unwrap()) pool.insert_verified(transactions.pop_front().unwrap().into())
})); }));
} }
@ -84,10 +84,10 @@ mod benchmarks {
let iterations = 100usize; let iterations = 100usize;
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
let mut transactions = prepare_dependent_transactions(iterations); let mut transactions = prepare_dependent_transactions(iterations);
pool.insert_verified(transactions.pop_front().unwrap()); pool.insert_verified(transactions.pop_front().unwrap().into());
b.bench_n(iterations as u64, |b| b.iter(|| { b.bench_n(iterations as u64, |b| b.iter(|| {
pool.insert_verified(transactions.pop_back().unwrap()) pool.insert_verified(transactions.pop_back().unwrap().into())
})); }));
} }
@ -97,7 +97,7 @@ mod benchmarks {
let iterations = 100; let iterations = 100;
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
for transaction in prepare_independent_transactions(iterations) { for transaction in prepare_independent_transactions(iterations) {
pool.insert_verified(transaction) pool.insert_verified(transaction.into())
} }
b.bench_n(iterations as u64, |b| b.iter(|| { b.bench_n(iterations as u64, |b| b.iter(|| {
pool.remove_with_strategy(MemoryPoolOrderingStrategy::ByTimestamp) pool.remove_with_strategy(MemoryPoolOrderingStrategy::ByTimestamp)
@ -110,7 +110,7 @@ mod benchmarks {
let iterations = 100; let iterations = 100;
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
for transaction in prepare_dependent_transactions(iterations) { for transaction in prepare_dependent_transactions(iterations) {
pool.insert_verified(transaction) pool.insert_verified(transaction.into())
} }
b.bench_n(iterations as u64, |b| b.iter(|| { b.bench_n(iterations as u64, |b| b.iter(|| {
pool.remove_with_strategy(MemoryPoolOrderingStrategy::ByTimestamp) pool.remove_with_strategy(MemoryPoolOrderingStrategy::ByTimestamp)

View File

@ -8,7 +8,7 @@
use db::{TransactionProvider, PreviousTransactionOutputProvider}; use db::{TransactionProvider, PreviousTransactionOutputProvider};
use primitives::bytes::Bytes; use primitives::bytes::Bytes;
use primitives::hash::H256; use primitives::hash::H256;
use chain::{Transaction, OutPoint, TransactionOutput}; use chain::{IndexedTransaction, Transaction, OutPoint, TransactionOutput};
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
@ -466,9 +466,9 @@ impl Storage {
} }
} }
pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<Transaction>> { pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<IndexedTransaction>> {
let mut queue: VecDeque<OutPoint> = VecDeque::new(); let mut queue: VecDeque<OutPoint> = VecDeque::new();
let mut removed: Vec<Transaction> = Vec::new(); let mut removed: Vec<IndexedTransaction> = Vec::new();
queue.push_back(prevout.clone()); queue.push_back(prevout.clone());
while let Some(prevout) = queue.pop_front() { while let Some(prevout) = queue.pop_front() {
@ -478,14 +478,14 @@ impl Storage {
hash: entry_hash.clone(), hash: entry_hash.clone(),
index: idx as u32, index: idx as u32,
})); }));
removed.push(entry.transaction); removed.push(IndexedTransaction::new(entry.hash, entry.transaction));
} }
} }
Some(removed) Some(removed)
} }
pub fn remove_by_parent_hash(&mut self, h: &H256) -> Option<Vec<Transaction>> { pub fn remove_by_parent_hash(&mut self, h: &H256) -> Option<Vec<IndexedTransaction>> {
// this code will run only when ancestor transaction is inserted // this code will run only when ancestor transaction is inserted
// in memory pool after its descendants // in memory pool after its descendants
if let Some(mut descendants) = self.references.by_input.get(h).map(|d| d.iter().cloned().collect::<Vec<H256>>()) { if let Some(mut descendants) = self.references.by_input.get(h).map(|d| d.iter().cloned().collect::<Vec<H256>>()) {
@ -520,7 +520,7 @@ impl Storage {
// move all descendants out of storage for later insertion // move all descendants out of storage for later insertion
Some(all_descendants.into_iter() Some(all_descendants.into_iter()
.filter_map(|hash| self.remove_by_hash(hash).map(|entry| entry.transaction)) .filter_map(|hash| self.remove_by_hash(hash).map(|entry| IndexedTransaction::new(entry.hash, entry.transaction)))
.collect()) .collect())
} }
else { else {
@ -528,19 +528,21 @@ impl Storage {
} }
} }
pub fn remove_with_strategy(&mut self, strategy: OrderingStrategy) -> Option<Transaction> { pub fn remove_with_strategy(&mut self, strategy: OrderingStrategy) -> Option<IndexedTransaction> {
let top_hash = match strategy { let top_hash = match strategy {
OrderingStrategy::ByTimestamp => self.references.ordered.by_storage_index.iter().map(|entry| entry.hash.clone()).nth(0), OrderingStrategy::ByTimestamp => self.references.ordered.by_storage_index.iter().map(|entry| entry.hash.clone()).nth(0),
OrderingStrategy::ByTransactionScore => self.references.ordered.by_transaction_score.iter().map(|entry| entry.hash.clone()).nth(0), OrderingStrategy::ByTransactionScore => self.references.ordered.by_transaction_score.iter().map(|entry| entry.hash.clone()).nth(0),
OrderingStrategy::ByPackageScore => self.references.ordered.by_package_score.iter().map(|entry| entry.hash.clone()).nth(0), OrderingStrategy::ByPackageScore => self.references.ordered.by_package_score.iter().map(|entry| entry.hash.clone()).nth(0),
}; };
top_hash.map(|hash| self.remove_by_hash(&hash) top_hash.map(|hash| {
.expect("`hash` is read from `references`; entries in `references` have corresponging entries in `by_hash`; `remove_by_hash` removes entry from `by_hash`; qed") let entry = self.remove_by_hash(&hash)
.transaction) .expect("`hash` is read from `references`; entries in `references` have corresponging entries in `by_hash`; `remove_by_hash` removes entry from `by_hash`; qed");
IndexedTransaction::new(entry.hash, entry.transaction)
})
} }
pub fn remove_n_with_strategy(&mut self, mut n: usize, strategy: OrderingStrategy) -> Vec<Transaction> { pub fn remove_n_with_strategy(&mut self, mut n: usize, strategy: OrderingStrategy) -> Vec<IndexedTransaction> {
let mut result: Vec<Transaction> = Vec::new(); let mut result: Vec<IndexedTransaction> = Vec::new();
loop { loop {
if n == 0 { if n == 0 {
break; break;
@ -647,7 +649,7 @@ impl MemoryPool {
} }
/// Insert verified transaction to the `MemoryPool` /// Insert verified transaction to the `MemoryPool`
pub fn insert_verified(&mut self, t: Transaction) { pub fn insert_verified(&mut self, t: IndexedTransaction) {
let entry = self.make_entry(t); let entry = self.make_entry(t);
let descendants = self.storage.remove_by_parent_hash(&entry.hash); let descendants = self.storage.remove_by_parent_hash(&entry.hash);
self.storage.insert(entry); self.storage.insert(entry);
@ -676,7 +678,7 @@ impl MemoryPool {
} }
/// Removes transaction (and all its descendants) which has spent given output /// Removes transaction (and all its descendants) which has spent given output
pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<Transaction>> { pub fn remove_by_prevout(&mut self, prevout: &OutPoint) -> Option<Vec<IndexedTransaction>> {
self.storage.remove_by_prevout(prevout) self.storage.remove_by_prevout(prevout)
} }
@ -700,13 +702,13 @@ impl MemoryPool {
/// Removes the 'top' transaction from the `MemoryPool` using selected strategy. /// Removes the 'top' transaction from the `MemoryPool` using selected strategy.
/// Ancestors are always removed before descendant transactions. /// Ancestors are always removed before descendant transactions.
pub fn remove_with_strategy(&mut self, strategy: OrderingStrategy) -> Option<Transaction> { pub fn remove_with_strategy(&mut self, strategy: OrderingStrategy) -> Option<IndexedTransaction> {
self.storage.remove_with_strategy(strategy) self.storage.remove_with_strategy(strategy)
} }
/// Removes up to n transactions from the `MemoryPool`, using selected strategy. /// Removes up to n transactions from the `MemoryPool`, using selected strategy.
/// Ancestors are always removed before descendant transactions. /// Ancestors are always removed before descendant transactions.
pub fn remove_n_with_strategy(&mut self, n: usize, strategy: OrderingStrategy) -> Vec<Transaction> { pub fn remove_n_with_strategy(&mut self, n: usize, strategy: OrderingStrategy) -> Vec<IndexedTransaction> {
self.storage.remove_n_with_strategy(n, strategy) self.storage.remove_n_with_strategy(n, strategy)
} }
@ -745,15 +747,14 @@ impl MemoryPool {
self.storage.is_output_spent(prevout) self.storage.is_output_spent(prevout)
} }
fn make_entry(&mut self, t: Transaction) -> Entry { fn make_entry(&mut self, t: IndexedTransaction) -> Entry {
let hash = t.hash(); let ancestors = self.get_ancestors(&t.raw);
let ancestors = self.get_ancestors(&t); let size = self.get_transaction_size(&t.raw);
let size = self.get_transaction_size(&t);
let storage_index = self.get_storage_index(); let storage_index = self.get_storage_index();
let miner_fee = self.get_transaction_miner_fee(&t); let miner_fee = self.get_transaction_miner_fee(&t.raw);
Entry { Entry {
transaction: t, transaction: t.raw,
hash: hash, hash: t.hash,
ancestors: ancestors, ancestors: ancestors,
storage_index: storage_index, storage_index: storage_index,
size: size, size: size,
@ -872,7 +873,7 @@ mod tests {
fn to_memory_pool(chain: &mut ChainBuilder) -> MemoryPool { fn to_memory_pool(chain: &mut ChainBuilder) -> MemoryPool {
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
for transaction in chain.transactions.iter().cloned() { for transaction in chain.transactions.iter().cloned() {
pool.insert_verified(transaction); pool.insert_verified(transaction.into());
} }
pool pool
} }
@ -883,11 +884,11 @@ mod tests {
let size1 = pool.heap_size_of_children(); let size1 = pool.heap_size_of_children();
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
let size2 = pool.heap_size_of_children(); let size2 = pool.heap_size_of_children();
assert!(size2 > size1); assert!(size2 > size1);
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
let size3 = pool.heap_size_of_children(); let size3 = pool.heap_size_of_children();
assert!(size3 > size2); assert!(size3 > size2);
} }
@ -895,11 +896,11 @@ mod tests {
#[test] #[test]
fn test_memory_pool_insert_same_transaction() { fn test_memory_pool_insert_same_transaction() {
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
assert_eq!(pool.get_transactions_ids().len(), 1); assert_eq!(pool.get_transactions_ids().len(), 1);
// insert the same transaction again // insert the same transaction again
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
assert_eq!(pool.get_transactions_ids().len(), 1); assert_eq!(pool.get_transactions_ids().len(), 1);
} }
@ -909,7 +910,7 @@ mod tests {
assert_eq!(pool.read_with_strategy(OrderingStrategy::ByTimestamp), None); assert_eq!(pool.read_with_strategy(OrderingStrategy::ByTimestamp), None);
assert_eq!(pool.read_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![]); assert_eq!(pool.read_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![]);
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
assert_eq!(pool.read_with_strategy(OrderingStrategy::ByTimestamp), Some(Transaction::default().hash())); assert_eq!(pool.read_with_strategy(OrderingStrategy::ByTimestamp), Some(Transaction::default().hash()));
assert_eq!(pool.read_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![Transaction::default().hash()]); assert_eq!(pool.read_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![Transaction::default().hash()]);
assert_eq!(pool.read_with_strategy(OrderingStrategy::ByTimestamp), Some(Transaction::default().hash())); assert_eq!(pool.read_with_strategy(OrderingStrategy::ByTimestamp), Some(Transaction::default().hash()));
@ -922,15 +923,15 @@ mod tests {
assert_eq!(pool.remove_with_strategy(OrderingStrategy::ByTimestamp), None); assert_eq!(pool.remove_with_strategy(OrderingStrategy::ByTimestamp), None);
assert_eq!(pool.remove_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![]); assert_eq!(pool.remove_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![]);
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
let removed = pool.remove_with_strategy(OrderingStrategy::ByTimestamp); let removed = pool.remove_with_strategy(OrderingStrategy::ByTimestamp);
assert!(removed.is_some()); assert!(removed.is_some());
assert_eq!(removed.unwrap(), Transaction::default()); assert_eq!(removed.unwrap(), Transaction::default().into());
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
let removed = pool.remove_n_with_strategy(100, OrderingStrategy::ByTimestamp); let removed = pool.remove_n_with_strategy(100, OrderingStrategy::ByTimestamp);
assert_eq!(removed.len(), 1); assert_eq!(removed.len(), 1);
assert_eq!(removed[0], Transaction::default()); assert_eq!(removed[0], Transaction::default().into());
assert_eq!(pool.remove_with_strategy(OrderingStrategy::ByTimestamp), None); assert_eq!(pool.remove_with_strategy(OrderingStrategy::ByTimestamp), None);
assert_eq!(pool.remove_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![]); assert_eq!(pool.remove_n_with_strategy(100, OrderingStrategy::ByTimestamp), vec![]);
@ -940,7 +941,7 @@ mod tests {
fn test_memory_pool_remove_by_hash() { fn test_memory_pool_remove_by_hash() {
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
pool.insert_verified(Transaction::default()); pool.insert_verified(Transaction::default().into());
assert_eq!(pool.get_transactions_ids().len(), 1); assert_eq!(pool.get_transactions_ids().len(), 1);
// remove and check remaining transactions // remove and check remaining transactions
@ -963,16 +964,16 @@ mod tests {
// insert child, then parent // insert child, then parent
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
pool.insert_verified(chain.at(2)); // timestamp 0 pool.insert_verified(chain.at(2).into()); // timestamp 0
pool.insert_verified(chain.at(1)); // timestamp 1 pool.insert_verified(chain.at(1).into()); // timestamp 1
pool.insert_verified(chain.at(0)); // timestamp 2 pool.insert_verified(chain.at(0).into()); // timestamp 2
// check that parent transaction was removed before child transaction // check that parent transaction was removed before child transaction
let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp); let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp);
assert_eq!(transactions.len(), 3); assert_eq!(transactions.len(), 3);
assert_eq!(transactions[0], chain.at(0)); assert_eq!(transactions[0], chain.at(0).into());
assert_eq!(transactions[1], chain.at(1)); assert_eq!(transactions[1], chain.at(1).into());
assert_eq!(transactions[2], chain.at(2)); assert_eq!(transactions[2], chain.at(2).into());
} }
#[test] #[test]
@ -988,9 +989,9 @@ mod tests {
// check that parent transaction was removed before child transaction // check that parent transaction was removed before child transaction
let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp); let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp);
assert_eq!(transactions.len(), 3); assert_eq!(transactions.len(), 3);
assert_eq!(transactions[0], chain.at(0)); assert_eq!(transactions[0], chain.at(0).into());
assert_eq!(transactions[1], chain.at(1)); assert_eq!(transactions[1], chain.at(1).into());
assert_eq!(transactions[2], chain.at(2)); assert_eq!(transactions[2], chain.at(2).into());
} }
#[test] #[test]
@ -1008,12 +1009,12 @@ mod tests {
assert_eq!(pool.get_transactions_ids().len(), 2); assert_eq!(pool.get_transactions_ids().len(), 2);
// insert child transaction back to the pool & assert transactions are removed in correct order // insert child transaction back to the pool & assert transactions are removed in correct order
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTransactionScore); let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTransactionScore);
assert_eq!(transactions.len(), 3); assert_eq!(transactions.len(), 3);
assert_eq!(transactions[0], chain.at(0)); assert_eq!(transactions[0], chain.at(0).into());
assert_eq!(transactions[1], chain.at(1)); assert_eq!(transactions[1], chain.at(1).into());
assert_eq!(transactions[2], chain.at(2)); assert_eq!(transactions[2], chain.at(2).into());
} }
#[test] #[test]
@ -1027,7 +1028,7 @@ mod tests {
let mut transactions_size = 0; let mut transactions_size = 0;
for transaction_index in 0..4 { for transaction_index in 0..4 {
pool.insert_verified(chain.at(transaction_index)); pool.insert_verified(chain.at(transaction_index).into());
transactions_size += chain.size(transaction_index); transactions_size += chain.size(transaction_index);
let info = pool.information(); let info = pool.information();
@ -1048,15 +1049,15 @@ mod tests {
// remove transactions [0, 3, 1] (timestamps: [0, 0, 1]) {conflict resolved by hash} // remove transactions [0, 3, 1] (timestamps: [0, 0, 1]) {conflict resolved by hash}
let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp); let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp);
assert_eq!(transactions.len(), 3); assert_eq!(transactions.len(), 3);
assert_eq!(transactions[0], chain.at(0)); assert_eq!(transactions[0], chain.at(0).into());
assert_eq!(transactions[1], chain.at(3)); assert_eq!(transactions[1], chain.at(3).into());
assert_eq!(transactions[2], chain.at(1)); assert_eq!(transactions[2], chain.at(1).into());
assert_eq!(pool.get_transactions_ids().len(), 1); assert_eq!(pool.get_transactions_ids().len(), 1);
// remove transactions [2] (timestamps: [2]) // remove transactions [2] (timestamps: [2])
let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp); let transactions = pool.remove_n_with_strategy(3, OrderingStrategy::ByTimestamp);
assert_eq!(transactions.len(), 1); assert_eq!(transactions.len(), 1);
assert_eq!(transactions[0], chain.at(2)); assert_eq!(transactions[0], chain.at(2).into());
} }
#[test] #[test]
@ -1070,10 +1071,10 @@ mod tests {
let transactions = pool.remove_n_with_strategy(4, OrderingStrategy::ByTransactionScore); let transactions = pool.remove_n_with_strategy(4, OrderingStrategy::ByTransactionScore);
assert_eq!(transactions.len(), 4); assert_eq!(transactions.len(), 4);
assert_eq!(transactions[0], chain.at(1)); assert_eq!(transactions[0], chain.at(1).into());
assert_eq!(transactions[1], chain.at(2)); assert_eq!(transactions[1], chain.at(2).into());
assert_eq!(transactions[2], chain.at(3)); assert_eq!(transactions[2], chain.at(3).into());
assert_eq!(transactions[3], chain.at(0)); assert_eq!(transactions[3], chain.at(0).into());
} }
#[test] #[test]
@ -1092,10 +1093,10 @@ mod tests {
let transactions = pool.remove_n_with_strategy(4, OrderingStrategy::ByTransactionScore); let transactions = pool.remove_n_with_strategy(4, OrderingStrategy::ByTransactionScore);
assert_eq!(transactions.len(), 4); assert_eq!(transactions.len(), 4);
assert_eq!(transactions[0], chain.at(3)); assert_eq!(transactions[0], chain.at(3).into());
assert_eq!(transactions[1], chain.at(2)); assert_eq!(transactions[1], chain.at(2).into());
assert_eq!(transactions[2], chain.at(0)); assert_eq!(transactions[2], chain.at(0).into());
assert_eq!(transactions[3], chain.at(1)); assert_eq!(transactions[3], chain.at(1).into());
} }
#[test] #[test]
@ -1115,8 +1116,8 @@ mod tests {
// < // <
// score({ transaction2 }) = 35/60 // score({ transaction2 }) = 35/60
let expected = vec![chain.hash(2), chain.hash(0)]; let expected = vec![chain.hash(2), chain.hash(0)];
pool.insert_verified(chain.at(0)); pool.insert_verified(chain.at(0).into());
pool.insert_verified(chain.at(2)); pool.insert_verified(chain.at(2).into());
assert_eq!(pool.read_n_with_strategy(2, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(2, OrderingStrategy::ByPackageScore), expected);
// { transaction0, transaction1 } now have bigger score than { transaction2 }: // { transaction0, transaction1 } now have bigger score than { transaction2 }:
@ -1125,7 +1126,7 @@ mod tests {
// score({ transaction2 }) = 35/60 ~ 0.583 // score({ transaction2 }) = 35/60 ~ 0.583
// => chain1 is boosted // => chain1 is boosted
// => so transaction with lesser individual score (but with bigger package score) is mined first // => so transaction with lesser individual score (but with bigger package score) is mined first
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
let expected = vec![chain.hash(0), chain.hash(1), chain.hash(2)]; let expected = vec![chain.hash(0), chain.hash(1), chain.hash(2)];
assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByPackageScore), expected);
@ -1134,7 +1135,7 @@ mod tests {
// > // >
// score({ transaction2, transaction3 }) = (35 + 10) / 120 ~ 0.375 // score({ transaction2, transaction3 }) = (35 + 10) / 120 ~ 0.375
// => chain2 is not boosted // => chain2 is not boosted
pool.insert_verified(chain.at(3)); pool.insert_verified(chain.at(3).into());
let expected = vec![chain.hash(0), chain.hash(1), chain.hash(2), chain.hash(3)]; let expected = vec![chain.hash(0), chain.hash(1), chain.hash(2), chain.hash(3)];
assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByPackageScore), expected);
@ -1143,7 +1144,7 @@ mod tests {
// < // <
// score({ transaction2, transaction3, transaction4 }) = (35 + 10 + 100) / 180 ~ 0.806 // score({ transaction2, transaction3, transaction4 }) = (35 + 10 + 100) / 180 ~ 0.806
// => chain2 is boosted // => chain2 is boosted
pool.insert_verified(chain.at(4)); pool.insert_verified(chain.at(4).into());
let expected = vec![chain.hash(2), chain.hash(3), chain.hash(4), chain.hash(0), chain.hash(1)]; let expected = vec![chain.hash(2), chain.hash(3), chain.hash(4), chain.hash(0), chain.hash(1)];
assert_eq!(pool.read_n_with_strategy(5, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(5, OrderingStrategy::ByPackageScore), expected);
@ -1167,15 +1168,15 @@ mod tests {
// chain1_parent is not linked to the chain1_grandchild // chain1_parent is not linked to the chain1_grandchild
// => they are in separate chains now // => they are in separate chains now
// => chain2 has greater score than both of these chains // => chain2 has greater score than both of these chains
pool.insert_verified(chain.at(3)); pool.insert_verified(chain.at(3).into());
pool.insert_verified(chain.at(0)); pool.insert_verified(chain.at(0).into());
pool.insert_verified(chain.at(2)); pool.insert_verified(chain.at(2).into());
let expected = vec![chain.hash(3), chain.hash(0), chain.hash(2)]; let expected = vec![chain.hash(3), chain.hash(0), chain.hash(2)];
assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByPackageScore), expected);
// insert the missing transaction to link together chain1 // insert the missing transaction to link together chain1
// => it now will have better score than chain2 // => it now will have better score than chain2
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
let expected = vec![chain.hash(0), chain.hash(1), chain.hash(3), chain.hash(2)]; let expected = vec![chain.hash(0), chain.hash(1), chain.hash(3), chain.hash(2)];
assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByPackageScore), expected);
} }
@ -1199,9 +1200,9 @@ mod tests {
// insert level1 + level2. There are two chains: // insert level1 + level2. There are two chains:
// score({ transaction3, transaction5 }) = 40 + 60 // score({ transaction3, transaction5 }) = 40 + 60
// score({ transaction4, transaction5 }) = 50 + 60 // score({ transaction4, transaction5 }) = 50 + 60
pool.insert_verified(chain.at(5)); pool.insert_verified(chain.at(5).into());
pool.insert_verified(chain.at(3)); pool.insert_verified(chain.at(3).into());
pool.insert_verified(chain.at(4)); pool.insert_verified(chain.at(4).into());
let expected = vec![chain.hash(4), chain.hash(3), chain.hash(5)]; let expected = vec![chain.hash(4), chain.hash(3), chain.hash(5)];
assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByTransactionScore), expected); assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByTransactionScore), expected);
assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(3, OrderingStrategy::ByPackageScore), expected);
@ -1210,7 +1211,7 @@ mod tests {
// score({ transaction3, transaction5 }) = 40 + 60 // score({ transaction3, transaction5 }) = 40 + 60
// score({ transaction4, transaction5 }) = 50 + 60 // score({ transaction4, transaction5 }) = 50 + 60
// score({ transaction2, transaction5 }) = 30 + 60 // score({ transaction2, transaction5 }) = 30 + 60
pool.insert_verified(chain.at(2)); pool.insert_verified(chain.at(2).into());
let expected = vec![chain.hash(4), chain.hash(3), chain.hash(2), chain.hash(5)]; let expected = vec![chain.hash(4), chain.hash(3), chain.hash(2), chain.hash(5)];
assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByTransactionScore), expected); assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByTransactionScore), expected);
assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(4, OrderingStrategy::ByPackageScore), expected);
@ -1220,7 +1221,7 @@ mod tests {
// score({ transaction1, transaction4, transaction5 }) = 20 + 50 + 60 / 3 ~ 0.333 // score({ transaction1, transaction4, transaction5 }) = 20 + 50 + 60 / 3 ~ 0.333
// score({ transaction2, transaction5 }) = 30 + 60 / 2 = 0.45 // score({ transaction2, transaction5 }) = 30 + 60 / 2 = 0.45
// but second chain will be removed first anyway because previous #1 ({ transaction4, transaction5}) now depends on level 01 // but second chain will be removed first anyway because previous #1 ({ transaction4, transaction5}) now depends on level 01
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
let expected = vec![chain.hash(3), chain.hash(2), chain.hash(1), chain.hash(4), chain.hash(5)]; let expected = vec![chain.hash(3), chain.hash(2), chain.hash(1), chain.hash(4), chain.hash(5)];
assert_eq!(pool.read_n_with_strategy(5, OrderingStrategy::ByTransactionScore), expected); assert_eq!(pool.read_n_with_strategy(5, OrderingStrategy::ByTransactionScore), expected);
assert_eq!(pool.read_n_with_strategy(5, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(5, OrderingStrategy::ByPackageScore), expected);
@ -1230,7 +1231,7 @@ mod tests {
// score({ transaction0, transaction4, transaction5 }) = (10 + 50 + 60) / (60 + 60 + 142) ~ 0.458 // score({ transaction0, transaction4, transaction5 }) = (10 + 50 + 60) / (60 + 60 + 142) ~ 0.458
// score({ transaction1, transaction3, transaction5 }) = (20 + 50 + 60) / (60 + 60 + 142) ~ 0.496 // score({ transaction1, transaction3, transaction5 }) = (20 + 50 + 60) / (60 + 60 + 142) ~ 0.496
// score({ transaction2, transaction5 }) = (30 + 60) / (60 + 142) ~ 0.445 // score({ transaction2, transaction5 }) = (30 + 60) / (60 + 142) ~ 0.445
pool.insert_verified(chain.at(0)); pool.insert_verified(chain.at(0).into());
let expected = vec![chain.hash(2), chain.hash(1), chain.hash(0), chain.hash(4), chain.hash(3), chain.hash(5)]; let expected = vec![chain.hash(2), chain.hash(1), chain.hash(0), chain.hash(4), chain.hash(3), chain.hash(5)];
assert_eq!(pool.read_n_with_strategy(6, OrderingStrategy::ByTransactionScore), expected); assert_eq!(pool.read_n_with_strategy(6, OrderingStrategy::ByTransactionScore), expected);
assert_eq!(pool.read_n_with_strategy(6, OrderingStrategy::ByPackageScore), expected); assert_eq!(pool.read_n_with_strategy(6, OrderingStrategy::ByPackageScore), expected);
@ -1251,17 +1252,17 @@ mod tests {
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, }));
pool.insert_verified(chain.at(0)); pool.insert_verified(chain.at(0).into());
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(0), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(0), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, }));
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(0), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(0), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, }));
pool.insert_verified(chain.at(2)); pool.insert_verified(chain.at(2).into());
assert!(pool.is_spent(&OutPoint { hash: chain.hash(0), index: 0, })); assert!(pool.is_spent(&OutPoint { hash: chain.hash(0), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(1), index: 0, }));
assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, })); assert!(!pool.is_spent(&OutPoint { hash: chain.hash(2), index: 0, }));
@ -1284,13 +1285,13 @@ mod tests {
.reset().add_output(40).store(chain); // transaction3 .reset().add_output(40).store(chain); // transaction3
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
pool.insert_verified(chain.at(0)); pool.insert_verified(chain.at(0).into());
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
pool.insert_verified(chain.at(2)); pool.insert_verified(chain.at(2).into());
pool.insert_verified(chain.at(3)); pool.insert_verified(chain.at(3).into());
assert_eq!(pool.information().transactions_count, 4); assert_eq!(pool.information().transactions_count, 4);
assert_eq!(pool.remove_by_prevout(&OutPoint { hash: chain.hash(0), index: 0 }), Some(vec![chain.at(1), chain.at(2)])); assert_eq!(pool.remove_by_prevout(&OutPoint { hash: chain.hash(0), index: 0 }), Some(vec![chain.at(1).into(), chain.at(2).into()]));
assert_eq!(pool.information().transactions_count, 2); assert_eq!(pool.information().transactions_count, 2);
} }
@ -1307,9 +1308,9 @@ mod tests {
.reset().set_input(&chain.at(0), 2).add_output(70).store(chain); // no double spend: t0[2] -> t6 .reset().set_input(&chain.at(0), 2).add_output(70).store(chain); // no double spend: t0[2] -> t6
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
pool.insert_verified(chain.at(2)); pool.insert_verified(chain.at(2).into());
pool.insert_verified(chain.at(4)); pool.insert_verified(chain.at(4).into());
// when output is spent by nonfinal transaction // when output is spent by nonfinal transaction
match pool.check_double_spend(&chain.at(3)) { match pool.check_double_spend(&chain.at(3)) {
DoubleSpendCheckResult::NonFinalDoubleSpend(set) => { DoubleSpendCheckResult::NonFinalDoubleSpend(set) => {
@ -1352,7 +1353,7 @@ mod tests {
.reset().set_input(&chain.at(0), 0).add_output(40).store(chain); // good replacement: t0[0] -> t2 .reset().set_input(&chain.at(0), 0).add_output(40).store(chain); // good replacement: t0[0] -> t2
let mut pool = MemoryPool::new(); let mut pool = MemoryPool::new();
pool.insert_verified(chain.at(1)); pool.insert_verified(chain.at(1).into());
// when output is spent by nonfinal transaction // when output is spent by nonfinal transaction
match pool.check_double_spend(&chain.at(2)) { match pool.check_double_spend(&chain.at(2)) {
@ -1377,4 +1378,16 @@ mod tests {
} }
} }
#[test]
fn test_memory_pool_is_spent() {
let tx1: Transaction = TransactionBuilder::with_default_input(0).into();
let tx2: Transaction = TransactionBuilder::with_default_input(1).into();
let out1 = tx1.inputs[0].previous_output.clone();
let out2 = tx2.inputs[0].previous_output.clone();
let mut memory_pool = MemoryPool::new();
memory_pool.insert_verified(tx1.into());
assert!(memory_pool.is_spent(&out1));
assert!(!memory_pool.is_spent(&out2));
}
} }
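A sketch of the reworked API, which now takes and returns IndexedTransaction (not from the diff):
let mut pool = MemoryPool::new();
pool.insert_verified(Transaction::default().into());
let top: Option<IndexedTransaction> = pool.remove_with_strategy(OrderingStrategy::ByTransactionScore);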

View File

@ -5,17 +5,15 @@ use protocol::Protocol;
use net::PeerContext; use net::PeerContext;
pub type InboundSyncConnectionRef = Box<InboundSyncConnection>; pub type InboundSyncConnectionRef = Box<InboundSyncConnection>;
pub type OutboundSyncConnectionRef = Box<OutboundSyncConnection>; pub type OutboundSyncConnectionRef = Arc<OutboundSyncConnection>;
pub type LocalSyncNodeRef = Box<LocalSyncNode>; pub type LocalSyncNodeRef = Box<LocalSyncNode>;
// TODO: use this when constructing the Version message (start_height field)
pub trait LocalSyncNode : Send + Sync { pub trait LocalSyncNode : Send + Sync {
fn start_height(&self) -> i32;
fn create_sync_session(&self, height: i32, outbound: OutboundSyncConnectionRef) -> InboundSyncConnectionRef; fn create_sync_session(&self, height: i32, outbound: OutboundSyncConnectionRef) -> InboundSyncConnectionRef;
} }
pub trait InboundSyncConnection : Send + Sync { pub trait InboundSyncConnection : Send + Sync {
fn start_sync_session(&self, version: u32); fn start_sync_session(&self, version: types::Version);
fn close_session(&self); fn close_session(&self);
fn on_inventory(&self, message: types::Inv); fn on_inventory(&self, message: types::Inv);
fn on_getdata(&self, message: types::GetData); fn on_getdata(&self, message: types::GetData);
@ -73,10 +71,6 @@ impl OutboundSync {
context: context, context: context,
} }
} }
pub fn boxed(self) -> Box<OutboundSyncConnection> {
Box::new(self)
}
} }
impl OutboundSyncConnection for OutboundSync { impl OutboundSyncConnection for OutboundSync {
@ -176,7 +170,7 @@ pub struct SyncProtocol {
impl SyncProtocol { impl SyncProtocol {
pub fn new(context: Arc<PeerContext>) -> Self { pub fn new(context: Arc<PeerContext>) -> Self {
let outbound_connection = OutboundSync::new(context.clone()).boxed(); let outbound_connection = Arc::new(OutboundSync::new(context.clone()));
let inbound_connection = context.global().create_sync_session(0, outbound_connection); let inbound_connection = context.global().create_sync_session(0, outbound_connection);
SyncProtocol { SyncProtocol {
inbound_connection: inbound_connection, inbound_connection: inbound_connection,
@ -187,7 +181,20 @@ impl SyncProtocol {
impl Protocol for SyncProtocol { impl Protocol for SyncProtocol {
fn initialize(&mut self) { fn initialize(&mut self) {
self.inbound_connection.start_sync_session(self.context.info().version); // TODO
use message::common;
let version = types::Version::V0(types::version::V0 {
version: self.context.info().version,
services: common::Services::default(),
timestamp: 0,
receiver: common::NetAddress {
services: common::Services::default(),
address: common::IpAddress::from("127.0.0.1"),
port: common::Port::from(0),
},
});
self.inbound_connection.start_sync_session(version);
} }
fn on_message(&mut self, command: &Command, payload: &Bytes) -> Result<(), Error> { fn on_message(&mut self, command: &Command, payload: &Bytes) -> Result<(), Error> {

View File

@ -1,5 +1,5 @@
use std::net::SocketAddr; use std::net::SocketAddr;
use sync::{create_local_sync_node, create_sync_connection_factory}; use sync::{create_sync_peers, create_local_sync_node, create_sync_connection_factory};
use message::Services; use message::Services;
use util::{open_db, init_db, node_table_path}; use util::{open_db, init_db, node_table_path};
use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM}; use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM};
@ -34,8 +34,9 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
}; };
let sync_handle = el.handle(); let sync_handle = el.handle();
let local_sync_node = create_local_sync_node(&sync_handle, cfg.magic, db.clone()); let sync_peers = create_sync_peers();
let sync_connection_factory = create_sync_connection_factory(local_sync_node.clone()); let local_sync_node = create_local_sync_node(&sync_handle, cfg.magic, db.clone(), sync_peers.clone());
let sync_connection_factory = create_sync_connection_factory(sync_peers.clone(), local_sync_node.clone());
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string())); let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
let rpc_deps = rpc::Dependencies { let rpc_deps = rpc::Dependencies {

View File

@ -19,6 +19,10 @@ impl Bytes {
pub fn take(self) -> Vec<u8> { pub fn take(self) -> Vec<u8> {
self.0 self.0
} }
pub fn len(&self) -> usize {
self.0.len()
}
} }
impl<'a> From<&'a [u8]> for Bytes { impl<'a> From<&'a [u8]> for Bytes {
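The new len() backs the message-size limits introduced earlier in this diff; a hypothetical check (FILTERADD_MAX_DATA_LEN as defined above, message assumed):
if message.data.len() > FILTERADD_MAX_DATA_LEN {
	// reject the oversized `filteradd` payload
}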

View File

@ -1,36 +1,50 @@
use std::sync::Arc;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::sync::Arc;
use parking_lot::Mutex; use parking_lot::Mutex;
use chain; use chain;
use db; use db;
use network::Magic; use network::Magic;
use orphan_blocks_pool::OrphanBlocksPool;
use synchronization_verifier::{Verifier, SyncVerifier, VerificationTask,
VerificationSink, BlockVerificationSink, TransactionVerificationSink};
use primitives::hash::H256; use primitives::hash::H256;
use super::Error; use super::Error;
use synchronization_verifier::{Verifier, SyncVerifier, VerificationTask,
VerificationSink, BlockVerificationSink, TransactionVerificationSink};
use types::StorageRef;
use utils::OrphanBlocksPool;
/// Maximum number of orphaned in-memory blocks
pub const MAX_ORPHANED_BLOCKS: usize = 1024; pub const MAX_ORPHANED_BLOCKS: usize = 1024;
/// Synchronous block writer
pub struct BlocksWriter { pub struct BlocksWriter {
storage: db::SharedStore, /// Blocks storage
storage: StorageRef,
/// Orphaned blocks pool
orphaned_blocks_pool: OrphanBlocksPool, orphaned_blocks_pool: OrphanBlocksPool,
/// Blocks verifier
verifier: SyncVerifier<BlocksWriterSink>, verifier: SyncVerifier<BlocksWriterSink>,
/// Verification events receiver
sink: Arc<BlocksWriterSinkData>, sink: Arc<BlocksWriterSinkData>,
/// True if verification is enabled
verification: bool, verification: bool,
} }
/// Verification events receiver
struct BlocksWriterSink { struct BlocksWriterSink {
/// Reference to blocks writer data
data: Arc<BlocksWriterSinkData>, data: Arc<BlocksWriterSinkData>,
} }
/// Blocks writer data
struct BlocksWriterSinkData { struct BlocksWriterSinkData {
storage: db::SharedStore, /// Blocks storage
storage: StorageRef,
/// Last verification error
err: Mutex<Option<Error>>, err: Mutex<Option<Error>>,
} }
impl BlocksWriter { impl BlocksWriter {
pub fn new(storage: db::SharedStore, network: Magic, verification: bool) -> BlocksWriter { /// Create new synchronous blocks writer
pub fn new(storage: StorageRef, network: Magic, verification: bool) -> BlocksWriter {
let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone())); let sink_data = Arc::new(BlocksWriterSinkData::new(storage.clone()));
let sink = Arc::new(BlocksWriterSink::new(sink_data.clone())); let sink = Arc::new(BlocksWriterSink::new(sink_data.clone()));
let verifier = SyncVerifier::new(network, storage.clone(), sink); let verifier = SyncVerifier::new(network, storage.clone(), sink);
@ -43,14 +57,16 @@ impl BlocksWriter {
} }
} }
/// Append new block
pub fn append_block(&mut self, block: chain::IndexedBlock) -> Result<(), Error> { pub fn append_block(&mut self, block: chain::IndexedBlock) -> Result<(), Error> {
// do not append block if it is already there // do not append block if it is already there
if self.storage.contains_block(db::BlockRef::Hash(block.hash().clone())) { if self.storage.contains_block(db::BlockRef::Hash(block.hash().clone())) {
return Ok(()); return Ok(());
} }
// verify && insert only if parent block is already in the storage // verify && insert only if parent block is already in the storage
if !self.storage.contains_block(db::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) { if !self.storage.contains_block(db::BlockRef::Hash(block.header.raw.previous_header_hash.clone())) {
self.orphaned_blocks_pool.insert_orphaned_block(block.hash().clone(), block); self.orphaned_blocks_pool.insert_orphaned_block(block);
// we can't hold many orphaned blocks in memory during import // we can't hold many orphaned blocks in memory during import
if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS { if self.orphaned_blocks_pool.len() > MAX_ORPHANED_BLOCKS {
return Err(Error::TooManyOrphanBlocks); return Err(Error::TooManyOrphanBlocks);
@ -59,7 +75,7 @@ impl BlocksWriter {
} }
// verify && insert block && all its orphan children // verify && insert block && all its orphan children
let mut verification_queue: VecDeque<chain::IndexedBlock> = self.orphaned_blocks_pool.remove_blocks_for_parent(block.hash()).into_iter().map(|(_, b)| b).collect(); let mut verification_queue: VecDeque<chain::IndexedBlock> = self.orphaned_blocks_pool.remove_blocks_for_parent(block.hash());
verification_queue.push_front(block); verification_queue.push_front(block);
while let Some(block) = verification_queue.pop_front() { while let Some(block) = verification_queue.pop_front() {
if self.verification { if self.verification {
@ -78,6 +94,7 @@ impl BlocksWriter {
} }
impl BlocksWriterSink { impl BlocksWriterSink {
/// Create new verification events receiver
pub fn new(data: Arc<BlocksWriterSinkData>) -> Self { pub fn new(data: Arc<BlocksWriterSinkData>) -> Self {
BlocksWriterSink { BlocksWriterSink {
data: data, data: data,
@ -86,13 +103,15 @@ impl BlocksWriterSink {
} }
impl BlocksWriterSinkData { impl BlocksWriterSinkData {
pub fn new(storage: db::SharedStore) -> Self { /// Create new blocks writer data
pub fn new(storage: StorageRef) -> Self {
BlocksWriterSinkData { BlocksWriterSinkData {
storage: storage, storage: storage,
err: Mutex::new(None), err: Mutex::new(None),
} }
} }
/// Take last verification error
pub fn error(&self) -> Option<Error> { pub fn error(&self) -> Option<Error> {
self.err.lock().take() self.err.lock().take()
} }
@ -115,7 +134,7 @@ impl BlockVerificationSink for BlocksWriterSink {
} }
impl TransactionVerificationSink for BlocksWriterSink { impl TransactionVerificationSink for BlocksWriterSink {
fn on_transaction_verification_success(&self, _transaction: chain::Transaction) { fn on_transaction_verification_success(&self, _transaction: chain::IndexedTransaction) {
unreachable!("not intended to verify transactions") unreachable!("not intended to verify transactions")
} }

View File

@ -1,711 +0,0 @@
use std::cmp::min;
use linked_hash_map::LinkedHashMap;
use bit_vec::BitVec;
use murmur3::murmur3_32;
use chain::{Block, Transaction, OutPoint, merkle_node_hash};
use ser::serialize;
use message::types;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use script::Script;
/// Constant optimized to create large differences in the seed for different values of `hash_functions_num`.
const SEED_OFFSET: u32 = 0xFBA4C795;
/// Max last blocks to store for a given peer. TODO: how does bitcoind deal with this?
pub const MAX_LAST_BLOCKS_TO_STORE: usize = 2048;
/// Max last transactions to store for a given peer
pub const MAX_LAST_TRANSACTIONS_TO_STORE: usize = 64;
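For reference, the seed derivation this constant served, per BIP 37 (a sketch, not verbatim from the deleted file):
fn nth_hash_seed(hash_function_index: u32, tweak: u32) -> u32 {
	hash_function_index.wrapping_mul(SEED_OFFSET).wrapping_add(tweak)
}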
/// Filter, which controls data relayed over connection.
#[derive(Debug)]
pub struct ConnectionFilter {
/// Bloom filter, if set.
bloom: Option<ConnectionBloom>,
/// Filter update type.
filter_flags: types::FilterFlags,
/// Last blocks from peer.
last_blocks: LinkedHashMap<H256, bool>,
/// Last transactions from peer.
last_transactions: LinkedHashMap<H256, ()>,
/// Minimal fee in satoshis per 1000 bytes
fee_rate: Option<u64>,
}
/// Connection bloom filter
#[derive(Debug)]
struct ConnectionBloom {
/// Filter storage.
filter: BitVec,
/// Number of hash functions to use in bloom filter.
hash_functions_num: u32,
/// Value to add to Murmur3 hash seed when calculating hash.
tweak: u32,
}
/// `merkleblock` build artefacts
#[derive(Debug, PartialEq)]
pub struct MerkleBlockArtefacts {
/// `merkleblock` message
pub merkleblock: types::MerkleBlock,
/// All matching transactions
pub matching_transactions: Vec<(H256, Transaction)>,
}
/// Service structure to construct `merkleblock` message.
struct PartialMerkleTree {
/// All transactions length.
all_len: usize,
/// All transactions hashes.
all_hashes: Vec<H256>,
/// Match flags for all transactions.
all_matches: BitVec,
/// Partial hashes.
hashes: Vec<H256>,
/// Partial match flags.
matches: BitVec,
}
impl Default for ConnectionFilter {
fn default() -> Self {
ConnectionFilter {
bloom: None,
filter_flags: types::FilterFlags::None,
last_blocks: LinkedHashMap::new(),
last_transactions: LinkedHashMap::new(),
fee_rate: None,
}
}
}
impl ConnectionFilter {
#[cfg(test)]
/// Create new connection with given filter params
pub fn with_filterload(message: &types::FilterLoad) -> Self {
ConnectionFilter {
bloom: Some(ConnectionBloom::new(message)),
filter_flags: message.flags,
last_blocks: LinkedHashMap::new(),
last_transactions: LinkedHashMap::new(),
fee_rate: None,
}
}
/// Remember that the block with the given hash is known to this connection
pub fn known_block(&mut self, block_hash: &H256, is_sent_compact: bool) {
// remember that peer knows about this block
if !self.last_blocks.contains_key(block_hash) {
if self.last_blocks.len() == MAX_LAST_BLOCKS_TO_STORE {
self.last_blocks.pop_front();
}
self.last_blocks.insert(block_hash.clone(), is_sent_compact);
}
}
/// Remember that the transaction with the given hash is known to this connection
pub fn known_transaction(&mut self, transaction_hash: &H256) {
// remember that peer knows about this transaction
if !self.last_transactions.contains_key(transaction_hash) {
if self.last_transactions.len() == MAX_LAST_TRANSACTIONS_TO_STORE {
self.last_transactions.pop_front();
}
self.last_transactions.insert(transaction_hash.clone(), ());
}
}
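Both methods above implement the same bounded, insertion-ordered cache: skip known keys, evict the oldest entry once the limit is reached, then insert. A minimal standalone sketch of that pattern, assuming the same linked_hash_map crate; BoundedCache and its fields are illustrative names, not types from this crate:

use linked_hash_map::LinkedHashMap;

/// Illustrative bounded cache of already-seen keys.
struct BoundedCache {
	entries: LinkedHashMap<String, ()>,
	capacity: usize,
}

impl BoundedCache {
	fn remember(&mut self, key: String) {
		if !self.entries.contains_key(&key) {
			// at capacity => evict the oldest (first-inserted) entry
			if self.entries.len() == self.capacity {
				self.entries.pop_front();
			}
			self.entries.insert(key, ());
		}
	}
}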
/// Whether a compact block with this hash has been sent recently
pub fn is_known_compact_block(&self, block_hash: &H256) -> bool {
self.last_blocks.get(block_hash).cloned().unwrap_or(false)
}
/// Check if block should be sent to this connection
pub fn filter_block(&self, block_hash: &H256) -> bool {
// check if block is known
!self.last_blocks.contains_key(block_hash)
}
/// Check if transaction should be sent to this connection && optionally update filter
pub fn filter_transaction(&mut self, transaction_hash: &H256, transaction: &Transaction, transaction_fee_rate: Option<u64>) -> bool {
// check if transaction is known
if self.last_transactions.contains_key(transaction_hash) {
return false;
}
// check if transaction fee rate is high enough for this peer
if let Some(fee_rate) = self.fee_rate {
if let Some(transaction_fee_rate) = transaction_fee_rate {
if transaction_fee_rate < fee_rate {
return false;
}
}
}
// check with bloom filter, if set
self.filter_transaction_with_bloom(transaction_hash, transaction)
}
/// Load filter
pub fn load(&mut self, message: &types::FilterLoad) {
self.bloom = Some(ConnectionBloom::new(message));
self.filter_flags = message.flags;
}
/// Add filter
pub fn add(&mut self, message: &types::FilterAdd) {
// ignore if filter is not currently set
if let Some(ref mut bloom) = self.bloom {
bloom.insert(&message.data);
}
}
/// Clear filter
pub fn clear(&mut self) {
self.bloom = None;
}
/// Limit transaction announcing by transaction fee
pub fn set_fee_rate(&mut self, fee_rate: u64) {
if fee_rate == 0 {
self.fee_rate = None;
}
else {
self.fee_rate = Some(fee_rate);
}
}
/// Convert `Block` to `MerkleBlock` using this filter
pub fn build_merkle_block(&mut self, block: Block) -> Option<MerkleBlockArtefacts> {
if self.bloom.is_none() {
return None;
}
// prepare result
let all_len = block.transactions.len();
let mut result = MerkleBlockArtefacts {
merkleblock: types::MerkleBlock {
block_header: block.block_header.clone(),
total_transactions: all_len as u32,
hashes: Vec::default(),
flags: Bytes::default(),
},
matching_transactions: Vec::new(),
};
// calculate hashes && match flags for all transactions
let (all_hashes, all_flags) = block.transactions.into_iter()
.fold((Vec::<H256>::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| {
let hash = t.hash();
let flag = self.filter_transaction_with_bloom(&hash, &t);
if flag {
result.matching_transactions.push((hash.clone(), t));
}
all_flags.push(flag);
all_hashes.push(hash);
(all_hashes, all_flags)
});
// build partial merkle tree
let (hashes, flags) = PartialMerkleTree::build(all_hashes, all_flags);
result.merkleblock.hashes.extend(hashes);
// to_bytes() converts [true, false, true] to 0b10100000
// while protocol requires [true, false, true] to be serialized as 0x00000101
result.merkleblock.flags = flags.to_bytes().into_iter()
.map(|b|
((b & 0b10000000) >> 7) |
((b & 0b01000000) >> 5) |
((b & 0b00100000) >> 3) |
((b & 0b00010000) >> 1) |
((b & 0b00001000) << 1) |
((b & 0b00000100) << 3) |
((b & 0b00000010) << 5) |
((b & 0b00000001) << 7)).collect::<Vec<u8>>().into();
Some(result)
}
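The closure above reverses the bit order within each byte produced by to_bytes(). A standalone sketch of the same mapping, with a test checking the example from the comment:

fn reverse_bit_order(b: u8) -> u8 {
	((b & 0b10000000) >> 7) | ((b & 0b01000000) >> 5) |
	((b & 0b00100000) >> 3) | ((b & 0b00010000) >> 1) |
	((b & 0b00001000) << 1) | ((b & 0b00000100) << 3) |
	((b & 0b00000010) << 5) | ((b & 0b00000001) << 7)
}

#[test]
fn flag_bits_are_reversed_within_each_byte() {
	// [true, false, true] packs to 0b10100000, but serializes as 0b00000101
	assert_eq!(reverse_bit_order(0b10100000), 0b00000101);
}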
/// Check if transaction should be sent to this connection using bloom filter && optionally update filter
fn filter_transaction_with_bloom(&mut self, transaction_hash: &H256, transaction: &Transaction) -> bool {
// check with bloom filter, if set
match self.bloom {
// if no filter is set for the connection => match everything
None => true,
// filter using bloom filter, then update
Some(ref mut bloom) => {
let mut is_match = false;
// match if filter contains any arbitrary script data element in any scriptPubKey in tx
for (output_index, output) in transaction.outputs.iter().enumerate() {
let script = Script::new(output.script_pubkey.clone());
for instruction in script.iter().filter_map(|i| i.ok()) {
if let Some(instruction_data) = instruction.data {
if bloom.contains(instruction_data) {
is_match = true;
let is_update_needed = self.filter_flags == types::FilterFlags::All
|| (self.filter_flags == types::FilterFlags::PubKeyOnly && (script.is_pay_to_public_key() || script.is_multisig_script()));
if is_update_needed {
bloom.insert(&serialize(&OutPoint {
hash: transaction_hash.clone(),
index: output_index as u32,
}));
}
}
}
}
}
if is_match {
return is_match;
}
// match if filter contains transaction itself
if bloom.contains(&**transaction_hash) {
return true;
}
// match if filter contains an outpoint this transaction spends
for input in &transaction.inputs {
// check if match previous output
let previous_output = serialize(&input.previous_output);
is_match = bloom.contains(&*previous_output);
if is_match {
return true;
}
// check if match any arbitrary script data element in any scriptSig in tx
let script = Script::new(input.script_sig.clone());
for instruction in script.iter().filter_map(|i| i.ok()) {
if let Some(instruction_data) = instruction.data {
is_match = bloom.contains(&*instruction_data);
if is_match {
return true;
}
}
}
}
// no matches
false
},
}
}
}
impl ConnectionBloom {
/// Create with given parameters
pub fn new(message: &types::FilterLoad) -> Self {
ConnectionBloom {
filter: BitVec::from_bytes(&message.filter),
hash_functions_num: message.hash_functions,
tweak: message.tweak,
}
}
/// True if filter contains given bytes
pub fn contains(&self, data: &[u8]) -> bool {
for hash_function_idx in 0..self.hash_functions_num {
let murmur_seed = hash_function_idx.overflowing_mul(SEED_OFFSET).0.overflowing_add(self.tweak).0;
let murmur_hash = murmur3_32(&mut data.as_ref(), murmur_seed) as usize % self.filter.len();
if !self.filter.get(murmur_hash).expect("mod operation above") {
return false;
}
}
true
}
/// Add bytes to the filter
pub fn insert(&mut self, data: &[u8]) {
for hash_function_idx in 0..self.hash_functions_num {
let murmur_seed = hash_function_idx.overflowing_mul(SEED_OFFSET).0.overflowing_add(self.tweak).0;
let murmur_hash = murmur3_32(&mut data.as_ref(), murmur_seed) as usize % self.filter.len();
self.filter.set(murmur_hash, true);
}
}
}
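Both contains and insert derive a per-probe Murmur3 seed as idx * SEED_OFFSET + tweak with wrapping arithmetic, as BIP 37 specifies. A standalone sketch of just that derivation (wrapping_* is equivalent to the overflowing_* chain above):

fn murmur_seed(hash_function_idx: u32, tweak: u32) -> u32 {
	hash_function_idx.wrapping_mul(0xFBA4C795).wrapping_add(tweak)
}

#[test]
fn seeds_differ_per_hash_function() {
	assert_eq!(murmur_seed(0, 5), 5);
	assert_eq!(murmur_seed(1, 5), 0xFBA4C79A);
}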
impl PartialMerkleTree {
/// Build partial merkle tree as described here:
/// https://bitcoin.org/en/developer-reference#creating-a-merkleblock-message
pub fn build(all_hashes: Vec<H256>, all_matches: BitVec) -> (Vec<H256>, BitVec) {
let mut partial_merkle_tree = PartialMerkleTree {
all_len: all_hashes.len(),
all_hashes: all_hashes,
all_matches: all_matches,
hashes: Vec::new(),
matches: BitVec::new(),
};
partial_merkle_tree.build_tree();
(partial_merkle_tree.hashes, partial_merkle_tree.matches)
}
#[cfg(test)]
/// Parse partial merkle tree as described here:
/// https://bitcoin.org/en/developer-reference#parsing-a-merkleblock-message
pub fn parse(all_len: usize, hashes: Vec<H256>, matches: BitVec) -> Result<(H256, Vec<H256>, BitVec), String> {
let mut partial_merkle_tree = PartialMerkleTree {
all_len: all_len,
all_hashes: Vec::new(),
all_matches: BitVec::from_elem(all_len, false),
hashes: hashes,
matches: matches,
};
let merkle_root = try!(partial_merkle_tree.parse_tree());
Ok((merkle_root, partial_merkle_tree.all_hashes, partial_merkle_tree.all_matches))
}
fn build_tree(&mut self) {
let tree_height = self.tree_height();
self.build_branch(tree_height, 0)
}
#[cfg(test)]
fn parse_tree(&mut self) -> Result<H256, String> {
if self.all_len == 0 {
return Err("no transactions".into());
}
if self.hashes.len() > self.all_len {
return Err("too many hashes".into());
}
if self.matches.len() < self.hashes.len() {
return Err("too few matches".into());
}
// parse tree
let mut matches_used = 0usize;
let mut hashes_used = 0usize;
let tree_height = self.tree_height();
let merkle_root = try!(self.parse_branch(tree_height, 0, &mut matches_used, &mut hashes_used));
if matches_used != self.matches.len() {
return Err("not all matches used".into());
}
if hashes_used != self.hashes.len() {
return Err("not all hashes used".into());
}
Ok(merkle_root)
}
fn build_branch(&mut self, height: usize, pos: usize) {
// determine whether this node is the parent of at least one matched txid
let transactions_begin = pos << height;
let transactions_end = min(self.all_len, (pos + 1) << height);
let flag = (transactions_begin..transactions_end).any(|idx| self.all_matches[idx]);
// remember flag
self.matches.push(flag);
// proceed with descendants
if height == 0 || !flag {
// we're at the leaf level || there is no match
let hash = self.branch_hash(height, pos);
self.hashes.push(hash);
} else {
// proceed with left child
self.build_branch(height - 1, pos << 1);
// proceed with right child if any
if (pos << 1) + 1 < self.level_width(height - 1) {
self.build_branch(height - 1, (pos << 1) + 1);
}
}
}
#[cfg(test)]
fn parse_branch(&mut self, height: usize, pos: usize, matches_used: &mut usize, hashes_used: &mut usize) -> Result<H256, String> {
if *matches_used >= self.matches.len() {
return Err("all matches used".into());
}
let flag = self.matches[*matches_used];
*matches_used += 1;
if height == 0 || !flag {
// we're at the leaf level || there is no match
if *hashes_used >= self.hashes.len() {
return Err("all hashes used".into());
}
// get node hash
let hash = &self.hashes[*hashes_used];
*hashes_used += 1;
// on leaf level && matched flag set => mark transaction as matched
if height == 0 && flag {
self.all_hashes.push(hash.clone());
self.all_matches.set(pos, true);
}
Ok(hash.clone())
} else {
// proceed with left child
let left = try!(self.parse_branch(height - 1, pos << 1, matches_used, hashes_used));
// proceed with right child if any
let has_right_child = (pos << 1) + 1 < self.level_width(height - 1);
let right = if has_right_child {
try!(self.parse_branch(height - 1, (pos << 1) + 1, matches_used, hashes_used))
} else {
left.clone()
};
if has_right_child && left == right {
Err("met same hash twice".into())
} else {
Ok(merkle_node_hash(&left, &right))
}
}
}
fn tree_height(&self) -> usize {
let mut height = 0usize;
while self.level_width(height) > 1 {
height += 1;
}
height
}
fn level_width(&self, height: usize) -> usize {
(self.all_len + (1 << height) - 1) >> height
}
fn branch_hash(&self, height: usize, pos: usize) -> H256 {
if height == 0 {
self.all_hashes[pos].clone()
} else {
let left = self.branch_hash(height - 1, pos << 1);
let right = if (pos << 1) + 1 < self.level_width(height - 1) {
self.branch_hash(height - 1, (pos << 1) + 1)
} else {
left.clone()
};
merkle_node_hash(&left, &right)
}
}
}
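level_width is a ceiling division of the transaction count by 2^height, and tree_height is the smallest height whose level collapses to a single root. A quick standalone check with an illustrative block of 5 transactions:

fn level_width(all_len: usize, height: usize) -> usize {
	(all_len + (1 << height) - 1) >> height
}

#[test]
fn five_transactions_give_tree_height_three() {
	// 5 leaves -> 3 nodes -> 2 nodes -> 1 root
	assert_eq!(level_width(5, 0), 5);
	assert_eq!(level_width(5, 1), 3);
	assert_eq!(level_width(5, 2), 2);
	assert_eq!(level_width(5, 3), 1);
}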
#[cfg(test)]
pub mod tests {
use std::iter::{Iterator, repeat};
use test_data;
use message::types;
use chain::{merkle_root, Transaction};
use primitives::hash::H256;
use primitives::bytes::Bytes;
use ser::serialize;
use super::{ConnectionFilter, ConnectionBloom, PartialMerkleTree,
MAX_LAST_BLOCKS_TO_STORE, MAX_LAST_TRANSACTIONS_TO_STORE};
pub fn default_filterload() -> types::FilterLoad {
types::FilterLoad {
filter: Bytes::from(repeat(0u8).take(1024).collect::<Vec<_>>()),
hash_functions: 10,
tweak: 5,
flags: types::FilterFlags::None,
}
}
pub fn make_filteradd(data: &[u8]) -> types::FilterAdd {
types::FilterAdd {
data: data.into(),
}
}
#[test]
fn bloom_insert_data() {
let mut bloom = ConnectionBloom::new(&default_filterload());
assert!(!bloom.contains(&*H256::default()));
bloom.insert(&*H256::default());
assert!(bloom.contains(&*H256::default()));
}
#[test]
fn connection_filter_matches_transaction_by_hash() {
let tx1: Transaction = test_data::TransactionBuilder::with_output(10).into();
let tx2: Transaction = test_data::TransactionBuilder::with_output(20).into();
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&*tx1.hash()));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
fn connection_filter_matches_transaction_by_output_script_data_element() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
// output script: OP_DUP OP_HASH160 380cb3c594de4e7e9b8e18db182987bebb5a4f70 OP_EQUALVERIFY OP_CHECKSIG
let tx1: Transaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_out_data: Bytes = "380cb3c594de4e7e9b8e18db182987bebb5a4f70".into();
let tx2 = Transaction::default();
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&tx1_out_data));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
fn connection_filter_matches_transaction_by_previous_output_point() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
let tx1: Transaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_previous_output: Bytes = serialize(&tx1.inputs[0].previous_output);
let tx2 = Transaction::default();
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&tx1_previous_output));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
fn connection_filter_matches_transaction_by_input_script_data_element() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
// input script: PUSH DATA 304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b01
let tx1: Transaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_input_data: Bytes = "304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b01".into();
let tx2 = Transaction::default();
let mut filter = ConnectionFilter::with_filterload(&default_filterload());
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
filter.add(&make_filteradd(&tx1_input_data));
assert!(filter.filter_transaction(&tx1.hash(), &tx1, None));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, None));
}
#[test]
fn connection_filter_matches_transaction_by_fee_rate() {
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
let tx2: Transaction = test_data::TransactionBuilder::with_version(2).into();
let mut filter = ConnectionFilter::default();
assert!(filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
filter.set_fee_rate(1500);
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
filter.set_fee_rate(3000);
assert!(!filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(!filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
filter.set_fee_rate(0);
assert!(filter.filter_transaction(&tx1.hash(), &tx1, Some(1000)));
assert!(filter.filter_transaction(&tx2.hash(), &tx2, Some(2000)));
}
#[test]
// test from core implementation (slow)
// https://github.com/bitcoin/bitcoin/blob/master/src/test/pmt_tests.cpp
fn test_build_merkle_block() {
use bit_vec::BitVec;
use rand::{Rng, SeedableRng, StdRng};
let rng_seed: &[_] = &[0, 0, 0, 0];
let mut rng: StdRng = SeedableRng::from_seed(rng_seed);
// for some transactions counts
let tx_counts: Vec<usize> = vec![1, 4, 7, 17, 56, 100, 127, 256, 312, 513, 1000, 4095];
for tx_count in tx_counts {
// build block with given transactions number
let transactions: Vec<Transaction> = (0..tx_count).map(|n| test_data::TransactionBuilder::with_version(n as i32).into()).collect();
let hashes: Vec<_> = transactions.iter().map(|t| t.hash()).collect();
let merkle_root = merkle_root(&hashes);
// mark different transactions as matched
for seed_tweak in 1..15 {
let mut matches: BitVec = BitVec::with_capacity(tx_count);
let mut matched_hashes: Vec<H256> = Vec::with_capacity(tx_count);
for i in 0usize..tx_count {
let is_match = (rng.gen::<u32>() & ((1 << (seed_tweak / 2)) - 1)) == 0;
matches.push(is_match);
if is_match {
matched_hashes.push(hashes[i].clone());
}
}
// build partial merkle tree
let (built_hashes, built_flags) = PartialMerkleTree::build(hashes.clone(), matches.clone());
// parse tree back
let (parsed_root, parsed_hashes, parsed_positions) = PartialMerkleTree::parse(tx_count, built_hashes, built_flags)
.expect("no error");
assert_eq!(matched_hashes, parsed_hashes);
assert_eq!(matches, parsed_positions);
assert_eq!(merkle_root, parsed_root);
}
}
}
#[test]
fn block_is_filtered_out_when_it_is_received_from_peer() {
let blocks = test_data::build_n_empty_blocks_from_genesis((MAX_LAST_BLOCKS_TO_STORE + 1) as u32, 1);
let mut filter = ConnectionFilter::default();
assert!(filter.filter_block(&blocks[0].hash()));
filter.known_block(&blocks[0].hash(), false);
assert!(!filter.filter_block(&blocks[0].hash()));
for block in blocks.iter().skip(1).take(MAX_LAST_BLOCKS_TO_STORE - 1) {
filter.known_block(&block.hash(), false);
assert!(!filter.filter_block(&blocks[0].hash()));
}
filter.known_block(&blocks[MAX_LAST_BLOCKS_TO_STORE].hash(), false);
assert!(filter.filter_block(&blocks[0].hash()));
}
#[test]
fn transaction_is_filtered_out_when_it_is_received_from_peer() {
let transactions: Vec<Transaction> = (0..MAX_LAST_TRANSACTIONS_TO_STORE + 1)
.map(|version| test_data::TransactionBuilder::with_version(version as i32).into())
.collect();
let mut filter = ConnectionFilter::default();
assert!(filter.filter_transaction(&transactions[0].hash(), &transactions[0], None));
filter.known_transaction(&transactions[0].hash());
assert!(!filter.filter_transaction(&transactions[0].hash(), &transactions[0], None));
for transaction in transactions.iter().skip(1).take(MAX_LAST_TRANSACTIONS_TO_STORE - 1) {
filter.known_transaction(&transaction.hash());
assert!(!filter.filter_transaction(&transactions[0].hash(), &transactions[0], None));
}
filter.known_transaction(&transactions[MAX_LAST_TRANSACTIONS_TO_STORE].hash());
assert!(filter.filter_transaction(&transactions[0].hash(), &transactions[0], None));
}
#[test]
fn known_compact_block() {
let mut filter = ConnectionFilter::default();
filter.known_block(&test_data::block_h1().hash(), true);
filter.known_block(&test_data::block_h2().hash(), false);
assert!(filter.is_known_compact_block(&test_data::block_h1().hash()));
assert!(!filter.is_known_compact_block(&test_data::block_h2().hash()));
}
}

View File

@ -1,103 +1,216 @@
use chain::{IndexedTransaction, IndexedBlock};
use message::types; use message::types;
use p2p::{InboundSyncConnection, InboundSyncConnectionRef}; use p2p::{InboundSyncConnection, InboundSyncConnectionRef};
use local_node::LocalNodeRef; use types::{PeersRef, LocalNodeRef, PeerIndex, RequestId};
use utils::KnownHashType;
/// Inbound synchronization connection
pub struct InboundConnection { pub struct InboundConnection {
local_node: LocalNodeRef, /// Index of peer for this connection
peer_index: usize, peer_index: PeerIndex,
/// Peers reference
peers: PeersRef,
/// Reference to synchronization node
node: LocalNodeRef,
} }
impl InboundConnection { impl InboundConnection {
pub fn new(local_node: LocalNodeRef, peer_index: usize) -> InboundSyncConnectionRef { /// Create new inbound connection
Box::new(InboundConnection { pub fn new(peer_index: PeerIndex, peers: PeersRef, node: LocalNodeRef) -> InboundConnection {
local_node: local_node, InboundConnection {
peer_index: peer_index, peer_index: peer_index,
}) peers: peers,
node: node,
}
}
/// Box inbound connection
pub fn boxed(self) -> InboundSyncConnectionRef {
Box::new(self)
} }
} }
impl InboundSyncConnection for InboundConnection { impl InboundSyncConnection for InboundConnection {
fn start_sync_session(&self, version: u32) { fn start_sync_session(&self, version: types::Version) {
self.local_node.start_sync_session(self.peer_index, version); self.node.on_connect(self.peer_index, version);
} }
fn close_session(&self) { fn close_session(&self) {
self.local_node.stop_sync_session(self.peer_index) self.peers.remove(self.peer_index);
self.node.on_disconnect(self.peer_index);
} }
fn on_inventory(&self, message: types::Inv) { fn on_inventory(&self, message: types::Inv) {
self.local_node.on_peer_inventory(self.peer_index, message); // if inventory is empty - just ignore this message
if message.inventory.is_empty() {
return;
}
// if inventory length is too big => possible DOS
if message.inventory.len() > types::INV_MAX_INVENTORY_LEN {
self.peers.dos(self.peer_index, &format!("'inv' message contains {} entries", message.inventory.len()));
return;
}
self.node.on_inventory(self.peer_index, message);
} }
fn on_getdata(&self, message: types::GetData) { fn on_getdata(&self, message: types::GetData) {
self.local_node.on_peer_getdata(self.peer_index, message); // if inventory is empty - just ignore this message
if message.inventory.is_empty() {
return;
}
// if inventory length is too big => possible DOS
if message.inventory.len() > types::GETDATA_MAX_INVENTORY_LEN {
self.peers.dos(self.peer_index, &format!("'getdata' message contains {} entries", message.inventory.len()));
return;
}
self.node.on_getdata(self.peer_index, message);
} }
fn on_getblocks(&self, message: types::GetBlocks) { fn on_getblocks(&self, message: types::GetBlocks) {
self.local_node.on_peer_getblocks(self.peer_index, message); self.node.on_getblocks(self.peer_index, message);
} }
fn on_getheaders(&self, message: types::GetHeaders, id: u32) { fn on_getheaders(&self, message: types::GetHeaders, id: RequestId) {
self.local_node.on_peer_getheaders(self.peer_index, message, id); self.node.on_getheaders(self.peer_index, message, id);
} }
fn on_transaction(&self, message: types::Tx) { fn on_transaction(&self, message: types::Tx) {
self.local_node.on_peer_transaction(self.peer_index, message); let tx: IndexedTransaction = message.transaction.into();
self.peers.hash_known_as(self.peer_index, tx.hash.clone(), KnownHashType::Transaction);
self.node.on_transaction(self.peer_index, tx);
} }
fn on_block(&self, message: types::Block) { fn on_block(&self, message: types::Block) {
self.local_node.on_peer_block(self.peer_index, message); let block: IndexedBlock = message.block.into();
self.peers.hash_known_as(self.peer_index, block.hash().clone(), KnownHashType::Block);
self.node.on_block(self.peer_index, block);
} }
fn on_headers(&self, message: types::Headers) { fn on_headers(&self, message: types::Headers) {
self.local_node.on_peer_headers(self.peer_index, message); // if headers are empty - just ignore this message
if message.headers.is_empty() {
return;
}
// if there are too many headers => possible DOS
if message.headers.len() > types::HEADERS_MAX_HEADERS_LEN {
self.peers.dos(self.peer_index, &format!("'headers' message contains {} headers", message.headers.len()));
return;
}
self.node.on_headers(self.peer_index, message);
} }
fn on_mempool(&self, message: types::MemPool) { fn on_mempool(&self, message: types::MemPool) {
self.local_node.on_peer_mempool(self.peer_index, message); self.node.on_mempool(self.peer_index, message);
} }
fn on_filterload(&self, message: types::FilterLoad) { fn on_filterload(&self, message: types::FilterLoad) {
self.local_node.on_peer_filterload(self.peer_index, message); // if filter is too big => possible DOS
if message.filter.len() > types::FILTERLOAD_MAX_FILTER_LEN {
self.peers.dos(self.peer_index, &format!("'filterload' message contains {}-len filter", message.filter.len()));
return;
}
// if too many hash functions => possible DOS
if message.hash_functions as usize > types::FILTERLOAD_MAX_HASH_FUNCS {
self.peers.dos(self.peer_index, &format!("'filterload' message contains {} hash functions", message.hash_functions));
return;
}
self.node.on_filterload(self.peer_index, message);
} }
fn on_filteradd(&self, message: types::FilterAdd) { fn on_filteradd(&self, message: types::FilterAdd) {
self.local_node.on_peer_filteradd(self.peer_index, message); // if filter item is too big => possible DOS
if message.data.len() > types::FILTERADD_MAX_DATA_LEN {
self.peers.dos(self.peer_index, &format!("'filteradd' message contains {}-len data item", message.data.len()));
return;
}
self.node.on_filteradd(self.peer_index, message);
} }
fn on_filterclear(&self, message: types::FilterClear) { fn on_filterclear(&self, message: types::FilterClear) {
self.local_node.on_peer_filterclear(self.peer_index, message); self.node.on_filterclear(self.peer_index, message);
} }
fn on_merkleblock(&self, message: types::MerkleBlock) { fn on_merkleblock(&self, message: types::MerkleBlock) {
self.local_node.on_peer_merkleblock(self.peer_index, message); self.node.on_merkleblock(self.peer_index, message);
} }
fn on_sendheaders(&self, message: types::SendHeaders) { fn on_sendheaders(&self, message: types::SendHeaders) {
self.local_node.on_peer_sendheaders(self.peer_index, message); self.node.on_sendheaders(self.peer_index, message);
} }
fn on_feefilter(&self, message: types::FeeFilter) { fn on_feefilter(&self, message: types::FeeFilter) {
self.local_node.on_peer_feefilter(self.peer_index, message); self.node.on_feefilter(self.peer_index, message);
} }
fn on_send_compact(&self, message: types::SendCompact) { fn on_send_compact(&self, message: types::SendCompact) {
self.local_node.on_peer_send_compact(self.peer_index, message); self.node.on_send_compact(self.peer_index, message);
} }
fn on_compact_block(&self, message: types::CompactBlock) { fn on_compact_block(&self, message: types::CompactBlock) {
self.local_node.on_peer_compact_block(self.peer_index, message); self.node.on_compact_block(self.peer_index, message);
} }
fn on_get_block_txn(&self, message: types::GetBlockTxn) { fn on_get_block_txn(&self, message: types::GetBlockTxn) {
self.local_node.on_peer_get_block_txn(self.peer_index, message); self.node.on_get_block_txn(self.peer_index, message);
} }
fn on_block_txn(&self, message: types::BlockTxn) { fn on_block_txn(&self, message: types::BlockTxn) {
self.local_node.on_peer_block_txn(self.peer_index, message); self.node.on_block_txn(self.peer_index, message);
} }
fn on_notfound(&self, message: types::NotFound) { fn on_notfound(&self, message: types::NotFound) {
self.local_node.on_peer_notfound(self.peer_index, message); self.node.on_notfound(self.peer_index, message);
}
}
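Each handler above applies the same guard before touching the node: ignore empty messages, penalize oversized ones via peers.dos. A minimal sketch of that shape; the limit value here is an assumption for illustration (the actual limits live in the types constants referenced above):

// illustrative value; the crate reads its limit from types::INV_MAX_INVENTORY_LEN
const MAX_INVENTORY_LEN: usize = 50_000;

enum InboundDecision {
	Ignore,
	Dos,
	Process,
}

fn check_inventory(len: usize) -> InboundDecision {
	if len == 0 {
		InboundDecision::Ignore // empty messages are dropped silently
	} else if len > MAX_INVENTORY_LEN {
		InboundDecision::Dos // oversized messages are treated as a DoS attempt
	} else {
		InboundDecision::Process
	}
}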
#[cfg(test)]
pub mod tests {
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::Mutex;
use message::types;
use p2p::OutboundSyncConnection;
use types::RequestId;
pub struct DummyOutboundSyncConnection {
pub messages: Mutex<HashMap<String, usize>>,
}
impl DummyOutboundSyncConnection {
pub fn new() -> Arc<DummyOutboundSyncConnection> {
Arc::new(DummyOutboundSyncConnection {
messages: Mutex::new(HashMap::new()),
})
}
}
impl OutboundSyncConnection for DummyOutboundSyncConnection {
fn send_inventory(&self, _message: &types::Inv) { *self.messages.lock().entry("inventory".to_owned()).or_insert(0) += 1; }
fn send_getdata(&self, _message: &types::GetData) { *self.messages.lock().entry("getdata".to_owned()).or_insert(0) += 1; }
fn send_getblocks(&self, _message: &types::GetBlocks) { *self.messages.lock().entry("getblocks".to_owned()).or_insert(0) += 1; }
fn send_getheaders(&self, _message: &types::GetHeaders) { *self.messages.lock().entry("getheaders".to_owned()).or_insert(0) += 1; }
fn send_transaction(&self, _message: &types::Tx) { *self.messages.lock().entry("transaction".to_owned()).or_insert(0) += 1; }
fn send_block(&self, _message: &types::Block) { *self.messages.lock().entry("block".to_owned()).or_insert(0) += 1; }
fn send_headers(&self, _message: &types::Headers) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn respond_headers(&self, _message: &types::Headers, _id: RequestId) { *self.messages.lock().entry("headers".to_owned()).or_insert(0) += 1; }
fn send_mempool(&self, _message: &types::MemPool) { *self.messages.lock().entry("mempool".to_owned()).or_insert(0) += 1; }
fn send_filterload(&self, _message: &types::FilterLoad) { *self.messages.lock().entry("filterload".to_owned()).or_insert(0) += 1; }
fn send_filteradd(&self, _message: &types::FilterAdd) { *self.messages.lock().entry("filteradd".to_owned()).or_insert(0) += 1; }
fn send_filterclear(&self, _message: &types::FilterClear) { *self.messages.lock().entry("filterclear".to_owned()).or_insert(0) += 1; }
fn send_merkleblock(&self, _message: &types::MerkleBlock) { *self.messages.lock().entry("merkleblock".to_owned()).or_insert(0) += 1; }
fn send_sendheaders(&self, _message: &types::SendHeaders) { *self.messages.lock().entry("sendheaders".to_owned()).or_insert(0) += 1; }
fn send_feefilter(&self, _message: &types::FeeFilter) { *self.messages.lock().entry("feefilter".to_owned()).or_insert(0) += 1; }
fn send_send_compact(&self, _message: &types::SendCompact) { *self.messages.lock().entry("sendcompact".to_owned()).or_insert(0) += 1; }
fn send_compact_block(&self, _message: &types::CompactBlock) { *self.messages.lock().entry("cmpctblock".to_owned()).or_insert(0) += 1; }
fn send_get_block_txn(&self, _message: &types::GetBlockTxn) { *self.messages.lock().entry("getblocktxn".to_owned()).or_insert(0) += 1; }
fn send_block_txn(&self, _message: &types::BlockTxn) { *self.messages.lock().entry("blocktxn".to_owned()).or_insert(0) += 1; }
fn send_notfound(&self, _message: &types::NotFound) { *self.messages.lock().entry("notfound".to_owned()).or_insert(0) += 1; }
fn ignored(&self, _id: RequestId) {}
fn close(&self) {}
} }
} }

View File

@ -1,28 +1,41 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use p2p::{LocalSyncNode, LocalSyncNodeRef, OutboundSyncConnectionRef, InboundSyncConnectionRef}; use p2p::{LocalSyncNode, LocalSyncNodeRef, OutboundSyncConnectionRef, InboundSyncConnectionRef};
use local_node::LocalNodeRef;
use inbound_connection::InboundConnection; use inbound_connection::InboundConnection;
use types::{PeersRef, LocalNodeRef};
/// Inbound synchronization connection factory
pub struct InboundConnectionFactory { pub struct InboundConnectionFactory {
local_node: LocalNodeRef, /// Peers reference
peers: PeersRef,
/// Reference to synchronization node
node: LocalNodeRef,
/// Running counter of synchronization peers
counter: AtomicUsize,
} }
impl InboundConnectionFactory { impl InboundConnectionFactory {
pub fn with_local_node(local_node: LocalNodeRef) -> LocalSyncNodeRef { /// Create new inbound connection factory
Box::new( pub fn new(peers: PeersRef, node: LocalNodeRef) -> Self {
InboundConnectionFactory { InboundConnectionFactory {
local_node: local_node, peers: peers,
} node: node,
) counter: AtomicUsize::new(0),
}
}
/// Box inbound connection factory
pub fn boxed(self) -> LocalSyncNodeRef {
Box::new(self)
} }
} }
impl LocalSyncNode for InboundConnectionFactory { impl LocalSyncNode for InboundConnectionFactory {
fn start_height(&self) -> i32 { fn create_sync_session(&self, _best_block_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
self.local_node.best_block().number as i32 let peer_index = self.counter.fetch_add(1, Ordering::SeqCst) + 1;
} trace!(target: "sync", "Creating new sync session with peer#{}", peer_index);
// remember outbound connection
fn create_sync_session(&self, best_block_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef { self.peers.insert(peer_index, outbound_connection);
let peer_index = self.local_node.create_sync_session(best_block_height, outbound_connection); // create new inbound connection
InboundConnection::new(self.local_node.clone(), peer_index) InboundConnection::new(peer_index, self.peers.clone(), self.node.clone()).boxed()
} }
} }

View File

@ -25,25 +25,24 @@ extern crate ethcore_devtools as devtools;
extern crate rand; extern crate rand;
extern crate network; extern crate network;
mod best_headers_chain;
mod blocks_writer; mod blocks_writer;
mod compact_block_builder;
mod connection_filter;
mod hash_queue;
mod inbound_connection; mod inbound_connection;
mod inbound_connection_factory; mod inbound_connection_factory;
mod local_node; mod local_node;
mod orphan_blocks_pool;
mod orphan_transactions_pool;
mod synchronization_chain; mod synchronization_chain;
mod synchronization_client; mod synchronization_client;
mod synchronization_client_core;
mod synchronization_executor; mod synchronization_executor;
mod synchronization_manager; mod synchronization_manager;
mod synchronization_peers; mod synchronization_peers;
mod synchronization_peers_tasks;
mod synchronization_server; mod synchronization_server;
mod synchronization_verifier; mod synchronization_verifier;
mod types;
mod utils;
pub use local_node::LocalNodeRef; pub use types::LocalNodeRef;
pub use types::PeersRef;
use std::sync::Arc; use std::sync::Arc;
use parking_lot::RwLock; use parking_lot::RwLock;
@ -67,14 +66,25 @@ pub fn create_sync_blocks_writer(db: db::SharedStore, network: Magic, verificati
blocks_writer::BlocksWriter::new(db, network, verification) blocks_writer::BlocksWriter::new(db, network, verification)
} }
/// Create synchronization peers
pub fn create_sync_peers() -> PeersRef {
use synchronization_peers::PeersImpl;
Arc::new(PeersImpl::default())
}
/// Creates local sync node for given `db` /// Creates local sync node for given `db`
pub fn create_local_sync_node(handle: &Handle, network: Magic, db: db::SharedStore) -> LocalNodeRef { pub fn create_local_sync_node(handle: &Handle, network: Magic, db: db::SharedStore, peers: PeersRef) -> LocalNodeRef {
use miner::MemoryPool;
use synchronization_chain::Chain as SyncChain; use synchronization_chain::Chain as SyncChain;
use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor; use synchronization_executor::LocalSynchronizationTaskExecutor as SyncExecutor;
use local_node::LocalNode as SyncNode; use local_node::LocalNode as SyncNode;
use synchronization_server::SynchronizationServer; use synchronization_server::ServerImpl;
use synchronization_client::{SynchronizationClient, SynchronizationClientCore, CoreVerificationSink, Config as SynchronizationConfig}; use synchronization_client::SynchronizationClient;
use synchronization_client_core::{SynchronizationClientCore, CoreVerificationSink, Config as SynchronizationConfig};
use synchronization_verifier::AsyncVerifier; use synchronization_verifier::AsyncVerifier;
use utils::SynchronizationState;
use types::SynchronizationStateRef;
let sync_client_config = SynchronizationConfig { let sync_client_config = SynchronizationConfig {
network: network, network: network,
@ -84,20 +94,22 @@ pub fn create_local_sync_node(handle: &Handle, network: Magic, db: db::SharedSto
threads_num: 4, threads_num: 4,
}; };
let sync_chain = Arc::new(RwLock::new(SyncChain::new(db.clone()))); let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(db.clone()));
let sync_chain = SyncChain::new(db.clone(), memory_pool.clone());
let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), network)); let chain_verifier = Arc::new(ChainVerifier::new(db.clone(), network));
let sync_executor = SyncExecutor::new(sync_chain.clone()); let sync_executor = SyncExecutor::new(peers.clone());
let sync_server = Arc::new(SynchronizationServer::new(sync_chain.clone(), sync_executor.clone())); let sync_server = Arc::new(ServerImpl::new(peers.clone(), db.clone(), memory_pool.clone(), sync_executor.clone()));
let sync_client_core = SynchronizationClientCore::new(sync_client_config, handle, sync_executor.clone(), sync_chain.clone(), chain_verifier.clone()); let sync_client_core = SynchronizationClientCore::new(sync_client_config, handle, sync_state.clone(), peers.clone(), sync_executor.clone(), sync_chain, chain_verifier.clone());
let verifier_sink = Arc::new(CoreVerificationSink::new(sync_client_core.clone())); let verifier_sink = Arc::new(CoreVerificationSink::new(sync_client_core.clone()));
let verifier = AsyncVerifier::new(chain_verifier, sync_chain, verifier_sink); let verifier = AsyncVerifier::new(chain_verifier, db.clone(), memory_pool.clone(), verifier_sink);
let sync_client = SynchronizationClient::new(sync_client_core, verifier); let sync_client = SynchronizationClient::new(sync_state.clone(), sync_client_core, verifier);
Arc::new(SyncNode::new(sync_server, sync_client, sync_executor)) Arc::new(SyncNode::new(network, db, memory_pool, peers, sync_state, sync_executor, sync_client, sync_server))
} }
/// Create inbound synchronization connections factory for given local sync node. /// Create inbound synchronization connections factory for given local sync node.
pub fn create_sync_connection_factory(local_sync_node: LocalNodeRef) -> p2p::LocalSyncNodeRef { pub fn create_sync_connection_factory(peers: PeersRef, local_sync_node: LocalNodeRef) -> p2p::LocalSyncNodeRef {
use inbound_connection_factory::InboundConnectionFactory as SyncConnectionFactory; use inbound_connection_factory::InboundConnectionFactory as SyncConnectionFactory;
SyncConnectionFactory::with_local_node(local_sync_node) SyncConnectionFactory::new(peers, local_sync_node).boxed()
} }
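With the new signatures above, a single PeersRef is created first and then threaded through both the node and the connection factory. A hedged usage sketch; the wire_sync wrapper is hypothetical and construction of handle, network and db is elided:

fn wire_sync(handle: &Handle, network: Magic, db: db::SharedStore) -> p2p::LocalSyncNodeRef {
	let peers = create_sync_peers();
	let local_sync_node = create_local_sync_node(handle, network, db, peers.clone());
	create_sync_connection_factory(peers, local_sync_node)
}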

View File

@ -1,40 +1,39 @@
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use parking_lot::{Mutex, Condvar}; use parking_lot::{Mutex, Condvar};
use db; use time;
use chain::Transaction; use futures::{Future, lazy, finished};
use p2p::OutboundSyncConnectionRef; use chain::{Transaction, IndexedTransaction, IndexedBlock};
use message::common::{InventoryType, InventoryVector};
use message::types; use message::types;
use synchronization_client::{Client, SynchronizationClient, BlockAnnouncementType}; use miner::BlockAssembler;
use synchronization_executor::{Task as SynchronizationTask, TaskExecutor as SynchronizationTaskExecutor, LocalSynchronizationTaskExecutor}; use network::Magic;
use synchronization_server::{Server, SynchronizationServer}; use synchronization_client::{Client};
use synchronization_verifier::{AsyncVerifier, TransactionVerificationSink}; use synchronization_executor::{Task as SynchronizationTask, TaskExecutor};
use synchronization_server::{Server, ServerTask};
use synchronization_verifier::{TransactionVerificationSink};
use primitives::hash::H256; use primitives::hash::H256;
use miner::BlockTemplate; use miner::BlockTemplate;
use synchronization_peers::{TransactionAnnouncementType, BlockAnnouncementType};
// TODO: check messages before processing (filterload's filter is max 36000, nHashFunc is <= 50, etc) use types::{PeerIndex, RequestId, StorageRef, MemoryPoolRef, PeersRef, ExecutorRef,
ClientRef, ServerRef, SynchronizationStateRef};
pub type LocalNodeRef = Arc<LocalNode<LocalSynchronizationTaskExecutor, SynchronizationServer, SynchronizationClient<LocalSynchronizationTaskExecutor, AsyncVerifier>>>;
/// Local synchronization node /// Local synchronization node
pub struct LocalNode<T: SynchronizationTaskExecutor + PeersConnections, pub struct LocalNode<T: TaskExecutor, U: Server, V: Client> {
U: Server, /// Network we are working on
V: Client> { network: Magic,
/// Throughout counter of synchronization peers /// Storage reference
peer_counter: AtomicUsize, storage: StorageRef,
/// Memory pool reference
memory_pool: MemoryPoolRef,
/// Synchronization peers
peers: PeersRef,
/// Shared synchronization state
state: SynchronizationStateRef,
/// Synchronization executor /// Synchronization executor
executor: Arc<Mutex<T>>, executor: ExecutorRef<T>,
/// Synchronization process /// Synchronization process
client: Arc<Mutex<V>>, client: ClientRef<V>,
/// Synchronization server /// Synchronization server
server: Arc<U>, server: ServerRef<U>,
}
/// Peers list
pub trait PeersConnections {
fn add_peer_connection(&mut self, peer_index: usize, outbound_connection: OutboundSyncConnectionRef);
fn remove_peer_connection(&mut self, peer_index: usize);
} }
/// Transaction accept verification sink /// Transaction accept verification sink
@ -48,162 +47,179 @@ struct TransactionAcceptSinkData {
waiter: Condvar, waiter: Condvar,
} }
impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersConnections, impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
U: Server, /// Create new synchronization node
V: Client { pub fn new(network: Magic, storage: StorageRef, memory_pool: MemoryPoolRef, peers: PeersRef,
/// New synchronization node with given storage state: SynchronizationStateRef, executor: ExecutorRef<T>, client: ClientRef<V>, server: ServerRef<U>) -> Self {
pub fn new(server: Arc<U>, client: Arc<Mutex<V>>, executor: Arc<Mutex<T>>) -> Self {
LocalNode { LocalNode {
peer_counter: AtomicUsize::new(0), network: network,
storage: storage,
memory_pool: memory_pool,
peers: peers,
state: state,
executor: executor, executor: executor,
client: client, client: client,
server: server, server: server,
} }
} }
/// Best block hash (including non-verified, requested && non-requested blocks) /// When new peer connects to the node
pub fn best_block(&self) -> db::BestBlock { pub fn on_connect(&self, peer_index: PeerIndex, version: types::Version) {
self.client.lock().best_block()
}
pub fn create_sync_session(&self, _best_block_height: i32, outbound_connection: OutboundSyncConnectionRef) -> usize {
// save connection for future
let peer_index = self.peer_counter.fetch_add(1, Ordering::SeqCst) + 1;
trace!(target: "sync", "Creating new sync session with peer#{}", peer_index);
self.client.lock().on_peer_connected(peer_index);
self.executor.lock().add_peer_connection(peer_index, outbound_connection);
peer_index
}
pub fn start_sync_session(&self, peer_index: usize, _version: u32) {
trace!(target: "sync", "Starting new sync session with peer#{}", peer_index); trace!(target: "sync", "Starting new sync session with peer#{}", peer_index);
// request inventory from peer // light clients may not want transactions broadcast until a filter for the connection is set
self.executor.lock().execute(SynchronizationTask::RequestBlocksHeaders(peer_index)); if !version.relay_transactions() {
self.peers.set_transaction_announcement_type(peer_index, TransactionAnnouncementType::DoNotAnnounce);
}
// start synchronization session with peer
self.client.on_connect(peer_index);
} }
pub fn stop_sync_session(&self, peer_index: usize) { /// When peer disconnects
pub fn on_disconnect(&self, peer_index: PeerIndex) {
trace!(target: "sync", "Stopping sync session with peer#{}", peer_index); trace!(target: "sync", "Stopping sync session with peer#{}", peer_index);
self.executor.lock().remove_peer_connection(peer_index); // stop synchronization session with peer
self.client.lock().on_peer_disconnected(peer_index); self.client.on_disconnect(peer_index);
} }
pub fn on_peer_inventory(&self, peer_index: usize, message: types::Inv) { /// When inventory message is received
pub fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv) {
trace!(target: "sync", "Got `inventory` message from peer#{}. Inventory len: {}", peer_index, message.inventory.len()); trace!(target: "sync", "Got `inventory` message from peer#{}. Inventory len: {}", peer_index, message.inventory.len());
self.client.on_inventory(peer_index, message);
// if there are unknown blocks => start synchronizing with peer
let blocks_inventory = self.blocks_inventory(&message.inventory);
if !blocks_inventory.is_empty() {
self.client.lock().on_new_blocks_inventory(peer_index, blocks_inventory);
}
// if there are unknown transactions => add to memory pool
let transactions_inventory = self.transactions_inventory(&message.inventory);
if !transactions_inventory.is_empty() {
self.client.lock().on_new_transactions_inventory(peer_index, transactions_inventory);
}
// currently we do not setup connection filter => skip InventoryType::MessageFilteredBlock
// currently we do not send sendcmpct message => skip InventoryType::MessageCompactBlock
} }
pub fn on_peer_getdata(&self, peer_index: usize, message: types::GetData) { /// When headers message is received
trace!(target: "sync", "Got `getdata` message from peer#{}", peer_index); pub fn on_headers(&self, peer_index: PeerIndex, message: types::Headers) {
trace!(target: "sync", "Got `headers` message from peer#{}. Headers len: {}", peer_index, message.headers.len());
let filtered_inventory = { self.client.on_headers(peer_index, message);
let mut client = self.client.lock();
client.filter_getdata_inventory(peer_index, message.inventory)
};
self.server.serve_getdata(peer_index, filtered_inventory).map(|t| self.server.add_task(peer_index, t));
} }
pub fn on_peer_getblocks(&self, peer_index: usize, message: types::GetBlocks) { /// When transaction is received
trace!(target: "sync", "Got `getblocks` message from peer#{}", peer_index); pub fn on_transaction(&self, peer_index: PeerIndex, tx: IndexedTransaction) {
// we ignore all transactions while synchronizing, as memory pool contains
self.server.serve_getblocks(peer_index, message).map(|t| self.server.add_task(peer_index, t)); // only verified transactions && we can not verify on-top transactions while
} // we are not on the top
if self.state.synchronizing() {
pub fn on_peer_getheaders(&self, peer_index: usize, message: types::GetHeaders, id: u32) { trace!(target: "sync", "Ignored `transaction` message from peer#{}. Tx hash: {}", peer_index, tx.hash.to_reversed_str());
trace!(target: "sync", "Got `getheaders` message from peer#{}", peer_index);
// do not serve getheaders requests until we are synchronized
let mut client = self.client.lock();
if client.state().is_synchronizing() {
self.executor.lock().execute(SynchronizationTask::Ignore(peer_index, id));
return; return;
} }
trace!(target: "sync", "Got `transaction` message from peer#{}. Tx hash: {}", peer_index, tx.hash.to_reversed_str());
self.client.on_transaction(peer_index, tx);
}
/// When block is received
pub fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
trace!(target: "sync", "Got `block` message from peer#{}. Block hash: {}", peer_index, block.header.hash.to_reversed_str());
self.client.on_block(peer_index, block);
}
/// When notfound is received
pub fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound) {
trace!(target: "sync", "Got `notfound` message from peer#{}", peer_index);
self.client.on_notfound(peer_index, message);
}
/// When peer requests inventory items
pub fn on_getdata(&self, peer_index: PeerIndex, message: types::GetData) {
if self.state.synchronizing() {
trace!(target: "sync", "Ignored `getdata` message from peer#{}. Inventory len: {}", peer_index, message.inventory.len());
return;
}
trace!(target: "sync", "Got `getdata` message from peer#{}. Inventory len: {}", peer_index, message.inventory.len());
self.server.execute(ServerTask::ServeGetData(peer_index, message));
}
/// When peer requests known block hashes
pub fn on_getblocks(&self, peer_index: PeerIndex, message: types::GetBlocks) {
if self.state.synchronizing() {
trace!(target: "sync", "Ignored `getblocks` message from peer#{}", peer_index);
return;
}
trace!(target: "sync", "Got `getblocks` message from peer#{}", peer_index);
self.server.execute(ServerTask::ServeGetBlocks(peer_index, message));
}
/// When peer requests known block headers
pub fn on_getheaders(&self, peer_index: PeerIndex, message: types::GetHeaders, id: RequestId) {
if self.state.synchronizing() {
trace!(target: "sync", "Ignored `getheaders` message from peer#{}", peer_index);
self.executor.execute(SynchronizationTask::Ignore(peer_index, id));
return;
}
trace!(target: "sync", "Got `getheaders` message from peer#{}", peer_index);
// simulating bitcoind for passing tests: if we are in nearly-saturated state // simulating bitcoind for passing tests: if we are in nearly-saturated state
// and peer, which has just provided new blocks to us, is asking for headers // and peer, which has just provided new blocks to us, is asking for headers
// => do not serve getheaders until we have fully processed its blocks + wait until headers are served before returning // => do not serve getheaders until we have fully processed its blocks + wait until headers are served before returning
self.server.serve_getheaders(peer_index, message, Some(id)) let server = Arc::downgrade(&self.server);
.map(|task| { let server_task = ServerTask::ServeGetHeaders(peer_index, message, id);
let weak_server = Arc::downgrade(&self.server); let lazy_server_task = lazy(move || {
let task = task.future::<U>(peer_index, weak_server); server.upgrade().map(|s| s.execute(server_task));
client.after_peer_nearly_blocks_verified(peer_index, Box::new(task)); finished::<(), ()>(())
}); }).boxed();
self.client.after_peer_nearly_blocks_verified(peer_index, lazy_server_task);
} }
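The deferred reply above captures only a weak handle to the server, so a task queued behind block verification cannot keep the server alive. The same ownership pattern in miniature, using only std types (Server here is a stand-in, not the crate's trait):

use std::sync::Arc;

struct Server;

impl Server {
	fn execute(&self) { /* serve the deferred getheaders request */ }
}

fn make_deferred_task(server: &Arc<Server>) -> Box<Fn() + Send> {
	let weak_server = Arc::downgrade(server);
	Box::new(move || {
		// if the server was dropped meanwhile, the task becomes a no-op
		if let Some(server) = weak_server.upgrade() {
			server.execute();
		}
	})
}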
pub fn on_peer_transaction(&self, peer_index: usize, message: types::Tx) { /// When peer requests memory pool contents
trace!(target: "sync", "Got `transaction` message from peer#{}. Transaction hash: {}", peer_index, message.transaction.hash().to_reversed_str()); pub fn on_mempool(&self, peer_index: PeerIndex, _message: types::MemPool) {
if self.state.synchronizing() {
// try to process new transaction trace!(target: "sync", "Ignored `mempool` message from peer#{}", peer_index);
self.client.lock().on_peer_transaction(peer_index, message.transaction); return;
}
pub fn on_peer_block(&self, peer_index: usize, message: types::Block) {
trace!(target: "sync", "Got `block` message from peer#{}. Block hash: {}", peer_index, message.block.hash().to_reversed_str());
// try to process new block
self.client.lock().on_peer_block(peer_index, message.block.into());
}
pub fn on_peer_headers(&self, peer_index: usize, message: types::Headers) {
trace!(target: "sync", "Got `headers` message from peer#{}. # of headers: {}", peer_index, message.headers.len());
if !message.headers.is_empty() {
self.client.lock().on_new_blocks_headers(peer_index, message.headers);
} }
}
pub fn on_peer_mempool(&self, peer_index: usize, _message: types::MemPool) {
trace!(target: "sync", "Got `mempool` message from peer#{}", peer_index); trace!(target: "sync", "Got `mempool` message from peer#{}", peer_index);
self.server.execute(ServerTask::ServeMempool(peer_index));
self.server.serve_mempool(peer_index).map(|t| self.server.add_task(peer_index, t));
} }
pub fn on_peer_filterload(&self, peer_index: usize, message: types::FilterLoad) { /// When peer asks us for specific transactions from a specific block
pub fn on_get_block_txn(&self, peer_index: PeerIndex, message: types::GetBlockTxn) {
if self.state.synchronizing() {
trace!(target: "sync", "Ignored `getblocktxn` message from peer#{}", peer_index);
return;
}
trace!(target: "sync", "Got `getblocktxn` message from peer#{}", peer_index);
self.server.execute(ServerTask::ServeGetBlockTxn(peer_index, message));
}
/// When peer sets bloom filter for connection
pub fn on_filterload(&self, peer_index: PeerIndex, message: types::FilterLoad) {
trace!(target: "sync", "Got `filterload` message from peer#{}", peer_index); trace!(target: "sync", "Got `filterload` message from peer#{}", peer_index);
self.client.lock().on_peer_filterload(peer_index, &message); self.peers.set_bloom_filter(peer_index, message);
} }
pub fn on_peer_filteradd(&self, peer_index: usize, message: types::FilterAdd) { /// When peer updates bloom filter for connection
pub fn on_filteradd(&self, peer_index: PeerIndex, message: types::FilterAdd) {
trace!(target: "sync", "Got `filteradd` message from peer#{}", peer_index); trace!(target: "sync", "Got `filteradd` message from peer#{}", peer_index);
self.client.lock().on_peer_filteradd(peer_index, &message); self.peers.update_bloom_filter(peer_index, message);
} }
pub fn on_peer_filterclear(&self, peer_index: usize, _message: types::FilterClear) { /// When peer removes bloom filter from connection
pub fn on_filterclear(&self, peer_index: PeerIndex, _message: types::FilterClear) {
trace!(target: "sync", "Got `filterclear` message from peer#{}", peer_index); trace!(target: "sync", "Got `filterclear` message from peer#{}", peer_index);
self.client.lock().on_peer_filterclear(peer_index); self.peers.clear_bloom_filter(peer_index);
} }
	/// When peer sets up a minimum fee rate filter for the connection
	pub fn on_feefilter(&self, peer_index: PeerIndex, message: types::FeeFilter) {
		trace!(target: "sync", "Got `feefilter` message from peer#{}", peer_index);
		self.peers.set_fee_filter(peer_index, message);
	}
	/// When peer asks us to announce new blocks using `headers` message
	pub fn on_sendheaders(&self, peer_index: PeerIndex, _message: types::SendHeaders) {
		trace!(target: "sync", "Got `sendheaders` message from peer#{}", peer_index);
		self.peers.set_block_announcement_type(peer_index, BlockAnnouncementType::SendHeaders);
	}

	/// When peer asks us to announce new blocks using `cmpctblock` message
	pub fn on_send_compact(&self, peer_index: PeerIndex, message: types::SendCompact) {
		trace!(target: "sync", "Got `sendcmpct` message from peer#{}", peer_index);
		// The second integer SHALL be interpreted as a little-endian version number. Nodes sending a sendcmpct message MUST currently set this value to 1.
@ -214,49 +230,41 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
		// Upon receipt of a "sendcmpct" message with the first and second integers set to 1, the node SHOULD announce new blocks by sending a cmpctblock message.
		if message.first {
			self.peers.set_block_announcement_type(peer_index, BlockAnnouncementType::SendCompactBlock);
		}
		// else:
		// Upon receipt of a "sendcmpct" message with the first integer set to 0, the node SHOULD NOT announce new blocks by sending a cmpctblock message,
		// but SHOULD announce new blocks by sending invs or headers, as defined by BIP130.
		// => work as before
	}
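The branch above is the whole of the BIP152 negotiation on the receiving side. A compact, hedged restatement of that decision; the enum mirrors `BlockAnnouncementType` used in this file, while the free function is purely illustrative:

#[derive(Clone, Copy, Debug, PartialEq)]
enum AnnouncementType { SendInventory, SendHeaders, SendCompactBlock }

// sendcmpct(first == true, version == 1) => switch to cmpctblock announcements
// sendcmpct(first == false, ..)          => keep announcing via inv/headers (BIP130)
fn after_sendcmpct(first: bool, current: AnnouncementType) -> AnnouncementType {
	if first { AnnouncementType::SendCompactBlock } else { current }
}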
	/// When peer sends us a merkle block
	pub fn on_merkleblock(&self, peer_index: PeerIndex, _message: types::MerkleBlock) {
		trace!(target: "sync", "Got `merkleblock` message from peer#{}", peer_index);
		// we never set up a filter on connections => misbehaving
		self.peers.misbehaving(peer_index, &format!("Got unrequested 'merkleblock' message"));
	}

	/// When peer sends us a compact block
	pub fn on_compact_block(&self, peer_index: PeerIndex, _message: types::CompactBlock) {
		trace!(target: "sync", "Got `cmpctblock` message from peer#{}", peer_index);
		// we never request compact blocks from peers => misbehaving
		self.peers.misbehaving(peer_index, &format!("Got unrequested 'cmpctblock' message"));
	}
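`misbehaving` is used here as a catch-all for protocol violations: messages we could never have solicited. A common way such a method is implemented is a per-peer score that triggers disconnection past a threshold; this is a sketch under that assumption, not the actual `Peers` code:

use std::collections::HashMap;

const MISBEHAVIOR_DISCONNECT_SCORE: u32 = 100;

#[derive(Default)]
struct PeerScores { scores: HashMap<usize, u32> }

impl PeerScores {
	/// Returns true when the accumulated score says the peer should be dropped.
	fn misbehaving(&mut self, peer: usize, _reason: &str, penalty: u32) -> bool {
		let score = self.scores.entry(peer).or_insert(0);
		*score += penalty;
		*score >= MISBEHAVIOR_DISCONNECT_SCORE
	}
}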
	pub fn on_peer_get_block_txn(&self, peer_index: usize, message: types::GetBlockTxn) {
		trace!(target: "sync", "Got `getblocktxn` message from peer#{}", peer_index);
		// Upon receipt of a properly-formatted getblocktxn message, nodes which *recently provided the sender
		// of such a message a cmpctblock for the block hash identified in this message* MUST respond ...
		// => we should check if we have sent cmpctblock before
		if {
			let mut client = self.client.lock();
			client.is_compact_block_sent_recently(peer_index, &message.request.blockhash)
		} {
			self.server.serve_get_block_txn(peer_index, message.request.blockhash, message.request.indexes).map(|t| self.server.add_task(peer_index, t));
		}
	}

	pub fn on_peer_notfound(&self, peer_index: usize, message: types::NotFound) {
		trace!(target: "sync", "Got `notfound` message from peer#{}", peer_index);
		let blocks_inventory = self.blocks_inventory(&message.inventory);
		self.client.lock().on_peer_blocks_notfound(peer_index, blocks_inventory);
	}

	/// When peer sends us specific transactions for a specific block
	pub fn on_block_txn(&self, peer_index: PeerIndex, _message: types::BlockTxn) {
		trace!(target: "sync", "Got `blocktxn` message from peer#{}", peer_index);
		// we never ask for this => misbehaving
		self.peers.misbehaving(peer_index, &format!("Got unrequested 'blocktxn' message"));
	}
	pub fn accept_transaction(&self, transaction: Transaction) -> Result<H256, String> {
		let sink_data = Arc::new(TransactionAcceptSinkData::default());
		let sink = TransactionAcceptSink::new(sink_data.clone()).boxed();
		{
			if let Err(err) = self.client.accept_transaction(transaction, sink) {
				return Err(err.into());
			}
		}
@ -264,22 +272,9 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
	}
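`accept_transaction` bridges the asynchronous verification pipeline back into a synchronous API: the sink is filled from the verification thread and the caller blocks on `sink_data` until a result arrives. A self-contained sketch of that wait/notify pattern with std primitives (the real code uses parking_lot; all names here are illustrative):

use std::sync::{Condvar, Mutex};

#[derive(Default)]
struct AcceptSinkData {
	result: Mutex<Option<Result<String, String>>>, // a String stands in for H256 here
	waiter: Condvar,
}

impl AcceptSinkData {
	// called from the verification sink when the transaction is accepted or rejected
	fn complete(&self, result: Result<String, String>) {
		*self.result.lock().unwrap() = Some(result);
		self.waiter.notify_all();
	}

	// called by the accepting thread to block until verification finishes
	fn wait(&self) -> Result<String, String> {
		let mut result = self.result.lock().unwrap();
		while result.is_none() {
			result = self.waiter.wait(result).unwrap();
		}
		result.take().expect("checked by the loop above")
	}
}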
	pub fn get_block_template(&self) -> BlockTemplate {
		let block_assembler = BlockAssembler::default();
		let memory_pool = &*self.memory_pool.read();
		block_assembler.create_new_block(&self.storage, memory_pool, time::get_time().sec as u32, self.network)
	}
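The new `get_block_template` builds the template directly from storage plus the shared memory pool instead of going through the client. As a rough sketch of what a block assembler does with the pool (fee-rate-greedy selection under a size budget; the types are stand-ins, not `BlockAssembler`'s real interface):

struct PoolTransaction { size: usize, fee_rate: u64 }

fn select_for_template(mut pool: Vec<PoolTransaction>, max_block_size: usize) -> Vec<PoolTransaction> {
	// highest fee rate first
	pool.sort_by(|a, b| b.fee_rate.cmp(&a.fee_rate));
	let mut selected = Vec::new();
	let mut used = 0;
	for tx in pool {
		if used + tx.size <= max_block_size {
			used += tx.size;
			selected.push(tx);
		}
	}
	selected
}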
fn transactions_inventory(&self, inventory: &[InventoryVector]) -> Vec<H256> {
inventory.iter()
.filter(|item| item.inv_type == InventoryType::MessageTx)
.map(|item| item.hash.clone())
.collect()
}
fn blocks_inventory(&self, inventory: &[InventoryVector]) -> Vec<H256> {
inventory.iter()
.filter(|item| item.inv_type == InventoryType::MessageBlock)
.map(|item| item.hash.clone())
.collect()
	}
}
@ -308,8 +303,8 @@ impl TransactionAcceptSinkData {
}

impl TransactionVerificationSink for TransactionAcceptSink {
	fn on_transaction_verification_success(&self, tx: IndexedTransaction) {
		*self.data.result.lock() = Some(Ok(tx.hash));
		self.data.waiter.notify_all();
	}
@ -320,20 +315,21 @@ impl TransactionVerificationSink for TransactionAcceptSink {
}
#[cfg(test)]
pub mod tests {
	use std::sync::Arc;
	use parking_lot::RwLock;
	use synchronization_executor::Task;
	use synchronization_executor::tests::DummyTaskExecutor;
	use synchronization_client::SynchronizationClient;
	use synchronization_client_core::{Config, SynchronizationClientCore, CoreVerificationSink};
	use synchronization_chain::Chain;
	use p2p::event_loop;
	use message::types;
	use message::common::{InventoryVector, InventoryType};
	use network::Magic;
	use chain::Transaction;
	use db;
	use miner::MemoryPool;
	use super::LocalNode;
	use test_data;
	use synchronization_server::ServerTask;
@ -342,74 +338,53 @@ mod tests {
	use tokio_core::reactor::{Core, Handle};
	use primitives::bytes::Bytes;
	use verification::BackwardsCompatibleChainVerifier as ChainVerifier;
use std::iter::repeat;
use synchronization_peers::PeersImpl;
use utils::SynchronizationState;
use types::SynchronizationStateRef;
	pub fn default_filterload() -> types::FilterLoad {
		types::FilterLoad {
			filter: Bytes::from(repeat(0u8).take(1024).collect::<Vec<_>>()),
			hash_functions: 10,
			tweak: 5,
			flags: types::FilterFlags::None,
		}
	}

	pub fn make_filteradd(data: &[u8]) -> types::FilterAdd {
		types::FilterAdd {
			data: data.into(),
		}
	}

	struct DummyOutboundSyncConnection;
	impl DummyOutboundSyncConnection {
		pub fn new() -> OutboundSyncConnectionRef {
			Box::new(DummyOutboundSyncConnection {})
		}
	}
	impl OutboundSyncConnection for DummyOutboundSyncConnection {
		fn send_inventory(&self, _message: &types::Inv) {}
		fn send_getdata(&self, _message: &types::GetData) {}
		fn send_getblocks(&self, _message: &types::GetBlocks) {}
fn send_getheaders(&self, _message: &types::GetHeaders) {}
fn send_transaction(&self, _message: &types::Tx) {}
fn send_block(&self, _message: &types::Block) {}
fn send_headers(&self, _message: &types::Headers) {}
fn respond_headers(&self, _message: &types::Headers, _id: u32) {}
fn send_mempool(&self, _message: &types::MemPool) {}
fn send_filterload(&self, _message: &types::FilterLoad) {}
fn send_filteradd(&self, _message: &types::FilterAdd) {}
fn send_filterclear(&self, _message: &types::FilterClear) {}
fn send_merkleblock(&self, _message: &types::MerkleBlock) {}
fn send_sendheaders(&self, _message: &types::SendHeaders) {}
fn send_feefilter(&self, _message: &types::FeeFilter) {}
fn send_send_compact(&self, _message: &types::SendCompact) {}
fn send_compact_block(&self, _message: &types::CompactBlock) {}
fn send_get_block_txn(&self, _message: &types::GetBlockTxn) {}
fn send_block_txn(&self, _message: &types::BlockTxn) {}
fn send_notfound(&self, _message: &types::NotFound) {}
fn ignored(&self, _id: u32) {}
fn close(&self) {}
	}
	fn create_local_node(verifier: Option<DummyVerifier>) -> (Core, Handle, Arc<DummyTaskExecutor>, Arc<DummyServer>, LocalNode<DummyTaskExecutor, DummyServer, SynchronizationClient<DummyTaskExecutor, DummyVerifier>>) {
		let event_loop = event_loop();
		let handle = event_loop.handle();
		let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
		let storage = Arc::new(db::TestStorage::with_genesis_block());
		let sync_state = SynchronizationStateRef::new(SynchronizationState::with_storage(storage.clone()));
		let chain = Chain::new(storage.clone(), memory_pool.clone());
		let sync_peers = Arc::new(PeersImpl::default());
		let executor = DummyTaskExecutor::new();
		let server = Arc::new(DummyServer::new());
		let config = Config { network: Magic::Mainnet, threads_num: 1, close_connection_on_bad_block: true };
		let chain_verifier = Arc::new(ChainVerifier::new(storage.clone(), Magic::Mainnet));
		let client_core = SynchronizationClientCore::new(config, &handle, sync_state.clone(), sync_peers.clone(), executor.clone(), chain, chain_verifier);
		let mut verifier = match verifier {
			Some(verifier) => verifier,
			None => DummyVerifier::default(),
		};
		verifier.set_sink(Arc::new(CoreVerificationSink::new(client_core.clone())));
		let client = SynchronizationClient::new(sync_state.clone(), client_core, verifier);
		let local_node = LocalNode::new(Magic::Mainnet, storage, memory_pool, sync_peers, sync_state, executor.clone(), client, server.clone());
		(event_loop, handle, executor, server, local_node)
	}
#[test]
fn local_node_request_inventory_on_sync_start() {
let (_, _, executor, _, local_node) = create_local_node(None);
let peer_index = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
// start sync session
local_node.start_sync_session(peer_index, 0);
// => ask for inventory
let tasks = executor.lock().take_tasks();
assert_eq!(tasks, vec![Task::RequestBlocksHeaders(peer_index)]);
}
	#[test]
	fn local_node_serves_block() {
		let (_, _, _, server, local_node) = create_local_node(None);
		let peer_index = 0; local_node.on_connect(peer_index, types::Version::default());
		// peer requests genesis block
		let genesis_block_hash = test_data::genesis().hash();
		let inventory = vec![
@ -418,207 +393,12 @@ mod tests {
				hash: genesis_block_hash.clone(),
			}
		];
		local_node.on_getdata(peer_index, types::GetData {
			inventory: inventory.clone()
		});
		// => `getdata` is served
		let tasks = server.take_tasks();
		assert_eq!(tasks, vec![ServerTask::ServeGetData(peer_index, types::GetData::with_inventory(inventory))]);
}
#[test]
fn local_node_serves_merkleblock() {
let (_, _, _, server, local_node) = create_local_node(None);
let genesis = test_data::genesis();
let b1 = test_data::block_builder().header().parent(genesis.hash()).build()
.transaction().output().value(10).build().build()
.build(); // genesis -> b1
let b2 = test_data::block_builder().header().parent(b1.hash()).build()
.transaction().output().value(20).build().build()
.build(); // genesis -> b1 -> b2
let tx1 = b1.transactions[0].clone();
let tx2 = b2.transactions[0].clone();
let tx1_hash = tx1.hash();
let tx2_hash = tx2.hash();
let b1_hash = b1.hash();
let b2_hash = b2.hash();
let match_tx1 = vec![(tx1_hash.clone(), tx1)];
let match_tx2 = vec![(tx2_hash.clone(), tx2)];
let no_match_bytes = Bytes::from(vec![0x00]);
let match_bytes = Bytes::from(vec![0x01]);
// This peer will provide blocks
let peer_index1 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_block(peer_index1, types::Block { block: b1.clone() });
local_node.on_peer_block(peer_index1, types::Block { block: b2.clone() });
		// This peer won't get any blocks, because it has not set a filter for the connection
let peer_index2 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_getdata(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b1_hash.clone() },
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b2_hash.clone() },
]});
assert_eq!(server.take_tasks(), vec![(peer_index2, ServerTask::ServeGetData(FilteredInventory::with_notfound(vec![
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b1_hash.clone() },
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b2_hash.clone() },
])))]);
let peers_config = vec![
(true, false), // will get tx1
(false, true), // will get tx2
(true, true), // will get both tx
(false, false), // won't get any tx
];
for (get_tx1, get_tx2) in peers_config {
let peer_index = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
// setup filter
local_node.on_peer_filterload(peer_index, default_filterload());
if get_tx1 {
local_node.on_peer_filteradd(peer_index, make_filteradd(&*tx1_hash));
}
if get_tx2 {
local_node.on_peer_filteradd(peer_index, make_filteradd(&*tx2_hash));
}
// ask for data
local_node.on_peer_getdata(peer_index, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b1_hash.clone() },
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b2_hash.clone() },
]});
// get server tasks
let tasks = server.take_tasks();
assert_eq!(tasks.len(), 1);
match tasks[0] {
(_, ServerTask::ServeGetData(ref filtered_inventory)) => {
assert_eq!(filtered_inventory.unfiltered.len(), 0);
assert_eq!(filtered_inventory.notfound.len(), 0);
assert_eq!(filtered_inventory.filtered.len(), 2);
assert_eq!(filtered_inventory.filtered[0].0, types::MerkleBlock {
block_header: b1.block_header.clone(),
total_transactions: 1,
hashes: vec![tx1_hash.clone()],
flags: if get_tx1 { match_bytes.clone() } else { no_match_bytes.clone() },
});
if get_tx1 {
assert_eq!(filtered_inventory.filtered[0].1, match_tx1);
} else {
assert_eq!(filtered_inventory.filtered[0].1, vec![]);
}
assert_eq!(filtered_inventory.filtered[1].0, types::MerkleBlock {
block_header: b2.block_header.clone(),
total_transactions: 1,
hashes: vec![tx2_hash.clone()],
flags: if get_tx2 { match_bytes.clone() } else { no_match_bytes.clone() },
});
if get_tx2 {
assert_eq!(filtered_inventory.filtered[1].1, match_tx2);
} else {
assert_eq!(filtered_inventory.filtered[1].1, vec![]);
}
},
_ => panic!("unexpected"),
}
}
}
#[test]
fn local_node_serves_compactblock() {
let (_, _, _, server, local_node) = create_local_node(None);
let genesis = test_data::genesis();
let b1 = test_data::block_builder().header().parent(genesis.hash()).build()
.transaction().output().value(10).build().build()
.build(); // genesis -> b1
let b1_hash = b1.hash();
// This peer will provide blocks
let peer_index1 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_block(peer_index1, types::Block { block: b1.clone() });
// This peer will receive compact block
let peer_index2 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_getdata(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageCompactBlock, hash: b1_hash.clone() },
]});
let tasks = server.take_tasks();
assert_eq!(tasks.len(), 1);
match tasks[0] {
(_, ServerTask::ServeGetData(ref gd)) => {
assert_eq!(gd.filtered.len(), 0);
assert_eq!(gd.unfiltered.len(), 0);
assert_eq!(gd.notfound.len(), 0);
assert_eq!(gd.compacted.len(), 1);
},
_ => panic!("unexpected"),
}
}
#[test]
fn local_node_serves_get_block_txn_when_recently_sent_compact_block() {
let (_, _, _, server, local_node) = create_local_node(None);
let genesis = test_data::genesis();
let b1 = test_data::block_builder().header().parent(genesis.hash()).build()
.transaction().output().value(10).build().build()
.build(); // genesis -> b1
let b1_hash = b1.hash();
// Append block
let peer_index1 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_block(peer_index1, types::Block { block: b1.clone() });
// Request compact block
let peer_index2 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_getdata(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageCompactBlock, hash: b1_hash.clone() },
]});
// forget tasks
server.take_tasks();
// Request compact transaction from this block
local_node.on_peer_get_block_txn(peer_index2, types::GetBlockTxn {
request: BlockTransactionsRequest {
blockhash: b1_hash.clone(),
indexes: vec![0],
}
});
let tasks = server.take_tasks();
assert_eq!(tasks, vec![(2, ServerTask::ServeGetBlockTxn(b1_hash, vec![0]))]);
}
#[test]
fn local_node_not_serves_get_block_txn_when_compact_block_was_not_sent() {
let (_, _, _, server, local_node) = create_local_node(None);
let genesis = test_data::genesis();
let b1 = test_data::block_builder().header().parent(genesis.hash()).build()
.transaction().output().value(10).build().build()
.build(); // genesis -> b1
let b1_hash = b1.hash();
// Append block
let peer_index1 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_block(peer_index1, types::Block { block: b1.clone() });
// Request compact transaction from this block
let peer_index2 = local_node.create_sync_session(0, DummyOutboundSyncConnection::new());
local_node.on_peer_get_block_txn(peer_index2, types::GetBlockTxn {
request: BlockTransactionsRequest {
blockhash: b1_hash,
indexes: vec![0],
}
});
let tasks = server.take_tasks();
assert_eq!(tasks, vec![]);
	}
	#[test]
@ -626,23 +406,17 @@ mod tests {
		let (_, _, executor, _, local_node) = create_local_node(None);
		// transaction will be relayed to this peer
		let peer_index1 = 0; local_node.on_connect(peer_index1, types::Version::default());
		executor.take_tasks();
		let genesis = test_data::genesis();
		let transaction: Transaction = test_data::TransactionBuilder::with_output(1).add_input(&genesis.transactions[0], 0).into();
		let transaction_hash = transaction.hash();
		let result = local_node.accept_transaction(transaction.clone());
		assert_eq!(result, Ok(transaction_hash.clone()));
		assert_eq!(executor.take_tasks(), vec![Task::RelayNewTransaction(transaction.into(), 83333333)]);
	}
	#[test]
@ -657,12 +431,12 @@ mod tests {
		let (_, _, executor, _, local_node) = create_local_node(Some(verifier));
		let peer_index1 = 0; local_node.on_connect(peer_index1, types::Version::default());
		executor.take_tasks();
		let result = local_node.accept_transaction(transaction);
		assert_eq!(result, Err("simulated".to_owned()));
		assert_eq!(executor.take_tasks(), vec![]);
	}
}

View File

@ -1,18 +1,13 @@
use std::collections::{VecDeque, HashSet};
use std::fmt;
use linked_hash_map::LinkedHashMap;
use chain::{BlockHeader, Transaction, IndexedBlockHeader, IndexedBlock, IndexedTransaction};
use db;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use utils::{BestHeadersChain, BestHeadersChainInformation, HashQueueChain, HashPosition};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
/// Thread-safe reference to `Chain`
pub type ChainRef = Arc<RwLock<Chain>>;
/// Index of 'verifying' queue
const VERIFYING_QUEUE: usize = 0;
@ -29,7 +24,7 @@ pub struct BlockInsertionResult {
	/// Hashes of blocks, which were canonized during this insertion procedure. Order matters
	pub canonized_blocks_hashes: Vec<H256>,
	/// Transactions to 'reverify'. Order matters
	pub transactions_to_reverify: Vec<IndexedTransaction>,
}
impl BlockInsertionResult {
@ -75,36 +70,17 @@ pub enum TransactionState {
/// Synchronization chain information
pub struct Information {
	/// Number of block hashes currently scheduled for requesting
	pub scheduled: BlockHeight,
	/// Number of block hashes currently requested from peers
	pub requested: BlockHeight,
	/// Number of blocks currently verifying
	pub verifying: BlockHeight,
	/// Number of blocks in the storage
	pub stored: BlockHeight,
	/// Information on memory pool
	pub transactions: MemoryPoolInformation,
	/// Information on headers chain
	pub headers: BestHeadersChainInformation,
}
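These counters track a hash as it moves through the sync pipeline. A toy model of the states and their normal forward transitions (the real bookkeeping lives in `hash_chain`; this is illustrative only):

#[derive(Clone, Copy, Debug, PartialEq)]
enum SyncBlockState { Scheduled, Requested, Verifying, Stored }

fn next_state(state: SyncBlockState) -> Option<SyncBlockState> {
	match state {
		SyncBlockState::Scheduled => Some(SyncBlockState::Requested), // `getdata` sent to a peer
		SyncBlockState::Requested => Some(SyncBlockState::Verifying), // block received
		SyncBlockState::Verifying => Some(SyncBlockState::Stored),    // verification passed, inserted to db
		SyncBlockState::Stored => None,                               // terminal
	}
}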
/// Result of intersecting chain && inventory
#[derive(Debug, PartialEq)]
pub enum HeadersIntersection {
/// 3.2: No intersection with in-memory queue && no intersection with db
NoKnownBlocks(usize),
/// 2.3: Inventory has no new blocks && some of blocks in inventory are in in-memory queue
InMemoryNoNewBlocks,
	/// 2.4.2: Inventory has new blocks && these blocks are right after chain's best block
	InMemoryMainNewBlocks(usize),
	/// 2.4.3: Inventory has new blocks && these blocks are forked from our chain's best block
	InMemoryForkNewBlocks(usize),
/// 3.3: No intersection with in-memory queue && has intersection with db && all blocks are already stored in db
DbAllBlocksKnown,
/// 3.4: No intersection with in-memory queue && has intersection with db && some blocks are not yet stored in db
DbForkNewBlocks(usize),
/// Dead-end blocks are starting from given index
DeadEnd(usize),
} }
/// Blockchain from the synchronization point of view, consisting of:
@ -118,15 +94,15 @@ pub struct Chain {
	/// Best storage block (stored for optimizations)
	best_storage_block: db::BestBlock,
	/// Local blocks storage
	storage: StorageRef,
	/// In-memory queue of blocks hashes
	hash_chain: HashQueueChain,
	/// In-memory queue of blocks headers
	headers_chain: BestHeadersChain,
	/// Currently verifying transactions
	verifying_transactions: LinkedHashMap<H256, IndexedTransaction>,
	/// Transactions memory pool
	memory_pool: MemoryPoolRef,
	/// Blocks that have been marked as dead-ends
	dead_end_blocks: HashSet<H256>,
}
@ -153,7 +129,7 @@ impl BlockState {
impl Chain {
	/// Create new `Chain` with given storage
	pub fn new(storage: StorageRef, memory_pool: MemoryPoolRef) -> Self {
		// we only work with storages with genesis block
		let genesis_block_hash = storage.block_hash(0)
			.expect("storage with genesis block is required");
@ -168,7 +144,7 @@ impl Chain {
			hash_chain: HashQueueChain::with_number_of_queues(NUMBER_OF_QUEUES),
			headers_chain: BestHeadersChain::new(best_storage_block_hash),
			verifying_transactions: LinkedHashMap::new(),
			memory_pool: memory_pool,
			dead_end_blocks: HashSet::new(),
		}
	}
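For reference, constructing a `Chain` after this change takes both the shared storage and the shared memory pool; this sketch mirrors the test setup further down in this diff and assumes the crate-internal types used elsewhere in this file (`Arc`, parking_lot's `RwLock`, `MemoryPool`, `db::TestStorage`):

fn new_test_chain() -> Chain {
	// storage must already contain the genesis block, or `Chain::new` panics
	let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
	let storage = Arc::new(db::TestStorage::with_genesis_block());
	Chain::new(storage, memory_pool)
}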
@ -180,29 +156,23 @@ impl Chain {
			requested: self.hash_chain.len_of(REQUESTED_QUEUE),
			verifying: self.hash_chain.len_of(VERIFYING_QUEUE),
			stored: self.best_storage_block.number + 1,
			transactions: self.memory_pool.read().information(),
			headers: self.headers_chain.information(),
		}
	}
	/// Get storage
	pub fn storage(&self) -> StorageRef {
		self.storage.clone()
	}

	/// Get memory pool
	pub fn memory_pool(&self) -> MemoryPoolRef {
		self.memory_pool.clone()
	}
	/// Get number of blocks in given state
	pub fn length_of_blocks_state(&self, state: BlockState) -> BlockHeight {
		match state {
			BlockState::Stored => self.best_storage_block.number + 1,
			_ => self.hash_chain.len_of(state.to_queue_index()),
@ -210,7 +180,7 @@ impl Chain {
		}
	}
	/// Get n best blocks of given state
	pub fn best_n_of_blocks_state(&self, state: BlockState, n: BlockHeight) -> Vec<H256> {
		match state {
			BlockState::Scheduled | BlockState::Requested | BlockState::Verifying => self.hash_chain.front_n_at(state.to_queue_index(), n),
			_ => unreachable!("must be checked by caller"),
@ -243,12 +213,12 @@ impl Chain {
			number: self.best_storage_block.number + headers_chain_information.best,
			hash: self.headers_chain.at(headers_chain_information.best - 1)
				.expect("got this index above; qed")
				.hash,
		}
	}
	/// Get block hash by number
	pub fn block_hash(&self, number: BlockHeight) -> Option<H256> {
		if number <= self.best_storage_block.number {
			self.storage.block_hash(number)
		} else {
@ -258,7 +228,7 @@ impl Chain {
		}
	}

	/// Get block number by hash
	pub fn block_number(&self, hash: &H256) -> Option<BlockHeight> {
		if let Some(number) = self.storage.block_number(hash) {
			return Some(number);
		}
@ -266,18 +236,18 @@ impl Chain {
	}

	/// Get block header by number
	pub fn block_header_by_number(&self, number: BlockHeight) -> Option<IndexedBlockHeader> {
		if number <= self.best_storage_block.number {
			self.storage.block_header(db::BlockRef::Number(number)).map(Into::into)
		} else {
			self.headers_chain.at(number - self.best_storage_block.number)
		}
	}
	/// Get block header by hash
	pub fn block_header_by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
		if let Some(block) = self.storage.block(db::BlockRef::Hash(hash.clone())) {
			return Some(block.block_header.into());
		}
		self.headers_chain.by_hash(hash)
	}
@ -317,35 +287,35 @@ impl Chain {
	}

	/// Schedule block hashes for requesting
	pub fn schedule_blocks_headers(&mut self, headers: Vec<IndexedBlockHeader>) {
		self.hash_chain.push_back_n_at(SCHEDULED_QUEUE, headers.iter().map(|h| h.hash.clone()).collect());
		self.headers_chain.insert_n(headers);
	}
	/// Moves n blocks from scheduled queue to requested queue
	pub fn request_blocks_hashes(&mut self, n: BlockHeight) -> Vec<H256> {
		let scheduled = self.hash_chain.pop_front_n_at(SCHEDULED_QUEUE, n);
		self.hash_chain.push_back_n_at(REQUESTED_QUEUE, scheduled.clone());
		scheduled
	}
	/// Add block to verifying queue
	pub fn verify_block(&mut self, header: IndexedBlockHeader) {
		// insert header to the in-memory chain in case when it is not already there (non-headers-first sync)
		self.hash_chain.push_back_at(VERIFYING_QUEUE, header.hash.clone());
		self.headers_chain.insert(header);
	}
	/// Add blocks to verifying queue
	pub fn verify_blocks(&mut self, blocks: Vec<IndexedBlockHeader>) {
		for block in blocks {
			self.verify_block(block);
		}
	}
	/// Moves n blocks from requested queue to verifying queue
	#[cfg(test)]
	pub fn verify_blocks_hashes(&mut self, n: BlockHeight) -> Vec<H256> {
		let requested = self.hash_chain.pop_front_n_at(REQUESTED_QUEUE, n);
		self.hash_chain.push_back_n_at(VERIFYING_QUEUE, requested.clone());
		requested
@ -357,7 +327,7 @@ impl Chain {
	}

	/// Insert new best block to storage
	pub fn insert_best_block(&mut self, block: &IndexedBlock) -> Result<BlockInsertionResult, db::Error> {
		let is_appending_to_main_branch = self.best_storage_block.hash == block.header.raw.previous_header_hash;
		// insert to storage
@ -367,32 +337,33 @@ impl Chain {
		self.best_storage_block = self.storage.best_block().expect("Inserted block above");
		// remove inserted block + handle possible reorganization in headers chain
		self.headers_chain.block_inserted_to_storage(block.hash(), &self.best_storage_block.hash);
		// case 1: block has been added to the main branch
		if is_appending_to_main_branch {
			// double check
			assert_eq!(self.best_storage_block.hash, block.hash().clone());
			// all transactions from this block were accepted
			// => delete accepted transactions from verification queue and from the memory pool
			// + also remove transactions which spent outputs which have been spent by transactions from the block
			let mut memory_pool = self.memory_pool.write();
			for tx in &block.transactions {
				memory_pool.remove_by_hash(&tx.hash);
				self.verifying_transactions.remove(&tx.hash);
				for tx_input in &tx.raw.inputs {
					memory_pool.remove_by_prevout(&tx_input.previous_output);
				}
			}
			// no transactions to reverify, because we have just appended new transactions to the blockchain
			Ok(BlockInsertionResult {
				canonized_blocks_hashes: vec![block.hash().clone()],
				transactions_to_reverify: Vec::new(),
			})
		}
		// case 2: block has been added to the side branch with reorganization to this branch
		else if &self.best_storage_block.hash == block.hash() {
			let mut reorganization = match storage_insertion {
				db::BlockInsertedChain::Reorganized(reorganization) => reorganization,
				// we have just inserted block to side chain (!is_appending_to_main_branch)
@ -412,8 +383,10 @@ impl Chain {
				new_main_blocks_transactions_hashes.extend(canonized_transactions_hashes);
				canonized_blocks_hashes.push(canonized_block_hash);
			}
			let mut memory_pool = self.memory_pool.write();
			for transaction_accepted in this_block_transactions_hashes.into_iter().chain(new_main_blocks_transactions_hashes.into_iter()) {
				memory_pool.remove_by_hash(&transaction_accepted);
				self.verifying_transactions.remove(&transaction_accepted);
			}
			canonized_blocks_hashes.reverse();
@ -424,24 +397,23 @@ impl Chain {
				let decanonized_transactions_hashes = self.storage.block_transaction_hashes(db::BlockRef::Hash(decanonized_block_hash));
				old_main_blocks_transactions_hashes.extend(decanonized_transactions_hashes);
			}
			let old_main_blocks_transactions: Vec<IndexedTransaction> = old_main_blocks_transactions_hashes.into_iter()
				.map(|h| self.storage.transaction(&h).expect("block in storage => block transaction in storage").into())
				.collect();
			// reverify memory pool transactions, sorted by timestamp
			let memory_pool_transactions_count = memory_pool.information().transactions_count;
			let memory_pool_transactions: Vec<IndexedTransaction> = memory_pool
				.remove_n_with_strategy(memory_pool_transactions_count, MemoryPoolOrderingStrategy::ByTimestamp)
				.into_iter()
				.map(|t| t.into())
				.collect();
			// reverify verifying transactions
			let verifying_transactions: Vec<IndexedTransaction> = self.verifying_transactions
				.iter()
				.map(|(_, t)| t.clone())
				.collect();
			// there's no guarantee (in docs) that LinkedHashMap::into_iter() will return values ordered by insertion time
			self.verifying_transactions.clear();
			Ok(BlockInsertionResult {
@ -527,110 +499,32 @@ impl Chain {
		self.headers_chain.remove_n(hashes);
	}
/// Intersect chain with inventory
pub fn intersect_with_blocks_headers(&self, hashes: &[H256], headers: &[BlockHeader]) -> HeadersIntersection {
let hashes_len = hashes.len();
assert!(hashes_len != 0 && hashes.len() == headers.len());
		// given that headers are ordered
let (is_first_known, first_state) = match self.block_state(&hashes[0]) {
BlockState::Unknown => (false, self.block_state(&headers[0].previous_header_hash)),
state => (true, state),
};
match first_state {
// if first block of inventory is dead-end, then all other blocks are also dead-end blocks
BlockState::DeadEnd => {
HeadersIntersection::DeadEnd(0)
},
			// if first block of inventory is unknown && its parent is unknown => all other blocks are also unknown
BlockState::Unknown => {
HeadersIntersection::NoKnownBlocks(0)
},
// else if first block is known
first_block_state => match self.block_state(&hashes[hashes_len - 1]) {
// if last block is known to be in db => all inventory blocks are also in db
BlockState::Stored => {
HeadersIntersection::DbAllBlocksKnown
},
// if first block is known && last block is unknown but we know block before first one => intersection with queue or with db
BlockState::Unknown if !is_first_known => {
// previous block is stored => fork from stored block
if first_state == BlockState::Stored {
HeadersIntersection::DbForkNewBlocks(0)
}
// previous block is best block => no fork
else if &self.best_block().hash == &headers[0].previous_header_hash {
HeadersIntersection::InMemoryMainNewBlocks(0)
}
// previous block is not a best block => fork
else {
HeadersIntersection::InMemoryForkNewBlocks(0)
}
},
// if first block is known && last block is unknown => intersection with queue or with db
BlockState::Unknown | BlockState::DeadEnd if is_first_known => {
// find last known block
let mut previous_state = first_block_state;
for (index, hash) in hashes.iter().enumerate().take(hashes_len).skip(1) {
let state = self.block_state(hash);
if state == BlockState::Unknown || state == BlockState::DeadEnd {
// if state is dead end => there are no useful blocks
if state == BlockState::DeadEnd {
return HeadersIntersection::DeadEnd(index);
}
// previous block is stored => fork from stored block
else if previous_state == BlockState::Stored {
return HeadersIntersection::DbForkNewBlocks(index);
}
// previous block is best block => no fork
else if &self.best_block().hash == &hashes[index - 1] {
return HeadersIntersection::InMemoryMainNewBlocks(index);
}
// previous block is not a best block => fork
else {
return HeadersIntersection::InMemoryForkNewBlocks(index);
}
}
previous_state = state;
}
// unreachable because last block is unknown && in above loop we search for unknown blocks
unreachable!();
},
// if first block is known && last block is also known && is in queue => queue intersection with no new block
_ => {
HeadersIntersection::InMemoryNoNewBlocks
}
}
}
}
	/// Get transaction state
	pub fn transaction_state(&self, hash: &H256) -> TransactionState {
		if self.verifying_transactions.contains_key(hash) {
			return TransactionState::Verifying;
		}
		if self.storage.contains_transaction(hash) {
			return TransactionState::Stored;
		}
		if self.memory_pool.read().contains(hash) {
			return TransactionState::InMemory;
		}
		TransactionState::Unknown
	}
	/// Get transactions hashes with given state
	pub fn transactions_hashes_with_state(&self, state: TransactionState) -> Vec<H256> {
		match state {
			TransactionState::InMemory => self.memory_pool.read().get_transactions_ids(),
			TransactionState::Verifying => self.verifying_transactions.keys().cloned().collect(),
			_ => panic!("wrong argument"),
		}
	}
	/// Add transaction to verifying queue
	pub fn verify_transaction(&mut self, tx: IndexedTransaction) {
		self.verifying_transactions.insert(tx.hash.clone(), tx);
	}
	/// Remove verifying transaction
@ -650,7 +544,7 @@ impl Chain {
		for h in all_keys {
			let remove_verifying_transaction = {
				if let Some(entry) = self.verifying_transactions.get(&h) {
					if entry.raw.inputs.iter().any(|i| i.previous_output.hash == hash) {
						queue.push_back(h.clone());
						true
					} else {
@ -670,25 +564,26 @@ impl Chain {
	}

	/// Get transaction by hash (if it's in memory pool or verifying)
	pub fn transaction_by_hash(&self, hash: &H256) -> Option<IndexedTransaction> {
		self.verifying_transactions.get(hash).cloned()
			.or_else(|| self.memory_pool.read().read_by_hash(hash).cloned().map(|t| t.into()))
	}
	/// Insert transaction to memory pool
	pub fn insert_verified_transaction(&mut self, transaction: IndexedTransaction) {
		// we have verified transaction, but possibly this transaction replaces
		// existing transaction from memory pool
		// => remove previous transactions before
		let mut memory_pool = self.memory_pool.write();
		for input in &transaction.raw.inputs {
			memory_pool.remove_by_prevout(&input.previous_output);
		}
		// now insert transaction itself
		memory_pool.insert_verified(transaction);
	}
	/// Calculate block locator hashes for hash queue
	fn block_locator_hashes_for_queue(&self, hashes: &mut Vec<H256>) -> (BlockHeight, BlockHeight) {
		let queue_len = self.hash_chain.len();
		if queue_len == 0 {
			return (0, 1);
@ -711,7 +606,7 @@ impl Chain {
		}
	}

	/// Calculate block locator hashes for storage
	fn block_locator_hashes_for_storage(&self, mut index: BlockHeight, mut step: BlockHeight, hashes: &mut Vec<H256>) {
		loop {
			let block_hash = self.storage.block_hash(index)
				.expect("private function; index calculated in `block_locator_hashes`; qed");
@ -735,16 +630,30 @@ impl Chain {
impl db::TransactionProvider for Chain {
	fn transaction_bytes(&self, hash: &H256) -> Option<Bytes> {
		self.memory_pool.read().transaction_bytes(hash)
			.or_else(|| self.storage.transaction_bytes(hash))
	}

	fn transaction(&self, hash: &H256) -> Option<Transaction> {
		self.memory_pool.read().transaction(hash)
			.or_else(|| self.storage.transaction(hash))
	}
}
impl db::BlockHeaderProvider for Chain {
fn block_header_bytes(&self, block_ref: db::BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
}
fn block_header(&self, block_ref: db::BlockRef) -> Option<BlockHeader> {
match block_ref {
db::BlockRef::Hash(hash) => self.block_header_by_hash(&hash).map(|h| h.raw),
db::BlockRef::Number(n) => self.block_header_by_number(n).map(|h| h.raw),
}
}
}
impl fmt::Debug for Information {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "[sch:{} / bh:{} -> req:{} -> vfy:{} -> stored: {}]", self.scheduled, self.headers.best, self.requested, self.verifying, self.stored)
@ -783,20 +692,21 @@ impl fmt::Debug for Chain {
#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use parking_lot::RwLock;
	use chain::{Transaction, IndexedBlockHeader};
	use devtools::RandomTempPath;
	use db::{self, Store, BestBlock, BlockStapler};
	use miner::MemoryPool;
	use primitives::hash::H256;
	use test_data;
	use super::{Chain, BlockState, TransactionState, BlockInsertionResult};
	use utils::HashPosition;
	#[test]
	fn chain_empty() {
		let db = Arc::new(db::TestStorage::with_genesis_block());
		let db_best_block = BestBlock { number: 0, hash: db.best_block().expect("storage with genesis block is required").hash };
		let chain = Chain::new(db.clone(), Arc::new(RwLock::new(MemoryPool::new())));
		assert_eq!(chain.information().scheduled, 0);
		assert_eq!(chain.information().requested, 0);
		assert_eq!(chain.information().verifying, 0);
@ -813,13 +723,13 @@ mod tests {
	#[test]
	fn chain_block_path() {
		let db = Arc::new(db::TestStorage::with_genesis_block());
		let mut chain = Chain::new(db.clone(), Arc::new(RwLock::new(MemoryPool::new())));
		// add 6 blocks to scheduled queue
		let blocks = test_data::build_n_empty_blocks_from_genesis(6, 0);
		let headers: Vec<IndexedBlockHeader> = blocks.into_iter().map(|b| b.block_header.into()).collect();
		let hashes: Vec<_> = headers.iter().map(|h| h.hash.clone()).collect();
		chain.schedule_blocks_headers(headers.clone());
		assert!(chain.information().scheduled == 6 && chain.information().requested == 0
			&& chain.information().verifying == 0 && chain.information().stored == 1);
@ -846,17 +756,17 @@ mod tests {
		assert!(chain.information().scheduled == 3 && chain.information().requested == 1
			&& chain.information().verifying == 0 && chain.information().stored == 1);
		// mark 0 & 1 as verifying
		chain.verify_block(headers[0].clone().into());
		chain.verify_block(headers[1].clone().into());
		assert!(chain.information().scheduled == 3 && chain.information().requested == 1
			&& chain.information().verifying == 2 && chain.information().stored == 1);
		// mark block 0 as verified
		assert_eq!(chain.forget_block_with_state(&hashes[0], BlockState::Verifying), HashPosition::Front);
		assert!(chain.information().scheduled == 3 && chain.information().requested == 1
			&& chain.information().verifying == 1 && chain.information().stored == 1);
		// insert new best block to the chain
		chain.insert_best_block(&test_data::block_h1().into()).expect("Db error");
		assert!(chain.information().scheduled == 3 && chain.information().requested == 1
			&& chain.information().verifying == 1 && chain.information().stored == 2);
		assert_eq!(db.best_block().expect("storage with genesis block is required").number, 1);
@ -864,26 +774,26 @@ mod tests {
#[test] #[test]
fn chain_block_locator_hashes() { fn chain_block_locator_hashes() {
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), Arc::new(RwLock::new(MemoryPool::new())));
let genesis_hash = chain.best_block().hash; let genesis_hash = chain.best_block().hash;
assert_eq!(chain.block_locator_hashes(), vec![genesis_hash.clone()]); assert_eq!(chain.block_locator_hashes(), vec![genesis_hash.clone()]);
let block1 = test_data::block_h1(); let block1 = test_data::block_h1();
let block1_hash = block1.hash(); let block1_hash = block1.hash();
chain.insert_best_block(block1_hash.clone(), &block1.into()).expect("Error inserting new block"); chain.insert_best_block(&block1.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block1_hash.clone(), genesis_hash.clone()]); assert_eq!(chain.block_locator_hashes(), vec![block1_hash.clone(), genesis_hash.clone()]);
let block2 = test_data::block_h2(); let block2 = test_data::block_h2();
let block2_hash = block2.hash(); let block2_hash = block2.hash();
chain.insert_best_block(block2_hash.clone(), &block2.into()).expect("Error inserting new block"); chain.insert_best_block(&block2.into()).expect("Error inserting new block");
assert_eq!(chain.block_locator_hashes(), vec![block2_hash.clone(), block1_hash.clone(), genesis_hash.clone()]); assert_eq!(chain.block_locator_hashes(), vec![block2_hash.clone(), block1_hash.clone(), genesis_hash.clone()]);
let blocks0 = test_data::build_n_empty_blocks_from_genesis(11, 0); let blocks0 = test_data::build_n_empty_blocks_from_genesis(11, 0);
let headers0: Vec<_> = blocks0.into_iter().map(|b| b.block_header).collect(); let headers0: Vec<IndexedBlockHeader> = blocks0.into_iter().map(|b| b.block_header.into()).collect();
let hashes0: Vec<_> = headers0.iter().map(|h| h.hash()).collect(); let hashes0: Vec<_> = headers0.iter().map(|h| h.hash.clone()).collect();
chain.schedule_blocks_headers(hashes0.clone(), headers0.clone()); chain.schedule_blocks_headers(headers0.clone());
chain.request_blocks_hashes(10); chain.request_blocks_hashes(10);
chain.verify_blocks_hashes(10); chain.verify_blocks_hashes(10);
@ -902,10 +812,10 @@ mod tests {
genesis_hash.clone(), genesis_hash.clone(),
]); ]);
let blocks1 = test_data::build_n_empty_blocks_from(6, 0, &headers0[10]); let blocks1 = test_data::build_n_empty_blocks_from(6, 0, &headers0[10].raw);
let headers1: Vec<_> = blocks1.into_iter().map(|b| b.block_header).collect(); let headers1: Vec<IndexedBlockHeader> = blocks1.into_iter().map(|b| b.block_header.into()).collect();
let hashes1: Vec<_> = headers1.iter().map(|h| h.hash()).collect(); let hashes1: Vec<_> = headers1.iter().map(|h| h.hash.clone()).collect();
chain.schedule_blocks_headers(hashes1.clone(), headers1.clone()); chain.schedule_blocks_headers(headers1.clone());
chain.request_blocks_hashes(10); chain.request_blocks_hashes(10);
assert_eq!(chain.block_locator_hashes(), vec![ assert_eq!(chain.block_locator_hashes(), vec![
@ -924,10 +834,10 @@ mod tests {
genesis_hash.clone(), genesis_hash.clone(),
]); ]);
let blocks2 = test_data::build_n_empty_blocks_from(3, 0, &headers1[5]); let blocks2 = test_data::build_n_empty_blocks_from(3, 0, &headers1[5].raw);
let headers2: Vec<_> = blocks2.into_iter().map(|b| b.block_header).collect(); let headers2: Vec<IndexedBlockHeader> = blocks2.into_iter().map(|b| b.block_header.into()).collect();
let hashes2: Vec<_> = headers2.iter().map(|h| h.hash()).collect(); let hashes2: Vec<_> = headers2.iter().map(|h| h.hash.clone()).collect();
chain.schedule_blocks_headers(hashes2.clone(), headers2); chain.schedule_blocks_headers(headers2);
assert_eq!(chain.block_locator_hashes(), vec![ assert_eq!(chain.block_locator_hashes(), vec![
hashes2[2].clone(), hashes2[2].clone(),
@ -946,93 +856,17 @@ mod tests {
]); ]);
} }
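The locator asserted above follows the standard Bitcoin spacing: the newest entries step back one block at a time, and after roughly ten entries the step doubles until genesis is reached. A minimal sketch of that height sequence (hypothetical helper, not part of this crate):

	// Hypothetical illustration of block-locator spacing: dense for the first
	// ~10 entries, then exponentially sparser back to height 0.
	fn locator_heights(best_height: u32) -> Vec<u32> {
		let mut heights = Vec::new();
		let mut height = best_height as i64;
		let mut step = 1i64;
		loop {
			heights.push(height as u32);
			if height == 0 {
				break;
			}
			if heights.len() >= 10 {
				step *= 2;
			}
			height = if height > step { height - step } else { 0 };
		}
		heights
	}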
#[test]
fn chain_intersect_with_inventory() {
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
// append 2 db blocks
chain.insert_best_block(test_data::block_h1().hash(), &test_data::block_h1().into()).expect("Error inserting new block");
chain.insert_best_block(test_data::block_h2().hash(), &test_data::block_h2().into()).expect("Error inserting new block");
// prepare blocks
let blocks0 = test_data::build_n_empty_blocks_from(9, 0, &test_data::block_h2().block_header);
let headers0: Vec<_> = blocks0.into_iter().map(|b| b.block_header).collect();
let hashes0: Vec<_> = headers0.iter().map(|h| h.hash()).collect();
// append 3 verifying blocks, 3 requested blocks && 3 scheduled blocks
chain.schedule_blocks_headers(hashes0.clone(), headers0.clone());
chain.request_blocks_hashes(6);
chain.verify_blocks_hashes(3);
let blocks1 = test_data::build_n_empty_blocks(2, 0);
let headers1: Vec<_> = blocks1.into_iter().map(|b| b.block_header).collect();
let hashes1: Vec<_> = headers1.iter().map(|h| h.hash()).collect();
assert_eq!(chain.intersect_with_blocks_headers(&hashes1, &headers1), HeadersIntersection::NoKnownBlocks(0));
assert_eq!(chain.intersect_with_blocks_headers(&vec![
hashes0[2].clone(),
hashes0[3].clone(),
hashes0[4].clone(),
hashes0[5].clone(),
hashes0[6].clone(),
], &vec![
headers0[2].clone(),
headers0[3].clone(),
headers0[4].clone(),
headers0[5].clone(),
headers0[6].clone(),
]), HeadersIntersection::InMemoryNoNewBlocks);
assert_eq!(chain.intersect_with_blocks_headers(&vec![
hashes0[7].clone(),
hashes0[8].clone(),
hashes1[0].clone(),
hashes1[1].clone(),
], &vec![
headers0[7].clone(),
headers0[8].clone(),
headers1[0].clone(),
headers1[1].clone(),
]), HeadersIntersection::InMemoryMainNewBlocks(2));
assert_eq!(chain.intersect_with_blocks_headers(&vec![
hashes0[6].clone(),
hashes0[7].clone(),
hashes1[0].clone(),
hashes1[1].clone(),
], &vec![
headers0[6].clone(),
headers0[7].clone(),
headers1[0].clone(),
headers1[1].clone(),
]), HeadersIntersection::InMemoryForkNewBlocks(2));
assert_eq!(chain.intersect_with_blocks_headers(&vec![
test_data::block_h1().hash(),
test_data::block_h2().hash(),
], &vec![
test_data::block_h1().block_header,
test_data::block_h2().block_header,
]), HeadersIntersection::DbAllBlocksKnown);
assert_eq!(chain.intersect_with_blocks_headers(&vec![
test_data::block_h2().hash(),
hashes1[0].clone(),
], &vec![
test_data::block_h2().block_header,
headers1[0].clone(),
]), HeadersIntersection::DbForkNewBlocks(1));
}
#[test] #[test]
fn chain_transaction_state() { fn chain_transaction_state() {
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), Arc::new(RwLock::new(MemoryPool::new())));
let genesis_block = test_data::genesis(); let genesis_block = test_data::genesis();
let block1 = test_data::block_h1(); let block1 = test_data::block_h1();
let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into(); let tx1: Transaction = test_data::TransactionBuilder::with_version(1).into();
let tx2: Transaction = test_data::TransactionBuilder::with_version(2).into(); let tx2: Transaction = test_data::TransactionBuilder::with_version(2).into();
let tx1_hash = tx1.hash(); let tx1_hash = tx1.hash();
let tx2_hash = tx2.hash(); let tx2_hash = tx2.hash();
chain.verify_transaction(tx1_hash.clone(), tx1); chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2); chain.insert_verified_transaction(tx2.into());
assert_eq!(chain.transaction_state(&genesis_block.transactions[0].hash()), TransactionState::Stored); assert_eq!(chain.transaction_state(&genesis_block.transactions[0].hash()), TransactionState::Stored);
assert_eq!(chain.transaction_state(&block1.transactions[0].hash()), TransactionState::Unknown); assert_eq!(chain.transaction_state(&block1.transactions[0].hash()), TransactionState::Unknown);
@ -1056,15 +890,15 @@ mod tests {
let tx2 = b1.transactions[1].clone(); let tx2 = b1.transactions[1].clone();
let tx2_hash = tx2.hash(); let tx2_hash = tx2.hash();
let mut chain = Chain::new(Arc::new(db::TestStorage::with_blocks(&vec![b0]))); let mut chain = Chain::new(Arc::new(db::TestStorage::with_blocks(&vec![b0])), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1_hash.clone(), tx1); chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2); chain.insert_verified_transaction(tx2.into());
// only one transaction is in the memory pool // only one transaction is in the memory pool
assert_eq!(chain.information().transactions.transactions_count, 1); assert_eq!(chain.information().transactions.transactions_count, 1);
// when a block is inserted into the database => all accepted transactions are removed from mempool && verifying queue // when a block is inserted into the database => all accepted transactions are removed from mempool && verifying queue
chain.insert_best_block(b1.hash(), &b1.into()).expect("block accepted"); chain.insert_best_block(&b1.into()).expect("block accepted");
assert_eq!(chain.information().transactions.transactions_count, 0); assert_eq!(chain.information().transactions.transactions_count, 0);
assert!(!chain.forget_verifying_transaction(&tx1_hash)); assert!(!chain.forget_verifying_transaction(&tx1_hash));
@ -1079,11 +913,11 @@ mod tests {
.into_input(0).add_output(300).store(test_chain) // t1 -> t2 -> t3 .into_input(0).add_output(300).store(test_chain) // t1 -> t2 -> t3
.set_default_input(0).set_output(400).store(test_chain); // t4 .set_default_input(0).set_output(400).store(test_chain); // t4
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(test_chain.at(0).hash(), test_chain.at(0)); chain.verify_transaction(test_chain.at(0).into());
chain.verify_transaction(test_chain.at(1).hash(), test_chain.at(1)); chain.verify_transaction(test_chain.at(1).into());
chain.verify_transaction(test_chain.at(2).hash(), test_chain.at(2)); chain.verify_transaction(test_chain.at(2).into());
chain.verify_transaction(test_chain.at(3).hash(), test_chain.at(3)); chain.verify_transaction(test_chain.at(3).into());
chain.forget_verifying_transaction_with_children(&test_chain.at(0).hash()); chain.forget_verifying_transaction_with_children(&test_chain.at(0).hash());
assert!(!chain.forget_verifying_transaction(&test_chain.at(0).hash())); assert!(!chain.forget_verifying_transaction(&test_chain.at(0).hash()));
@ -1100,11 +934,11 @@ mod tests {
.into_input(0).add_output(300).store(test_chain) // t1 -> t2 -> t3 .into_input(0).add_output(300).store(test_chain) // t1 -> t2 -> t3
.set_default_input(0).set_output(400).store(test_chain); // t4 .set_default_input(0).set_output(400).store(test_chain); // t4
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(test_chain.at(0)); chain.insert_verified_transaction(test_chain.at(0).into());
chain.insert_verified_transaction(test_chain.at(1)); chain.insert_verified_transaction(test_chain.at(1).into());
chain.insert_verified_transaction(test_chain.at(2)); chain.insert_verified_transaction(test_chain.at(2).into());
chain.insert_verified_transaction(test_chain.at(3)); chain.insert_verified_transaction(test_chain.at(3).into());
let chain_transactions = chain.transactions_hashes_with_state(TransactionState::InMemory); let chain_transactions = chain.transactions_hashes_with_state(TransactionState::InMemory);
assert!(chain_transactions.contains(&test_chain.at(0).hash())); assert!(chain_transactions.contains(&test_chain.at(0).hash()));
@ -1129,23 +963,23 @@ mod tests {
let storage = Arc::new(db::Storage::new(path.as_path()).unwrap()); let storage = Arc::new(db::Storage::new(path.as_path()).unwrap());
storage.insert_block(&b0).expect("no db error"); storage.insert_block(&b0).expect("no db error");
let mut chain = Chain::new(storage); let mut chain = Chain::new(storage, Arc::new(RwLock::new(MemoryPool::new())));
chain.verify_transaction(tx1_hash.clone(), tx1); chain.verify_transaction(tx1.into());
chain.insert_verified_transaction(tx2); chain.insert_verified_transaction(tx2.into());
// no reorg // no reorg
let result = chain.insert_best_block(b1.hash(), &b1.into()).expect("no error"); let result = chain.insert_best_block(&b1.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0); assert_eq!(result.transactions_to_reverify.len(), 0);
// no reorg // no reorg
let result = chain.insert_best_block(b2.hash(), &b2.into()).expect("no error"); let result = chain.insert_best_block(&b2.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 0); assert_eq!(result.transactions_to_reverify.len(), 0);
// reorg // reorg
let result = chain.insert_best_block(b3.hash(), &b3.into()).expect("no error"); let result = chain.insert_best_block(&b3.into()).expect("no error");
assert_eq!(result.transactions_to_reverify.len(), 2); assert_eq!(result.transactions_to_reverify.len(), 2);
assert!(result.transactions_to_reverify.iter().any(|&(ref h, _)| h == &tx1_hash)); assert!(result.transactions_to_reverify.iter().any(|ref tx| &tx.hash == &tx1_hash));
assert!(result.transactions_to_reverify.iter().any(|&(ref h, _)| h == &tx2_hash)); assert!(result.transactions_to_reverify.iter().any(|ref tx| &tx.hash == &tx2_hash));
} }
#[test] #[test]
@ -1177,31 +1011,31 @@ mod tests {
let tx5 = b5.transactions[0].clone(); let tx5 = b5.transactions[0].clone();
let path = RandomTempPath::create_dir(); let path = RandomTempPath::create_dir();
let storage = Arc::new(db::Storage::new(path.as_path()).unwrap()); let storage = Arc::new(db::Storage::new(path.as_path()).unwrap());
storage.insert_block(&genesis).expect("no db error"); storage.insert_block(&genesis).expect("no db error");
let mut chain = Chain::new(storage); let mut chain = Chain::new(storage, Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx3); chain.insert_verified_transaction(tx3.into());
chain.insert_verified_transaction(tx4); chain.insert_verified_transaction(tx4.into());
chain.insert_verified_transaction(tx5); chain.insert_verified_transaction(tx5.into());
assert_eq!(chain.insert_best_block(b0.hash(), &b0.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()])); assert_eq!(chain.insert_best_block(&b0.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b0.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3); assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b1.hash(), &b1.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()])); assert_eq!(chain.insert_best_block(&b1.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b1.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3); assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b2.hash(), &b2.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()])); assert_eq!(chain.insert_best_block(&b2.clone().into()).expect("block accepted"), BlockInsertionResult::with_canonized_blocks(vec![b2.hash()]));
assert_eq!(chain.information().transactions.transactions_count, 3); assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b3.hash(), &b3.clone().into()).expect("block accepted"), BlockInsertionResult::default()); assert_eq!(chain.insert_best_block(&b3.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3); assert_eq!(chain.information().transactions.transactions_count, 3);
assert_eq!(chain.insert_best_block(b4.hash(), &b4.clone().into()).expect("block accepted"), BlockInsertionResult::default()); assert_eq!(chain.insert_best_block(&b4.clone().into()).expect("block accepted"), BlockInsertionResult::default());
assert_eq!(chain.information().transactions.transactions_count, 3); assert_eq!(chain.information().transactions.transactions_count, 3);
// order matters // order matters
let insert_result = chain.insert_best_block(b5.hash(), &b5.clone().into()).expect("block accepted"); let insert_result = chain.insert_best_block(&b5.clone().into()).expect("block accepted");
let transactions_to_reverify_hashes: Vec<_> = insert_result let transactions_to_reverify_hashes: Vec<_> = insert_result
.transactions_to_reverify .transactions_to_reverify
.into_iter() .into_iter()
.map(|(h, _)| h) .map(|tx| tx.hash)
.collect(); .collect();
assert_eq!(transactions_to_reverify_hashes, vec![tx1_hash, tx2_hash]); assert_eq!(transactions_to_reverify_hashes, vec![tx1_hash, tx2_hash]);
assert_eq!(insert_result.canonized_blocks_hashes, vec![b3.hash(), b4.hash(), b5.hash()]); assert_eq!(insert_result.canonized_blocks_hashes, vec![b3.hash(), b4.hash(), b5.hash()]);
@ -1223,60 +1057,15 @@ mod tests {
let tx3: Transaction = test_data::TransactionBuilder::with_output(20).add_input(&tx0, 1).into(); let tx3: Transaction = test_data::TransactionBuilder::with_output(20).add_input(&tx0, 1).into();
// insert tx2 into the memory pool // insert tx2 into the memory pool
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(tx2.clone()); chain.insert_verified_transaction(tx2.clone().into());
chain.insert_verified_transaction(tx3.clone()); chain.insert_verified_transaction(tx3.clone().into());
// insert verified block with tx1 // insert verified block with tx1
chain.insert_best_block(b0.hash(), &b0.into()).expect("no error"); chain.insert_best_block(&b0.into()).expect("no error");
// => tx2 is removed from memory pool, but tx3 remains // => tx2 is removed from memory pool, but tx3 remains
assert_eq!(chain.information().transactions.transactions_count, 1); assert_eq!(chain.information().transactions.transactions_count, 1);
} }
#[test]
fn chain_dead_end() {
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
let blocks = test_data::build_n_empty_blocks_from(5, 0, &test_data::genesis().block_header);
let headers: Vec<_> = blocks.iter().map(|b| b.block_header.clone()).collect();
let hashes: Vec<_> = headers.iter().map(|h| h.hash()).collect();
chain.insert_best_block(blocks[0].hash(), &blocks[0].clone().into()).expect("no error");
chain.insert_best_block(blocks[1].hash(), &blocks[1].clone().into()).expect("no error");
chain.mark_dead_end_block(&blocks[2].hash());
assert_eq!(chain.intersect_with_blocks_headers(&vec![
hashes[0].clone(),
hashes[1].clone(),
hashes[2].clone(),
hashes[3].clone(),
hashes[4].clone(),
], &vec![
headers[0].clone(),
headers[1].clone(),
headers[2].clone(),
headers[3].clone(),
headers[4].clone(),
]), HeadersIntersection::DeadEnd(2));
assert_eq!(chain.intersect_with_blocks_headers(&vec![
hashes[2].clone(),
hashes[3].clone(),
hashes[4].clone(),
], &vec![
headers[2].clone(),
headers[3].clone(),
headers[4].clone(),
]), HeadersIntersection::DeadEnd(0));
assert_eq!(chain.intersect_with_blocks_headers(&vec![
hashes[3].clone(),
hashes[4].clone(),
], &vec![
headers[3].clone(),
headers[4].clone(),
]), HeadersIntersection::DeadEnd(0));
}
#[test] #[test]
fn update_memory_pool_transaction() { fn update_memory_pool_transaction() {
use test_data::{ChainBuilder, TransactionBuilder}; use test_data::{ChainBuilder, TransactionBuilder};
@ -1286,10 +1075,10 @@ mod tests {
.reset().set_input(&data_chain.at(0), 0).add_output(20).lock().store(data_chain) // transaction0 -> transaction1 .reset().set_input(&data_chain.at(0), 0).add_output(20).lock().store(data_chain) // transaction0 -> transaction1
.reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2 .reset().set_input(&data_chain.at(0), 0).add_output(30).store(data_chain); // transaction0 -> transaction2
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block())); let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), Arc::new(RwLock::new(MemoryPool::new())));
chain.insert_verified_transaction(data_chain.at(1)); chain.insert_verified_transaction(data_chain.at(1).into());
assert_eq!(chain.information().transactions.transactions_count, 1); assert_eq!(chain.information().transactions.transactions_count, 1);
chain.insert_verified_transaction(data_chain.at(2)); chain.insert_verified_transaction(data_chain.at(2).into());
assert_eq!(chain.information().transactions.transactions_count, 1); // tx was replaced assert_eq!(chain.information().transactions.transactions_count, 1); // tx was replaced
} }
} }
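Across these tests the refactored Chain API is consistent: construction takes a shared memory pool, and blocks and transactions are passed as indexed values that carry their own hash. A condensed usage sketch with the same workspace types (TestStorage, MemoryPool, TransactionBuilder) as the tests above:

	// Sketch of the refactored call pattern; all names come from this workspace.
	let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
	let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()), memory_pool);
	// Indexed values replace the old separate (hash, value) arguments:
	chain.insert_best_block(&test_data::block_h1().into()).expect("no error");
	let tx: Transaction = test_data::TransactionBuilder::with_version(1).into();
	chain.verify_transaction(tx.into());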

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,250 +1,214 @@
use std::sync::Arc; use std::sync::Arc;
use std::collections::HashMap; use chain::{IndexedBlock, IndexedTransaction};
use parking_lot::Mutex; use message::common::InventoryVector;
use chain::{Block, BlockHeader, Transaction};
use message::common::{InventoryVector, InventoryType, BlockHeaderAndIDs, BlockTransactions};
use message::types; use message::types;
use primitives::hash::H256; use synchronization_peers::{BlockAnnouncementType, TransactionAnnouncementType};
use p2p::OutboundSyncConnectionRef; use types::{PeerIndex, PeersRef, RequestId};
use synchronization_chain::ChainRef; use utils::KnownHashType;
use synchronization_server::ServerTaskIndex;
use local_node::PeersConnections;
pub type LocalSynchronizationTaskExecutorRef = Arc<Mutex<LocalSynchronizationTaskExecutor>>;
/// Synchronization task executor /// Synchronization task executor
pub trait TaskExecutor : Send + 'static { pub trait TaskExecutor : Send + Sync + 'static {
fn execute(&mut self, task: Task); fn execute(&self, task: Task);
} }
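Note the signature change: execute now takes &self and the trait requires Sync, so implementors that mutate state need interior mutability. The DummyTaskExecutor in the tests below uses exactly this pattern; a minimal sketch:

	// Minimal executor recording tasks behind a lock (same pattern as
	// DummyTaskExecutor below); RecordingExecutor is a hypothetical name.
	struct RecordingExecutor {
		tasks: parking_lot::Mutex<Vec<Task>>,
	}

	impl TaskExecutor for RecordingExecutor {
		fn execute(&self, task: Task) {
			self.tasks.lock().push(task);
		}
	}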
/// Synchronization task for the peer. /// Synchronization task for the peer.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum Task { pub enum Task {
/// Request given blocks.
RequestBlocks(usize, Vec<H256>),
/// Request blocks headers using full getheaders.block_locator_hashes.
RequestBlocksHeaders(usize),
/// Request memory pool contents
RequestTransactions(usize, Vec<H256>),
/// Request memory pool contents
RequestMemoryPool(usize),
/// Send block.
SendBlock(usize, Block),
/// Send merkleblock
SendMerkleBlock(usize, types::MerkleBlock),
/// Send transaction
SendTransaction(usize, Transaction),
/// Send block transactions
SendBlockTxn(usize, H256, Vec<Transaction>),
/// Send notfound
SendNotFound(usize, Vec<InventoryVector>),
/// Send inventory
SendInventory(usize, Vec<InventoryVector>),
/// Send headers
SendHeaders(usize, Vec<BlockHeader>, ServerTaskIndex),
/// Send compact blocks
SendCompactBlocks(usize, Vec<BlockHeaderAndIDs>),
/// Notify io about ignored request /// Notify io about ignored request
Ignore(usize, u32), Ignore(PeerIndex, RequestId),
/// Close connection with this peer /// Request unknown items from peer
Close(usize), GetData(PeerIndex, types::GetData),
/// Get headers
GetHeaders(PeerIndex, types::GetHeaders),
/// Get memory pool
MemoryPool(PeerIndex),
/// Send block
Block(PeerIndex, IndexedBlock),
/// Send merkleblock
MerkleBlock(PeerIndex, types::MerkleBlock),
/// Send cmpctblock
CompactBlock(PeerIndex, types::CompactBlock),
/// Send transaction
Transaction(PeerIndex, IndexedTransaction),
/// Send block transactions
BlockTxn(PeerIndex, types::BlockTxn),
/// Send notfound
NotFound(PeerIndex, types::NotFound),
/// Send inventory
Inventory(PeerIndex, types::Inv),
/// Send headers
Headers(PeerIndex, types::Headers, Option<RequestId>),
/// Relay new block to peers
RelayNewBlock(IndexedBlock),
/// Relay new transaction to peers
RelayNewTransaction(IndexedTransaction, u64),
} }
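Task variants now wrap fully-formed protocol message types instead of loose hashes and vectors, so a call site assembles the message first. A hedged sketch (block_hash, peer_index, transaction and fee_rate are placeholders):

	// The message is built up front, then handed to the executor.
	let inv = types::Inv::with_inventory(vec![InventoryVector::block(block_hash.clone())]);
	executor.execute(Task::Inventory(peer_index, inv));
	executor.execute(Task::RelayNewTransaction(transaction, fee_rate));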
/// Synchronization tasks executor /// Synchronization tasks executor
pub struct LocalSynchronizationTaskExecutor { pub struct LocalSynchronizationTaskExecutor {
/// Active synchronization peers /// Active synchronization peers
peers: HashMap<usize, OutboundSyncConnectionRef>, peers: PeersRef,
/// Synchronization chain
chain: ChainRef,
} }
impl Task { impl LocalSynchronizationTaskExecutor {
#[cfg(test)] pub fn new(peers: PeersRef) -> Arc<Self> {
pub fn peer_index(&self) -> usize { Arc::new(LocalSynchronizationTaskExecutor {
match *self { peers: peers,
Task::RequestBlocks(peer_index, _) => peer_index, })
Task::RequestBlocksHeaders(peer_index) => peer_index, }
Task::RequestTransactions(peer_index, _) => peer_index,
Task::RequestMemoryPool(peer_index) => peer_index, fn execute_ignore(&self, peer_index: PeerIndex, request_id: RequestId) {
Task::SendBlock(peer_index, _) => peer_index, if let Some(connection) = self.peers.connection(peer_index) {
Task::SendMerkleBlock(peer_index, _) => peer_index, trace!(target: "sync", "Ignoring request {} from peer#{}", request_id, peer_index);
Task::SendTransaction(peer_index, _) => peer_index, connection.ignored(request_id);
Task::SendBlockTxn(peer_index, _, _) => peer_index, }
Task::SendNotFound(peer_index, _) => peer_index, }
Task::SendInventory(peer_index, _) => peer_index,
Task::SendHeaders(peer_index, _, _) => peer_index, fn execute_getdata(&self, peer_index: PeerIndex, getdata: types::GetData) {
Task::SendCompactBlocks(peer_index, _) => peer_index, if let Some(connection) = self.peers.connection(peer_index) {
Task::Ignore(peer_index, _) => peer_index, trace!(target: "sync", "Querying {} unknown items from peer#{}", getdata.inventory.len(), peer_index);
Task::Close(peer_index) => peer_index, connection.send_getdata(&getdata);
}
}
fn execute_getheaders(&self, peer_index: PeerIndex, getheaders: types::GetHeaders) {
if let Some(connection) = self.peers.connection(peer_index) {
if !getheaders.block_locator_hashes.is_empty() {
trace!(target: "sync", "Querying headers starting with {} unknown items from peer#{}", getheaders.block_locator_hashes[0].to_reversed_str(), peer_index);
}
connection.send_getheaders(&getheaders);
}
}
fn execute_memorypool(&self, peer_index: PeerIndex) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Querying memory pool contents from peer#{}", peer_index);
let mempool = types::MemPool;
connection.send_mempool(&mempool);
}
}
fn execute_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending block {} to peer#{}", block.hash().to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, block.hash().clone(), KnownHashType::Block);
let block = types::Block {
block: block.to_raw_block(),
};
connection.send_block(&block);
}
}
fn execute_merkleblock(&self, peer_index: PeerIndex, block: types::MerkleBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
let hash = block.block_header.hash();
trace!(target: "sync", "Sending merkle block {} to peer#{}", hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, hash, KnownHashType::Block);
connection.send_merkleblock(&block);
}
}
fn execute_compact_block(&self, peer_index: PeerIndex, block: types::CompactBlock) {
if let Some(connection) = self.peers.connection(peer_index) {
let hash = block.header.header.hash();
trace!(target: "sync", "Sending compact block {} to peer#{}", hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, hash, KnownHashType::CompactBlock);
connection.send_compact_block(&block);
}
}
fn execute_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending transaction {} to peer#{}", transaction.hash.to_reversed_str(), peer_index);
self.peers.hash_known_as(peer_index, transaction.hash, KnownHashType::Transaction);
let transaction = types::Tx {
transaction: transaction.raw,
};
connection.send_transaction(&transaction);
}
}
fn execute_block_txn(&self, peer_index: PeerIndex, blocktxn: types::BlockTxn) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", blocktxn.request.transactions.len(), peer_index);
connection.send_block_txn(&blocktxn);
}
}
fn execute_notfound(&self, peer_index: PeerIndex, notfound: types::NotFound) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending notfound to peer#{} with {} items", peer_index, notfound.inventory.len());
connection.send_notfound(&notfound);
}
}
fn execute_inventory(&self, peer_index: PeerIndex, inventory: types::Inv) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending inventory to peer#{} with {} items", peer_index, inventory.inventory.len());
connection.send_inventory(&inventory);
}
}
fn execute_headers(&self, peer_index: PeerIndex, headers: types::Headers, request_id: Option<RequestId>) {
if let Some(connection) = self.peers.connection(peer_index) {
trace!(target: "sync", "Sending headers to peer#{} with {} items", peer_index, headers.headers.len());
match request_id {
Some(request_id) => connection.respond_headers(&headers, request_id),
None => connection.send_headers(&headers),
}
}
}
fn execute_relay_block(&self, block: IndexedBlock) {
for peer_index in self.peers.enumerate() {
match self.peers.filter_block(peer_index, &block) {
BlockAnnouncementType::SendInventory => {
self.execute_inventory(peer_index, types::Inv::with_inventory(vec![
InventoryVector::block(block.hash().clone()),
]));
},
BlockAnnouncementType::SendHeaders => {
self.execute_headers(peer_index, types::Headers::with_headers(vec![
block.header.raw.clone(),
]), None);
},
BlockAnnouncementType::SendCompactBlock => if let Some(compact_block) = self.peers.build_compact_block(peer_index, &block) {
self.execute_compact_block(peer_index, compact_block);
},
BlockAnnouncementType::DoNotAnnounce => (),
}
}
}
fn execute_relay_transaction(&self, transaction: IndexedTransaction, fee_rate: u64) {
for peer_index in self.peers.enumerate() {
match self.peers.filter_transaction(peer_index, &transaction, Some(fee_rate)) {
TransactionAnnouncementType::SendInventory => self.execute_inventory(peer_index, types::Inv::with_inventory(vec![
InventoryVector::tx(transaction.hash.clone()),
])),
TransactionAnnouncementType::DoNotAnnounce => (),
}
} }
} }
} }
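Relay behaviour is thus driven entirely by per-peer state in Peers: a peer switches its announcement type in response to protocol messages such as sendheaders or sendcmpct, which the tests below simulate directly. A condensed sketch:

	// Sketch: peer#2 opted into compact blocks, so a relayed block reaches it
	// as cmpctblock while peers with the default setting get a plain inv.
	peers.set_block_announcement_type(2, BlockAnnouncementType::SendCompactBlock);
	executor.execute(Task::RelayNewBlock(block));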
impl LocalSynchronizationTaskExecutor {
pub fn new(chain: ChainRef) -> Arc<Mutex<Self>> {
Arc::new(Mutex::new(LocalSynchronizationTaskExecutor {
peers: HashMap::new(),
chain: chain,
}))
}
}
impl PeersConnections for LocalSynchronizationTaskExecutor {
fn add_peer_connection(&mut self, index: usize, connection: OutboundSyncConnectionRef) {
self.peers.insert(index, connection);
}
fn remove_peer_connection(&mut self, index: usize) {
self.peers.remove(&index);
}
}
impl TaskExecutor for LocalSynchronizationTaskExecutor { impl TaskExecutor for LocalSynchronizationTaskExecutor {
fn execute(&mut self, task: Task) { fn execute(&self, task: Task) {
match task { match task {
Task::RequestBlocks(peer_index, blocks_hashes) => { Task::Ignore(peer_index, request_id) => self.execute_ignore(peer_index, request_id),
let getdata = types::GetData { Task::GetData(peer_index, getdata) => self.execute_getdata(peer_index, getdata),
inventory: blocks_hashes.into_iter() Task::GetHeaders(peer_index, getheaders) => self.execute_getheaders(peer_index, getheaders),
.map(|hash| InventoryVector { Task::MemoryPool(peer_index) => self.execute_memorypool(peer_index),
inv_type: InventoryType::MessageBlock, Task::Block(peer_index, block) => self.execute_block(peer_index, block),
hash: hash, Task::MerkleBlock(peer_index, block) => self.execute_merkleblock(peer_index, block),
}).collect() Task::CompactBlock(peer_index, block) => self.execute_compact_block(peer_index, block),
}; Task::Transaction(peer_index, transaction) => self.execute_transaction(peer_index, transaction),
Task::BlockTxn(peer_index, blocktxn) => self.execute_block_txn(peer_index, blocktxn),
if let Some(connection) = self.peers.get_mut(&peer_index) { Task::NotFound(peer_index, notfound) => self.execute_notfound(peer_index, notfound),
trace!(target: "sync", "Querying {} unknown blocks from peer#{}", getdata.inventory.len(), peer_index); Task::Inventory(peer_index, inventory) => self.execute_inventory(peer_index, inventory),
connection.send_getdata(&getdata); Task::Headers(peer_index, headers, request_id) => self.execute_headers(peer_index, headers, request_id),
} Task::RelayNewBlock(block) => self.execute_relay_block(block),
}, Task::RelayNewTransaction(transaction, fee_rate) => self.execute_relay_transaction(transaction, fee_rate),
Task::RequestBlocksHeaders(peer_index) => {
let block_locator_hashes = self.chain.read().block_locator_hashes();
let getheaders = types::GetHeaders {
version: 0, // this field is ignored by clients
block_locator_hashes: block_locator_hashes,
hash_stop: H256::default(),
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Request blocks hashes from peer#{} using getheaders", peer_index);
connection.send_getheaders(&getheaders);
}
},
Task::RequestMemoryPool(peer_index) => {
let mempool = types::MemPool;
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Querying memory pool contents from peer#{}", peer_index);
connection.send_mempool(&mempool);
}
},
Task::RequestTransactions(peer_index, transactions_hashes) => {
let getdata = types::GetData {
inventory: transactions_hashes.into_iter()
.map(|hash| InventoryVector {
inv_type: InventoryType::MessageTx,
hash: hash,
}).collect()
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Querying {} unknown transactions from peer#{}", getdata.inventory.len(), peer_index);
connection.send_getdata(&getdata);
}
},
Task::SendBlock(peer_index, block) => {
let block_message = types::Block {
block: block,
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending block {:?} to peer#{}", block_message.block.hash().to_reversed_str(), peer_index);
connection.send_block(&block_message);
}
},
Task::SendMerkleBlock(peer_index, merkleblock) => {
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending merkleblock {:?} to peer#{}", merkleblock.block_header.hash().to_reversed_str(), peer_index);
connection.send_merkleblock(&merkleblock);
}
},
Task::SendTransaction(peer_index, transaction) => {
let transaction_message = types::Tx {
transaction: transaction,
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending transaction {:?} to peer#{}", transaction_message.transaction.hash().to_reversed_str(), peer_index);
connection.send_transaction(&transaction_message);
}
},
Task::SendBlockTxn(peer_index, block_hash, transactions) => {
let transactions_message = types::BlockTxn {
request: BlockTransactions {
blockhash: block_hash,
transactions: transactions,
}
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", transactions_message.request.transactions.len(), peer_index);
connection.send_block_txn(&transactions_message);
}
},
Task::SendNotFound(peer_index, unknown_inventory) => {
let notfound = types::NotFound {
inventory: unknown_inventory,
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending notfound to peer#{} with {} items", peer_index, notfound.inventory.len());
connection.send_notfound(&notfound);
}
},
Task::SendInventory(peer_index, inventory) => {
let inventory = types::Inv {
inventory: inventory,
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending inventory to peer#{} with {} items", peer_index, inventory.inventory.len());
connection.send_inventory(&inventory);
}
},
Task::SendHeaders(peer_index, headers, id) => {
let headers = types::Headers {
headers: headers,
};
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Sending headers to peer#{} with {} items", peer_index, headers.headers.len());
match id.raw() {
Some(id) => connection.respond_headers(&headers, id),
None => connection.send_headers(&headers),
}
}
},
Task::SendCompactBlocks(peer_index, compact_blocks) => {
if let Some(connection) = self.peers.get_mut(&peer_index) {
for compact_block in compact_blocks {
trace!(target: "sync", "Sending compact_block {:?} to peer#{}", compact_block.header.hash(), peer_index);
connection.send_compact_block(&types::CompactBlock {
header: compact_block,
});
}
}
},
Task::Ignore(peer_index, id) => {
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Ignoring request from peer#{} with id {}", peer_index, id);
connection.ignored(id);
}
},
Task::Close(peer_index) => {
if let Some(connection) = self.peers.get_mut(&peer_index) {
trace!(target: "sync", "Closing request with peer#{}", peer_index);
connection.close();
}
},
} }
} }
} }
@ -253,62 +217,161 @@ impl TaskExecutor for LocalSynchronizationTaskExecutor {
pub mod tests { pub mod tests {
use super::*; use super::*;
use std::sync::Arc; use std::sync::Arc;
use std::mem::replace;
use std::time; use std::time;
use parking_lot::{Mutex, Condvar}; use parking_lot::{Mutex, Condvar};
use local_node::PeersConnections; use chain::Transaction;
use p2p::OutboundSyncConnectionRef; use message::types;
use std::collections::HashSet; use test_data;
use inbound_connection::tests::DummyOutboundSyncConnection;
use local_node::tests::{default_filterload, make_filteradd};
use synchronization_peers::{PeersImpl, PeersContainer, PeersFilters, PeersOptions, BlockAnnouncementType};
pub struct DummyTaskExecutor { pub struct DummyTaskExecutor {
tasks: Vec<Task>, tasks: Mutex<Vec<Task>>,
waiter: Arc<Condvar>, waiter: Arc<Condvar>,
closed: HashSet<usize>,
} }
impl DummyTaskExecutor { impl DummyTaskExecutor {
pub fn new() -> Arc<Mutex<Self>> { pub fn new() -> Arc<Self> {
Arc::new(Mutex::new(DummyTaskExecutor { Arc::new(DummyTaskExecutor {
tasks: Vec::new(), tasks: Mutex::new(Vec::new()),
waiter: Arc::new(Condvar::new()), waiter: Arc::new(Condvar::new()),
closed: HashSet::new(), })
}))
} }
pub fn wait_tasks_for(executor: Arc<Mutex<Self>>, timeout_ms: u64) -> Vec<Task> { pub fn wait_tasks_for(executor: Arc<Self>, timeout_ms: u64) -> Vec<Task> {
let mut executor = executor.lock(); {
if executor.tasks.is_empty() { let mut tasks = executor.tasks.lock();
let waiter = executor.waiter.clone(); if tasks.is_empty() {
waiter.wait_for(&mut executor, time::Duration::from_millis(timeout_ms)).timed_out(); let waiter = executor.waiter.clone();
waiter.wait_for(&mut tasks, time::Duration::from_millis(timeout_ms)).timed_out();
}
} }
executor.take_tasks() executor.take_tasks()
} }
pub fn wait_tasks(executor: Arc<Mutex<Self>>) -> Vec<Task> { pub fn wait_tasks(executor: Arc<Self>) -> Vec<Task> {
DummyTaskExecutor::wait_tasks_for(executor, 1000) DummyTaskExecutor::wait_tasks_for(executor, 1000)
} }
pub fn take_tasks(&mut self) -> Vec<Task> { pub fn take_tasks(&self) -> Vec<Task> {
replace(&mut self.tasks, Vec::new()) let mut tasks = self.tasks.lock();
let tasks = tasks.drain(..).collect();
tasks
} }
} }
impl PeersConnections for DummyTaskExecutor {
fn add_peer_connection(&mut self, _: usize, _: OutboundSyncConnectionRef) {}
fn remove_peer_connection(&mut self, _: usize) {}
}
impl TaskExecutor for DummyTaskExecutor { impl TaskExecutor for DummyTaskExecutor {
fn execute(&mut self, task: Task) { fn execute(&self, task: Task) {
match task { self.tasks.lock().push(task);
Task::Close(id) => {
self.closed.insert(id);
()
},
_ => if self.closed.contains(&task.peer_index()) { return },
}
self.tasks.push(task);
self.waiter.notify_one(); self.waiter.notify_one();
} }
} }
#[test]
fn relay_new_block_after_sendcmpct() {
let peers = Arc::new(PeersImpl::default());
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, c1.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.set_block_announcement_type(2, BlockAnnouncementType::SendCompactBlock);
executor.execute(Task::RelayNewBlock(test_data::genesis().into()));
assert_eq!(*c1.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c2.messages.lock().entry("cmpctblock".to_owned()).or_insert(0), 1);
}
#[test]
fn relay_new_block_after_sendheaders() {
let peers = Arc::new(PeersImpl::default());
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, c1.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.set_block_announcement_type(2, BlockAnnouncementType::SendHeaders);
executor.execute(Task::RelayNewBlock(test_data::genesis().into()));
assert_eq!(*c1.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c2.messages.lock().entry("headers".to_owned()).or_insert(0), 1);
}
#[test]
fn relay_new_transaction_with_bloom_filter() {
let peers = Arc::new(PeersImpl::default());
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let tx1: Transaction = test_data::TransactionBuilder::with_output(10).into();
let tx2: Transaction = test_data::TransactionBuilder::with_output(20).into();
let tx3: Transaction = test_data::TransactionBuilder::with_output(30).into();
let tx1_hash = tx1.hash();
let tx2_hash = tx2.hash();
let tx3_hash = tx3.hash();
// peer#1 wants tx1
let c1 = DummyOutboundSyncConnection::new();
peers.insert(1, c1.clone());
peers.set_bloom_filter(1, default_filterload());
peers.update_bloom_filter(1, make_filteradd(&*tx1_hash));
// peer#2 wants tx2
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.set_bloom_filter(2, default_filterload());
peers.update_bloom_filter(2, make_filteradd(&*tx2_hash));
// peer#3 wants tx1 + tx2 transactions
let c3 = DummyOutboundSyncConnection::new();
peers.insert(3, c3.clone());
peers.set_bloom_filter(3, default_filterload());
peers.update_bloom_filter(3, make_filteradd(&*tx1_hash));
peers.update_bloom_filter(3, make_filteradd(&*tx2_hash));
// peer#4 has default behaviour (no filter)
let c4 = DummyOutboundSyncConnection::new();
peers.insert(4, c4.clone());
// peer#5 wants some other transactions
let c5 = DummyOutboundSyncConnection::new();
peers.insert(5, c5.clone());
peers.set_bloom_filter(5, default_filterload());
peers.update_bloom_filter(5, make_filteradd(&*tx3_hash));
// tx1 is relayed to peers: 1, 3, 4
executor.execute(Task::RelayNewTransaction(tx1.into(), 0));
assert_eq!(*c1.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c2.messages.lock().entry("inventory".to_owned()).or_insert(0), 0);
assert_eq!(*c3.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c4.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
// tx2 is relayed to peers: 2, 3, 4
executor.execute(Task::RelayNewTransaction(tx2.into(), 0));
assert_eq!(*c1.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c2.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c3.messages.lock().entry("inventory".to_owned()).or_insert(0), 2);
assert_eq!(*c4.messages.lock().entry("inventory".to_owned()).or_insert(0), 2);
}
#[test]
fn relay_new_transaction_with_feefilter() {
let peers = Arc::new(PeersImpl::default());
let executor = LocalSynchronizationTaskExecutor::new(peers.clone());
let c2 = DummyOutboundSyncConnection::new();
peers.insert(2, c2.clone());
peers.set_fee_filter(2, types::FeeFilter::with_fee_rate(3000));
let c3 = DummyOutboundSyncConnection::new();
peers.insert(3, c3.clone());
peers.set_fee_filter(3, types::FeeFilter::with_fee_rate(4000));
let c4 = DummyOutboundSyncConnection::new();
peers.insert(4, c4.clone());
executor.execute(Task::RelayNewTransaction(test_data::genesis().transactions[0].clone().into(), 3500));
assert_eq!(*c2.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
assert_eq!(*c3.messages.lock().entry("inventory".to_owned()).or_insert(0), 0);
assert_eq!(*c4.messages.lock().entry("inventory".to_owned()).or_insert(0), 1);
}
} }
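For reference, the feefilter test above pins down the relay rule: a transaction is announced only to peers whose advertised minimum fee rate does not exceed the transaction's fee rate, and peers without a filter always receive it. A reduced sketch of that predicate (hypothetical helper, same units as the test):

	// With fee_rate = 3500: filter 3000 -> relay, filter 4000 -> skip,
	// no filter -> relay.
	fn passes_fee_filter(tx_fee_rate: u64, peer_min_fee_rate: Option<u64>) -> bool {
		peer_min_fee_rate.map_or(true, |min| tx_fee_rate >= min)
	}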

View File

@ -1,16 +1,15 @@
use std::collections::HashSet; use std::collections::HashSet;
use time::precise_time_s; use time::precise_time_s;
use orphan_blocks_pool::OrphanBlocksPool;
use orphan_transactions_pool::OrphanTransactionsPool;
use synchronization_peers::Peers;
use primitives::hash::H256; use primitives::hash::H256;
use synchronization_peers_tasks::PeersTasks;
use utils::{OrphanBlocksPool, OrphanTransactionsPool};
/// Management interval (in ms) /// Management interval (in ms)
pub const MANAGEMENT_INTERVAL_MS: u64 = 10 * 1000; pub const MANAGEMENT_INTERVAL_MS: u64 = 10 * 1000;
/// Response time before getting block to decrease peer score /// Response time before getting block to decrease peer score
const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 60 * 1000; const DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS: u32 = 60 * 1000;
/// Response time before getting inventory to decrease peer score /// Response time before getting headers to decrease peer score
const DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS: u32 = 60 * 1000; const DEFAULT_PEER_HEADERS_FAILURE_INTERVAL_MS: u32 = 60 * 1000;
/// Unknown orphan block removal time /// Unknown orphan block removal time
const DEFAULT_UNKNOWN_BLOCK_REMOVAL_TIME_MS: u32 = 20 * 60 * 1000; const DEFAULT_UNKNOWN_BLOCK_REMOVAL_TIME_MS: u32 = 20 * 60 * 1000;
/// Maximal number of orphaned blocks /// Maximal number of orphaned blocks
@ -24,15 +23,15 @@ const DEFAULT_ORPHAN_TRANSACTIONS_MAX_LEN: usize = 10000;
pub struct ManagePeersConfig { pub struct ManagePeersConfig {
/// Time interval (in milliseconds) to wait block from the peer before penalizing && reexecuting tasks /// Time interval (in milliseconds) to wait block from the peer before penalizing && reexecuting tasks
pub block_failure_interval_ms: u32, pub block_failure_interval_ms: u32,
/// Time interval (in milliseconds) to wait inventory from the peer before penalizing && reexecuting tasks /// Time interval (in milliseconds) to wait headers from the peer before penalizing && reexecuting tasks
pub inventory_failure_interval_ms: u32, pub headers_failure_interval_ms: u32,
} }
impl Default for ManagePeersConfig { impl Default for ManagePeersConfig {
fn default() -> Self { fn default() -> Self {
ManagePeersConfig { ManagePeersConfig {
block_failure_interval_ms: DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS, block_failure_interval_ms: DEFAULT_PEER_BLOCK_FAILURE_INTERVAL_MS,
inventory_failure_interval_ms: DEFAULT_PEER_INVENTORY_FAILURE_INTERVAL_MS, headers_failure_interval_ms: DEFAULT_PEER_HEADERS_FAILURE_INTERVAL_MS,
} }
} }
} }
@ -72,15 +71,16 @@ impl Default for ManageOrphanTransactionsConfig {
} }
/// Manage stalled synchronization peers blocks tasks /// Manage stalled synchronization peers blocks tasks
pub fn manage_synchronization_peers_blocks(config: &ManagePeersConfig, peers: &mut Peers) -> (Vec<H256>, Vec<H256>) { pub fn manage_synchronization_peers_blocks(config: &ManagePeersConfig, peers: &mut PeersTasks) -> (Vec<H256>, Vec<H256>) {
let mut blocks_to_request: Vec<H256> = Vec::new(); let mut blocks_to_request: Vec<H256> = Vec::new();
let mut blocks_to_forget: Vec<H256> = Vec::new(); let mut blocks_to_forget: Vec<H256> = Vec::new();
let now = precise_time_s(); let now = precise_time_s();
// reset tasks for peers that have not responded during the given period // reset tasks for peers that have not responded during the given period
for (worst_peer_index, worst_peer_time) in peers.ordered_blocks_requests() { let ordered_blocks_requests: Vec<_> = peers.ordered_blocks_requests().clone().into_iter().collect();
for (worst_peer_index, blocks_request) in ordered_blocks_requests {
// check if peer has not responded within given time // check if peer has not responded within given time
let time_diff = now - worst_peer_time; let time_diff = now - blocks_request.timestamp;
if time_diff <= config.block_failure_interval_ms as f64 / 1000f64 { if time_diff <= config.block_failure_interval_ms as f64 / 1000f64 {
break; break;
} }
@ -97,24 +97,26 @@ pub fn manage_synchronization_peers_blocks(config: &ManagePeersConfig, peers: &m
// if peer failed many times => forget it // if peer failed many times => forget it
if peers.on_peer_block_failure(worst_peer_index) { if peers.on_peer_block_failure(worst_peer_index) {
warn!(target: "sync", "Too many failures for peer#{}. Excluding from synchronization", worst_peer_index); warn!(target: "sync", "Too many failures for peer#{}. Excluding from synchronization", worst_peer_index);
peers.unuseful_peer(worst_peer_index);
} }
} }
(blocks_to_request, blocks_to_forget) (blocks_to_request, blocks_to_forget)
} }
/// Manage stalled synchronization peers inventory tasks /// Manage stalled synchronization peers headers tasks
pub fn manage_synchronization_peers_inventory(config: &ManagePeersConfig, peers: &mut Peers) { pub fn manage_synchronization_peers_headers(config: &ManagePeersConfig, peers: &mut PeersTasks) {
let now = precise_time_s(); let now = precise_time_s();
// reset tasks for peers that have not responded during the given period // reset tasks for peers that have not responded during the given period
for (worst_peer_index, worst_peer_time) in peers.ordered_inventory_requests() { let ordered_headers_requests: Vec<_> = peers.ordered_headers_requests().clone().into_iter().collect();
for (worst_peer_index, headers_request) in ordered_headers_requests {
// check if peer has not responded within given time // check if peer has not responded within given time
let time_diff = now - worst_peer_time; let time_diff = now - headers_request.timestamp;
if time_diff <= config.inventory_failure_interval_ms as f64 / 1000f64 { if time_diff <= config.headers_failure_interval_ms as f64 / 1000f64 {
break; break;
} }
peers.on_peer_inventory_failure(worst_peer_index); peers.on_peer_headers_failure(worst_peer_index);
} }
} }
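Both managers share the same stall predicate: request timestamps come from precise_time_s (seconds) while the configured intervals are milliseconds, hence the division by 1000; with the 60 * 1000 ms defaults a peer is penalized once it has been silent for over a minute. A reduced sketch, assuming the same units:

	// Stall check used by both managers above: compare a seconds delta
	// against a millisecond interval converted to seconds.
	fn is_stalled(now_s: f64, requested_at_s: f64, failure_interval_ms: u32) -> bool {
		(now_s - requested_at_s) > failure_interval_ms as f64 / 1000f64
	}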
@ -146,7 +148,7 @@ pub fn manage_unknown_orphaned_blocks(config: &ManageUnknownBlocksConfig, orphan
// remove unknown blocks // remove unknown blocks
let unknown_to_remove: Vec<H256> = orphaned_blocks_pool.remove_blocks(&unknown_to_remove).into_iter() let unknown_to_remove: Vec<H256> = orphaned_blocks_pool.remove_blocks(&unknown_to_remove).into_iter()
.map(|t| t.0) .map(|b| b.header.hash)
.collect(); .collect();
if unknown_to_remove.is_empty() { None } else { Some(unknown_to_remove) } if unknown_to_remove.is_empty() { None } else { Some(unknown_to_remove) }
@ -180,7 +182,7 @@ pub fn manage_orphaned_transactions(config: &ManageOrphanTransactionsConfig, orp
// remove orphaned transactions // remove orphaned transactions
let orphans_to_remove: Vec<H256> = orphaned_transactions_pool.remove_transactions(&orphans_to_remove).into_iter() let orphans_to_remove: Vec<H256> = orphaned_transactions_pool.remove_transactions(&orphans_to_remove).into_iter()
.map(|t| t.0) .map(|t| t.hash)
.collect(); .collect();
if orphans_to_remove.is_empty() { None } else { Some(orphans_to_remove) } if orphans_to_remove.is_empty() { None } else { Some(orphans_to_remove) }
@ -189,22 +191,21 @@ pub fn manage_orphaned_transactions(config: &ManageOrphanTransactionsConfig, orp
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashSet; use std::collections::HashSet;
use super::{ManagePeersConfig, ManageUnknownBlocksConfig, ManageOrphanTransactionsConfig, manage_synchronization_peers_blocks,
manage_unknown_orphaned_blocks, manage_orphaned_transactions};
use synchronization_peers::Peers;
use primitives::hash::H256; use primitives::hash::H256;
use test_data; use test_data;
use orphan_blocks_pool::OrphanBlocksPool; use synchronization_peers_tasks::PeersTasks;
use orphan_transactions_pool::OrphanTransactionsPool; use super::{ManagePeersConfig, ManageUnknownBlocksConfig, ManageOrphanTransactionsConfig, manage_synchronization_peers_blocks,
manage_unknown_orphaned_blocks, manage_orphaned_transactions};
use utils::{OrphanBlocksPool, OrphanTransactionsPool};
#[test] #[test]
fn manage_good_peer() { fn manage_good_peer() {
let config = ManagePeersConfig { block_failure_interval_ms: 1000, ..Default::default() }; let config = ManagePeersConfig { block_failure_interval_ms: 1000, ..Default::default() };
let mut peers = Peers::new(); let mut peers = PeersTasks::default();
peers.on_blocks_requested(1, &vec![H256::from(0), H256::from(1)]); peers.on_blocks_requested(1, &vec![H256::from(0), H256::from(1)]);
peers.on_block_received(1, &H256::from(0)); peers.on_block_received(1, &H256::from(0));
assert_eq!(manage_synchronization_peers_blocks(&config, &mut peers), (vec![], vec![])); assert_eq!(manage_synchronization_peers_blocks(&config, &mut peers), (vec![], vec![]));
assert_eq!(peers.idle_peers_for_blocks(), vec![]); assert_eq!(peers.idle_peers_for_blocks().len(), 0);
} }
#[test] #[test]
@ -212,7 +213,7 @@ mod tests {
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
let config = ManagePeersConfig { block_failure_interval_ms: 0, ..Default::default() }; let config = ManagePeersConfig { block_failure_interval_ms: 0, ..Default::default() };
let mut peers = Peers::new(); let mut peers = PeersTasks::default();
peers.on_blocks_requested(1, &vec![H256::from(0)]); peers.on_blocks_requested(1, &vec![H256::from(0)]);
peers.on_blocks_requested(2, &vec![H256::from(1)]); peers.on_blocks_requested(2, &vec![H256::from(1)]);
sleep(Duration::from_millis(1)); sleep(Duration::from_millis(1));
@ -231,7 +232,7 @@ mod tests {
let config = ManageUnknownBlocksConfig { removal_time_ms: 1000, max_number: 100 }; let config = ManageUnknownBlocksConfig { removal_time_ms: 1000, max_number: 100 };
let mut pool = OrphanBlocksPool::new(); let mut pool = OrphanBlocksPool::new();
let block = test_data::genesis(); let block = test_data::genesis();
pool.insert_unknown_block(block.hash(), block.into()); pool.insert_unknown_block(block.into());
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), None); assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), None);
assert_eq!(pool.len(), 1); assert_eq!(pool.len(), 1);
} }
@ -244,7 +245,7 @@ mod tests {
let mut pool = OrphanBlocksPool::new(); let mut pool = OrphanBlocksPool::new();
let block = test_data::genesis(); let block = test_data::genesis();
let block_hash = block.hash(); let block_hash = block.hash();
pool.insert_unknown_block(block_hash.clone(), block.into()); pool.insert_unknown_block(block.into());
sleep(Duration::from_millis(1)); sleep(Duration::from_millis(1));
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block_hash])); assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block_hash]));
@ -258,9 +259,8 @@ mod tests {
let block1 = test_data::genesis(); let block1 = test_data::genesis();
let block1_hash = block1.hash(); let block1_hash = block1.hash();
let block2 = test_data::block_h2(); let block2 = test_data::block_h2();
let block2_hash = block2.hash(); pool.insert_unknown_block(block1.into());
pool.insert_unknown_block(block1_hash.clone(), block1.into()); pool.insert_unknown_block(block2.into());
pool.insert_unknown_block(block2_hash.clone(), block2.into());
assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block1_hash])); assert_eq!(manage_unknown_orphaned_blocks(&config, &mut pool), Some(vec![block1_hash]));
assert_eq!(pool.len(), 1); assert_eq!(pool.len(), 1);
} }
@ -271,7 +271,7 @@ mod tests {
let mut pool = OrphanTransactionsPool::new(); let mut pool = OrphanTransactionsPool::new();
let transaction = test_data::block_h170().transactions[1].clone(); let transaction = test_data::block_h170().transactions[1].clone();
let unknown_inputs: HashSet<H256> = transaction.inputs.iter().map(|i| i.previous_output.hash.clone()).collect(); let unknown_inputs: HashSet<H256> = transaction.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
pool.insert(transaction.hash(), transaction, unknown_inputs); pool.insert(transaction.into(), unknown_inputs);
assert_eq!(manage_orphaned_transactions(&config, &mut pool), None); assert_eq!(manage_orphaned_transactions(&config, &mut pool), None);
assert_eq!(pool.len(), 1); assert_eq!(pool.len(), 1);
} }
@ -285,7 +285,7 @@ mod tests {
let transaction = test_data::block_h170().transactions[1].clone(); let transaction = test_data::block_h170().transactions[1].clone();
let unknown_inputs: HashSet<H256> = transaction.inputs.iter().map(|i| i.previous_output.hash.clone()).collect(); let unknown_inputs: HashSet<H256> = transaction.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let transaction_hash = transaction.hash(); let transaction_hash = transaction.hash();
pool.insert(transaction_hash.clone(), transaction, unknown_inputs); pool.insert(transaction.into(), unknown_inputs);
sleep(Duration::from_millis(1)); sleep(Duration::from_millis(1));
assert_eq!(manage_orphaned_transactions(&config, &mut pool), Some(vec![transaction_hash])); assert_eq!(manage_orphaned_transactions(&config, &mut pool), Some(vec![transaction_hash]));
@ -301,9 +301,8 @@ mod tests {
let transaction1_hash = transaction1.hash(); let transaction1_hash = transaction1.hash();
let transaction2 = test_data::block_h182().transactions[1].clone(); let transaction2 = test_data::block_h182().transactions[1].clone();
let unknown_inputs2: HashSet<H256> = transaction2.inputs.iter().map(|i| i.previous_output.hash.clone()).collect(); let unknown_inputs2: HashSet<H256> = transaction2.inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let transaction2_hash = transaction2.hash(); pool.insert(transaction1.into(), unknown_inputs1);
pool.insert(transaction1_hash.clone(), transaction1, unknown_inputs1); pool.insert(transaction2.into(), unknown_inputs2);
pool.insert(transaction2_hash.clone(), transaction2, unknown_inputs2);
assert_eq!(manage_orphaned_transactions(&config, &mut pool), Some(vec![transaction1_hash])); assert_eq!(manage_orphaned_transactions(&config, &mut pool), Some(vec![transaction1_hash]));
assert_eq!(pool.len(), 1); assert_eq!(pool.len(), 1);
} }
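
The tests above imply a periodic management loop on the caller's side that expires stale orphans. A hedged, illustrative sketch of such a loop (the loop body and the `removal_time_ms` cast are assumptions; only the `manage_unknown_orphaned_blocks` signature and the `ManageUnknownBlocksConfig` fields are taken from the tests above):

fn manage_orphans_loop(config: &ManageUnknownBlocksConfig, pool: &mut OrphanBlocksPool) {
	use std::thread::sleep;
	use std::time::Duration;
	loop {
		// forget orphan blocks that stayed unknown longer than removal_time_ms
		if let Some(removed) = manage_unknown_orphaned_blocks(config, pool) {
			trace!(target: "sync", "forgot {} stale orphan blocks", removed.len());
		}
		sleep(Duration::from_millis(config.removal_time_ms as u64));
	}
}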

View File

@ -1,584 +1,241 @@
use std::collections::{HashMap, HashSet}; use std::collections::HashMap;
use std::collections::hash_map::Entry; use parking_lot::RwLock;
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use p2p::OutboundSyncConnectionRef;
use primitives::hash::H256; use primitives::hash::H256;
use linked_hash_map::LinkedHashMap; use types::PeerIndex;
use time::precise_time_s; use utils::{KnownHashType, ConnectionFilter};
use connection_filter::ConnectionFilter;
use synchronization_client::BlockAnnouncementType;
/// Max peer failures # before excluding from sync process /// Block announcement type
const MAX_PEER_FAILURES: usize = 2; #[derive(Debug, Clone, Copy)]
/// Max blocks failures # before forgetting this block and restarting sync pub enum BlockAnnouncementType {
const MAX_BLOCKS_FAILURES: usize = 6; /// Send inventory message with block hash [default behavior]
SendInventory,
/// Set of peers selected for synchronization. /// Send headers message with block header
#[derive(Debug)] SendHeaders,
pub struct Peers { /// Send cmpctblock message with this block
/// Peers that are marked as useful for current synchronization session && have no pending requests. SendCompactBlock,
idle: HashSet<usize>, /// Do not announce blocks at all
/// Peers that are marked as non-useful for current synchronization session && have no pending requests. DoNotAnnounce,
unuseful: HashSet<usize>,
/// # of failures for given peer.
failures: HashMap<usize, usize>,
/// # of failures for given block.
blocks_failures: HashMap<H256, usize>,
/// Peers that are marked as useful for current synchronization session && have pending blocks requests.
blocks_requests: HashMap<usize, HashSet<H256>>,
/// Last block message time from peer.
blocks_requests_order: LinkedHashMap<usize, f64>,
/// Peers that are marked as useful for current synchronization session && have pending requests.
inventory_requests: HashSet<usize>,
/// Last inventory message time from peer.
inventory_requests_order: LinkedHashMap<usize, f64>,
/// Peer connections filters.
filters: HashMap<usize, ConnectionFilter>,
/// The way peer is informed about new blocks
block_announcement_types: HashMap<usize, BlockAnnouncementType>,
} }
/// Information on synchronization peers /// Transaction announcement type
#[cfg(test)] #[derive(Debug, Clone, Copy)]
#[derive(Debug)] pub enum TransactionAnnouncementType {
pub struct Information { /// Send inventory message with transaction hash [default behavior]
/// # of peers that are marked as useful for current synchronization session && have no pending requests. SendInventory,
pub idle: usize, /// Do not announce transactions at all
/// # of peers that are marked as non-useful for current synchronization session && have no pending requests. DoNotAnnounce,
pub unuseful: usize,
/// # of peers that are marked as useful for current synchronization session && have pending requests.
pub active: usize,
} }
impl Peers { /// `merkleblock` build artefacts
pub fn new() -> Peers { #[derive(Debug, PartialEq)]
Peers { pub struct MerkleBlockArtefacts {
idle: HashSet::new(), /// `merkleblock` message
unuseful: HashSet::new(), pub merkleblock: types::MerkleBlock,
failures: HashMap::new(), /// All matching transactions
blocks_failures: HashMap::new(), pub matching_transactions: Vec<IndexedTransaction>,
blocks_requests: HashMap::new(), }
blocks_requests_order: LinkedHashMap::new(),
inventory_requests: HashSet::new(), /// Connected peers
inventory_requests_order: LinkedHashMap::new(), pub trait Peers : Send + Sync + PeersContainer + PeersFilters + PeersOptions {
filters: HashMap::new(), /// Get peer connection
block_announcement_types: HashMap::new(), fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef>;
}
/// Connected peers container
pub trait PeersContainer {
/// Enumerate all known peers (TODO: iterator + separate entity 'Peer')
fn enumerate(&self) -> Vec<PeerIndex>;
/// Insert new peer connection
fn insert(&self, peer_index: PeerIndex, connection: OutboundSyncConnectionRef);
/// Remove peer connection
fn remove(&self, peer_index: PeerIndex);
/// Close and remove peer connection due to misbehaving
fn misbehaving(&self, peer_index: PeerIndex, reason: &str);
/// Close and remove peer connection due to detected DOS attempt
fn dos(&self, peer_index: PeerIndex, reason: &str);
}
/// Filters for peers connections
pub trait PeersFilters {
/// Set up bloom filter for the connection
fn set_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterLoad);
/// Update bloom filter for the connection
fn update_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterAdd);
/// Clear bloom filter for the connection
fn clear_bloom_filter(&self, peer_index: PeerIndex);
/// Set up fee filter for the connection
fn set_fee_filter(&self, peer_index: PeerIndex, filter: types::FeeFilter);
/// Is block passing filters for the connection
fn filter_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> BlockAnnouncementType;
/// Is block passing filters for the connection
fn filter_transaction(&self, peer_index: PeerIndex, transaction: &IndexedTransaction, transaction_fee_rate: Option<u64>) -> TransactionAnnouncementType;
/// Remember known hash
fn hash_known_as(&self, peer_index: PeerIndex, hash: H256, hash_type: KnownHashType);
/// Is given hash known by peer as hash of given type
fn is_hash_known_as(&self, peer_index: PeerIndex, hash: &H256, hash_type: KnownHashType) -> bool;
/// Build compact block using filter for given peer
fn build_compact_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<types::CompactBlock>;
/// Build merkle block using filter for given peer
fn build_merkle_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<MerkleBlockArtefacts>;
}
/// Options for peers connections
pub trait PeersOptions {
/// Set up new block announcement type for the connection
fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType);
/// Set up new transaction announcement type for the connection
fn set_transaction_announcement_type(&self, peer_index: PeerIndex, announcement_type: TransactionAnnouncementType);
}
/// Single connected peer data
struct Peer {
/// Connection to this peer
pub connection: OutboundSyncConnectionRef,
/// Connection filter
pub filter: ConnectionFilter,
/// Block announcement type
pub block_announcement_type: BlockAnnouncementType,
/// Transaction announcement type
pub transaction_announcement_type: TransactionAnnouncementType,
}
/// Default implementation of connected peers container
#[derive(Default)]
pub struct PeersImpl {
/// All connected peers. Most of the time this field is accessed in read mode,
/// so this lock shouldn't be a performance problem.
peers: RwLock<HashMap<PeerIndex, Peer>>,
}
impl Peer {
pub fn with_connection(connection: OutboundSyncConnectionRef) -> Self {
Peer {
connection: connection,
filter: ConnectionFilter::default(),
block_announcement_type: BlockAnnouncementType::SendInventory,
transaction_announcement_type: TransactionAnnouncementType::SendInventory,
}
}
}
impl Peers for PeersImpl {
fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef> {
self.peers.read().get(&peer_index).map(|peer| peer.connection.clone())
}
}
impl PeersContainer for PeersImpl {
fn enumerate(&self) -> Vec<PeerIndex> {
self.peers.read().keys().cloned().collect()
}
fn insert(&self, peer_index: PeerIndex, connection: OutboundSyncConnectionRef) {
trace!(target: "sync", "Connected to peer#{}", peer_index);
assert!(self.peers.write().insert(peer_index, Peer::with_connection(connection)).is_none());
}
fn remove(&self, peer_index: PeerIndex) {
if self.peers.write().remove(&peer_index).is_some() {
trace!(target: "sync", "Disconnected from peer#{}", peer_index);
} }
} }
/// Get information on synchronization peers fn misbehaving(&self, peer_index: PeerIndex, reason: &str) {
#[cfg(test)] if let Some(peer) = self.peers.write().remove(&peer_index) {
pub fn information(&self) -> Information { warn!(target: "sync", "Disconnecting from peer#{} due to misbehaving: {}", peer_index, reason);
let blocks_requests_peers: HashSet<_> = self.blocks_requests.keys().cloned().collect(); peer.connection.close();
let total_unuseful_peers = self.unuseful.difference(&self.inventory_requests).count();
let total_active_peers = blocks_requests_peers.union(&self.inventory_requests).count();
Information {
idle: self.idle.len(),
unuseful: total_unuseful_peers,
active: total_active_peers,
} }
} }
/// Is known peer fn dos(&self, peer_index: PeerIndex, reason: &str) {
pub fn is_known_peer(&self, peer_index: usize) -> bool { if let Some(peer) = self.peers.write().remove(&peer_index) {
self.idle.contains(&peer_index) warn!(target: "sync", "Disconnecting from peer#{} due to DOS: {}", peer_index, reason);
|| self.unuseful.contains(&peer_index) peer.connection.close();
|| self.blocks_requests.contains_key(&peer_index) }
|| self.inventory_requests.contains(&peer_index)
} }
}
/// Has any useful peers? impl PeersFilters for PeersImpl {
pub fn has_any_useful(&self) -> bool { fn set_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterLoad) {
!self.idle.is_empty() if let Some(peer) = self.peers.write().get_mut(&peer_index) {
|| !self.blocks_requests.is_empty() peer.filter.load(filter);
|| !self.inventory_requests.is_empty()
}
/// Get all peers
pub fn all_peers(&self) -> Vec<usize> {
let mut unique: Vec<_> = self.idle.iter().cloned()
.chain(self.unuseful.iter().cloned())
.chain(self.blocks_requests.keys().cloned())
.chain(self.inventory_requests.iter().cloned())
.collect();
// need stable (for tests) && unique peers here, as blocks_requests can intersect with inventory_requests
unique.sort();
unique.dedup();
unique
}
/// Get useful peers
pub fn useful_peers(&self) -> Vec<usize> {
let mut unique: Vec<_> = self.idle.iter().cloned()
.chain(self.blocks_requests.keys().cloned())
.chain(self.inventory_requests.iter().cloned())
.collect();
// need stable (for tests) && unique peers here, as blocks_requests can intersect with inventory_requests
unique.sort();
unique.dedup();
unique
}
/// Get idle peers for inventory request.
pub fn idle_peers_for_inventory(&self) -> Vec<usize> {
let peers: HashSet<_> = self.idle.iter().cloned()
.chain(self.blocks_requests.keys().cloned())
.collect();
let except: HashSet<_> = self.inventory_requests.iter().cloned().collect();
peers.difference(&except).cloned().collect()
}
/// Get idle peers for blocks request.
pub fn idle_peers_for_blocks(&self) -> Vec<usize> {
let peers: HashSet<_> = self.idle.iter().cloned()
.chain(self.inventory_requests.iter().cloned())
.collect();
let except: HashSet<_> = self.blocks_requests.keys().cloned().collect();
peers.difference(&except).cloned().collect()
}
/// Get active blocks requests, sorted by last response time (oldest first).
pub fn ordered_blocks_requests(&self) -> Vec<(usize, f64)> {
self.blocks_requests_order.iter()
.map(|(&pi, &t)| (pi, t))
.collect()
}
/// Get active inventory requests, sorted by last response time (oldest first).
pub fn ordered_inventory_requests(&self) -> Vec<(usize, f64)> {
self.inventory_requests_order.iter()
.map(|(&pi, &t)| (pi, t))
.collect()
}
/// Get peer tasks
pub fn get_blocks_tasks(&self, peer_index: usize) -> Option<HashSet<H256>> {
self.blocks_requests.get(&peer_index).cloned()
}
/// Get filter reference for given peer
pub fn filter(&mut self, peer_index: usize) -> &ConnectionFilter {
assert!(self.is_known_peer(peer_index));
&*self.filters.entry(peer_index).or_insert_with(ConnectionFilter::default)
}
/// Get mutable filter reference for given peer
pub fn filter_mut(&mut self, peer_index: usize) -> &mut ConnectionFilter {
assert!(self.is_known_peer(peer_index));
self.filters.entry(peer_index).or_insert_with(ConnectionFilter::default)
}
/// Get the way peer is informed about new blocks
pub fn block_announcement_type(&self, peer_index: usize) -> BlockAnnouncementType {
self.block_announcement_types.get(&peer_index).cloned()
.unwrap_or(BlockAnnouncementType::SendInventory)
}
/// Mark peer as useful.
pub fn useful_peer(&mut self, peer_index: usize) {
// if peer is unknown => insert to idle queue
// if peer is known && not useful => insert to idle queue
if !self.idle.contains(&peer_index)
&& !self.blocks_requests.contains_key(&peer_index)
&& !self.inventory_requests.contains(&peer_index) {
self.idle.insert(peer_index);
self.unuseful.remove(&peer_index);
self.failures.remove(&peer_index);
} }
} }
/// Mark peer as unuseful. fn update_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterAdd) {
pub fn unuseful_peer(&mut self, peer_index: usize) { if let Some(peer) = self.peers.write().get_mut(&peer_index) {
// if peer is unknown => insert to idle queue peer.filter.add(filter);
// if peer is known && not useful => insert to idle queue
assert!(!self.blocks_requests.contains_key(&peer_index));
assert!(!self.blocks_requests_order.contains_key(&peer_index));
self.idle.remove(&peer_index);
self.unuseful.insert(peer_index);
self.failures.remove(&peer_index);
self.inventory_requests.remove(&peer_index);
self.inventory_requests_order.remove(&peer_index);
}
/// Change the way peer is informed about new blocks
pub fn set_block_announcement_type(&mut self, peer_index: usize, announcement_type: BlockAnnouncementType) {
self.block_announcement_types.insert(peer_index, announcement_type);
}
/// Peer wants to limit transaction announcing by transaction fee
pub fn on_peer_feefilter(&mut self, peer_index: usize, fee_rate: u64) {
self.filter_mut(peer_index).set_fee_rate(fee_rate);
}
/// Peer has been disconnected
pub fn on_peer_disconnected(&mut self, peer_index: usize) -> Option<Vec<H256>> {
// forget this peer without any chances to reuse
self.idle.remove(&peer_index);
self.unuseful.remove(&peer_index);
self.failures.remove(&peer_index);
let peer_blocks_requests = self.blocks_requests.remove(&peer_index);
self.blocks_requests_order.remove(&peer_index);
self.inventory_requests.remove(&peer_index);
self.inventory_requests_order.remove(&peer_index);
self.filters.remove(&peer_index);
self.block_announcement_types.remove(&peer_index);
peer_blocks_requests
.map(|hs| hs.into_iter().collect())
}
/// Block is received from peer.
pub fn on_block_received(&mut self, peer_index: usize, block_hash: &H256) {
// forget block failures
self.blocks_failures.remove(block_hash);
// if this is requested block && it is last requested block => remove from blocks_requests
let try_mark_as_idle = match self.blocks_requests.entry(peer_index) {
Entry::Occupied(mut requests_entry) => {
requests_entry.get_mut().remove(block_hash);
self.blocks_requests_order.remove(&peer_index);
if requests_entry.get().is_empty() {
requests_entry.remove_entry();
true
} else {
self.blocks_requests_order.insert(peer_index, precise_time_s());
false
}
},
_ => false,
};
// try to mark as idle
if try_mark_as_idle {
self.try_mark_idle(peer_index);
} }
// remember that peer knows about this block
self.filters.entry(peer_index).or_insert_with(ConnectionFilter::default).known_block(block_hash, false);
} }
/// Transaction is received from peer. fn clear_bloom_filter(&self, peer_index: PeerIndex) {
pub fn on_transaction_received(&mut self, peer_index: usize, transaction_hash: &H256) { if let Some(peer) = self.peers.write().get_mut(&peer_index) {
self.filters.entry(peer_index).or_insert_with(ConnectionFilter::default).known_transaction(transaction_hash); peer.filter.clear();
}
/// Inventory received from peer.
pub fn on_inventory_received(&mut self, peer_index: usize) {
// if we have requested inventory => remove from inventory_requests
self.inventory_requests.remove(&peer_index);
self.inventory_requests_order.remove(&peer_index);
// try to mark as idle
self.try_mark_idle(peer_index);
}
/// Blocks have been requested from peer.
pub fn on_blocks_requested(&mut self, peer_index: usize, blocks_hashes: &[H256]) {
// mark peer as active
self.idle.remove(&peer_index);
self.unuseful.remove(&peer_index);
self.blocks_requests.entry(peer_index).or_insert_with(HashSet::new).extend(blocks_hashes.iter().cloned());
self.blocks_requests_order.remove(&peer_index);
self.blocks_requests_order.insert(peer_index, precise_time_s());
}
/// Inventory has been requested from peer.
pub fn on_inventory_requested(&mut self, peer_index: usize) {
self.inventory_requests.insert(peer_index);
self.inventory_requests_order.remove(&peer_index);
self.inventory_requests_order.insert(peer_index, precise_time_s());
// mark peer as active
if self.idle.remove(&peer_index) {
self.unuseful.insert(peer_index);
} }
// peer is now out of the synchronization process (we will not request blocks from it), because:
// 1) if it has new blocks, it will respond with `inventory` message && will be inserted back here
// 2) if it has no new blocks => either synchronization is completed, or it is behind us in sync
} }
/// We have failed to get blocks fn set_fee_filter(&self, peer_index: PeerIndex, filter: types::FeeFilter) {
pub fn on_blocks_failure(&mut self, hashes: Vec<H256>) -> (Vec<H256>, Vec<H256>) { if let Some(peer) = self.peers.write().get_mut(&peer_index) {
let mut failed_blocks: Vec<H256> = Vec::new(); peer.filter.set_fee_rate(filter);
let mut normal_blocks: Vec<H256> = Vec::with_capacity(hashes.len()); }
for hash in hashes { }
match self.blocks_failures.entry(hash.clone()) {
Entry::Vacant(entry) => { fn filter_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> BlockAnnouncementType {
normal_blocks.push(hash); if let Some(peer) = self.peers.read().get(&peer_index) {
entry.insert(0); if peer.filter.filter_block(&block.header.hash) {
}, return peer.block_announcement_type
Entry::Occupied(mut entry) => {
*entry.get_mut() += 1;
if *entry.get() >= MAX_BLOCKS_FAILURES {
entry.remove();
failed_blocks.push(hash);
} else {
normal_blocks.push(hash);
}
}
} }
} }
(normal_blocks, failed_blocks) BlockAnnouncementType::DoNotAnnounce
} }
/// We have failed to get block from peer during given period fn filter_transaction(&self, peer_index: PeerIndex, transaction: &IndexedTransaction, transaction_fee_rate: Option<u64>) -> TransactionAnnouncementType {
pub fn on_peer_block_failure(&mut self, peer_index: usize) -> bool { if let Some(peer) = self.peers.read().get(&peer_index) {
let peer_failures = match self.failures.entry(peer_index) { if peer.filter.filter_transaction(transaction, transaction_fee_rate) {
Entry::Occupied(mut entry) => { return peer.transaction_announcement_type
let failures = entry.get() + 1; }
entry.insert(failures);
failures
},
Entry::Vacant(entry) => *entry.insert(1),
};
let too_much_failures = peer_failures >= MAX_PEER_FAILURES;
if too_much_failures {
self.idle.remove(&peer_index);
self.unuseful.insert(peer_index);
self.failures.remove(&peer_index);
self.blocks_requests.remove(&peer_index);
self.blocks_requests_order.remove(&peer_index);
} }
too_much_failures
TransactionAnnouncementType::DoNotAnnounce
} }
/// We have failed to get inventory from peer during given period fn hash_known_as(&self, peer_index: PeerIndex, hash: H256, hash_type: KnownHashType) {
pub fn on_peer_inventory_failure(&mut self, peer_index: usize) { if let Some(peer) = self.peers.write().get_mut(&peer_index) {
// ignore inventory failures peer.filter.hash_known_as(hash, hash_type)
self.inventory_requests.remove(&peer_index);
self.inventory_requests_order.remove(&peer_index);
if !self.blocks_requests.contains_key(&peer_index) {
self.idle.insert(peer_index);
self.unuseful.remove(&peer_index);
} }
} }
/// Reset all peers state to the unuseful fn is_hash_known_as(&self, peer_index: PeerIndex, hash: &H256, hash_type: KnownHashType) -> bool {
pub fn reset(&mut self) { self.peers.read().get(&peer_index)
self.unuseful.extend(self.idle.drain()); .map(|peer| peer.filter.is_hash_known_as(hash, hash_type))
self.unuseful.extend(self.blocks_requests.drain().map(|(k, _)| k)); .unwrap_or(false)
self.unuseful.extend(self.inventory_requests.drain());
self.failures.clear();
self.inventory_requests_order.clear();
self.blocks_requests_order.clear();
} }
/// Reset peer tasks && move peer to idle state fn build_compact_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<types::CompactBlock> {
pub fn reset_blocks_tasks(&mut self, peer_index: usize) -> Vec<H256> { self.peers.read().get(&peer_index)
let requests = self.blocks_requests.remove(&peer_index); .map(|peer| peer.filter.build_compact_block(block))
self.blocks_requests_order.remove(&peer_index);
self.try_mark_idle(peer_index);
requests.expect("empty requests queue is not allowed").into_iter().collect()
} }
/// Try to mark peer as idle fn build_merkle_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> Option<MerkleBlockArtefacts> {
fn try_mark_idle(&mut self, peer_index: usize) { self.peers.read().get(&peer_index)
if self.blocks_requests.contains_key(&peer_index) .and_then(|peer| peer.filter.build_merkle_block(block))
|| self.inventory_requests.contains(&peer_index) {
return;
}
self.idle.insert(peer_index);
self.unuseful.remove(&peer_index);
} }
} }
#[cfg(test)] impl PeersOptions for PeersImpl {
mod tests { fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType) {
use super::{Peers, MAX_PEER_FAILURES, MAX_BLOCKS_FAILURES}; if let Some(peer) = self.peers.write().get_mut(&peer_index) {
use primitives::hash::H256; peer.block_announcement_type = announcement_type;
#[test]
fn peers_empty_on_start() {
let peers = Peers::new();
assert_eq!(peers.idle_peers_for_blocks(), vec![]);
assert_eq!(peers.idle_peers_for_inventory(), vec![]);
let info = peers.information();
assert_eq!(info.idle, 0);
assert_eq!(info.active, 0);
}
#[test]
fn peers_all_unuseful_after_reset() {
let mut peers = Peers::new();
peers.on_blocks_requested(7, &vec![H256::default()]);
peers.on_blocks_requested(8, &vec![H256::default()]);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 2);
peers.reset();
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 2);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peer_idle_after_reset_tasks() {
let mut peers = Peers::new();
peers.on_blocks_requested(7, &vec![H256::default()]);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
assert_eq!(peers.reset_blocks_tasks(7), vec![H256::default()]);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peers_active_after_inventory_request() {
let mut peers = Peers::new();
peers.useful_peer(5);
peers.useful_peer(7);
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
peers.on_inventory_requested(5);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
}
#[test]
fn peers_insert_remove_idle() {
let mut peers = Peers::new();
peers.useful_peer(0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
assert_eq!(peers.idle_peers_for_blocks(), vec![0]);
peers.useful_peer(5);
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().active, 0);
let idle_peers = peers.idle_peers_for_blocks();
assert!(idle_peers[0] == 0 || idle_peers[0] == 5);
assert!(idle_peers[1] == 0 || idle_peers[1] == 5);
peers.on_peer_disconnected(7);
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().active, 0);
let idle_peers = peers.idle_peers_for_blocks();
assert!(idle_peers[0] == 0 || idle_peers[0] == 5);
assert!(idle_peers[1] == 0 || idle_peers[1] == 5);
peers.on_peer_disconnected(0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().active, 0);
assert_eq!(peers.idle_peers_for_blocks(), vec![5]);
}
#[test]
fn peers_request_blocks() {
let mut peers = Peers::new();
peers.useful_peer(5);
peers.on_blocks_requested(7, &vec![H256::default()]);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_blocks_requested(8, &vec![H256::default()]);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 2);
peers.on_block_received(7, &H256::default());
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_block_received(9, &H256::default());
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_block_received(8, &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into());
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_block_received(8, &H256::default());
assert_eq!(peers.information().idle, 3);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peers_worst() {
let mut peers = Peers::new();
peers.useful_peer(1);
peers.useful_peer(2);
assert_eq!(peers.ordered_blocks_requests(), vec![]);
peers.on_blocks_requested(1, &vec![H256::default()]);
assert_eq!(peers.ordered_blocks_requests().len(), 1);
assert_eq!(peers.ordered_blocks_requests()[0].0, 1);
peers.on_blocks_requested(2, &vec![H256::default()]);
assert_eq!(peers.ordered_blocks_requests().len(), 2);
assert_eq!(peers.ordered_blocks_requests()[0].0, 1);
assert_eq!(peers.ordered_blocks_requests()[1].0, 2);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 2);
peers.reset_blocks_tasks(1);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
assert_eq!(peers.ordered_blocks_requests().len(), 1);
assert_eq!(peers.ordered_blocks_requests()[0].0, 2);
for _ in 0..MAX_PEER_FAILURES {
peers.on_peer_block_failure(2);
} }
assert_eq!(peers.ordered_blocks_requests().len(), 0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 1);
assert_eq!(peers.information().active, 0);
} }
#[test] fn set_transaction_announcement_type(&self, peer_index: PeerIndex, announcement_type: TransactionAnnouncementType) {
fn peer_not_inserted_when_known() { if let Some(peer) = self.peers.write().get_mut(&peer_index) {
let mut peers = Peers::new(); peer.transaction_announcement_type = announcement_type;
peers.useful_peer(1);
peers.useful_peer(1);
assert_eq!(peers.information().active + peers.information().idle + peers.information().unuseful, 1);
peers.on_blocks_requested(1, &vec![H256::default()]);
peers.useful_peer(1);
assert_eq!(peers.information().active + peers.information().idle + peers.information().unuseful, 1);
for _ in 0..MAX_PEER_FAILURES {
peers.on_peer_block_failure(1);
} }
peers.useful_peer(1);
assert_eq!(peers.information().active + peers.information().idle + peers.information().unuseful, 1);
}
#[test]
fn peer_block_failures() {
let mut peers = Peers::new();
peers.useful_peer(1);
peers.on_blocks_requested(1, &vec![H256::from(1)]);
for _ in 0..MAX_BLOCKS_FAILURES {
let requested_blocks = peers.reset_blocks_tasks(1);
let (blocks_to_request, blocks_to_forget) = peers.on_blocks_failure(requested_blocks);
assert_eq!(blocks_to_request, vec![H256::from(1)]);
assert_eq!(blocks_to_forget, vec![]);
peers.on_blocks_requested(1, &vec![H256::from(1)]);
}
let requested_blocks = peers.reset_blocks_tasks(1);
let (blocks_to_request, blocks_to_forget) = peers.on_blocks_failure(requested_blocks);
assert_eq!(blocks_to_request, vec![]);
assert_eq!(blocks_to_forget, vec![H256::from(1)]);
} }
} }
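
For readers skimming the diff: the essence of the new `PeersImpl` is a read-mostly `RwLock<HashMap<..>>` behind small trait facades. A minimal, self-contained sketch of the same pattern (std `RwLock` instead of parking_lot, and a stub `Connection` standing in for `OutboundSyncConnectionRef`):

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type PeerIndex = usize;

#[derive(Clone)]
struct Connection; // stub for OutboundSyncConnectionRef

impl Connection {
	fn close(&self) { /* would close the underlying socket */ }
}

#[derive(Default)]
struct PeersImpl {
	// all connected peers behind a single read-mostly lock
	peers: RwLock<HashMap<PeerIndex, Connection>>,
}

impl PeersImpl {
	fn insert(&self, peer_index: PeerIndex, connection: Connection) {
		assert!(self.peers.write().unwrap().insert(peer_index, connection).is_none());
	}

	fn connection(&self, peer_index: PeerIndex) -> Option<Connection> {
		self.peers.read().unwrap().get(&peer_index).cloned()
	}

	fn misbehaving(&self, peer_index: PeerIndex, reason: &str) {
		if let Some(connection) = self.peers.write().unwrap().remove(&peer_index) {
			println!("Disconnecting from peer#{} due to misbehaving: {}", peer_index, reason);
			connection.close();
		}
	}
}

fn main() {
	let peers = Arc::new(PeersImpl::default());
	peers.insert(1, Connection);
	assert!(peers.connection(1).is_some());
	peers.misbehaving(1, "invalid header");
	assert!(peers.connection(1).is_none());
}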

View File

@ -0,0 +1,567 @@
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use linked_hash_map::LinkedHashMap;
use time::precise_time_s;
use primitives::hash::H256;
use types::PeerIndex;
use utils::AverageSpeedMeter;
/// Max peer failures # before excluding from sync process
const MAX_PEER_FAILURES: usize = 2;
/// Max blocks failures # before forgetting this block and restarting sync
const MAX_BLOCKS_FAILURES: usize = 6;
/// Number of blocks to inspect while calculating average response time
const BLOCKS_TO_INSPECT: usize = 32;
/// Information on synchronization peers
#[cfg(test)]
#[derive(Debug)]
pub struct Information {
/// # of peers that are marked as useful for current synchronization session && have no pending requests.
pub idle: usize,
/// # of peers that are marked as non-useful for current synchronization session && have no pending requests.
pub unuseful: usize,
/// # of peers that are marked as useful for current synchronization session && have pending requests.
pub active: usize,
}
/// Set of peers selected for synchronization.
#[derive(Debug, Default)]
pub struct PeersTasks {
/// All known peers ids
all: HashSet<PeerIndex>,
/// All unuseful peers
unuseful: HashSet<PeerIndex>,
/// All peers without pending headers requests
idle_for_headers: HashSet<PeerIndex>,
/// All peers without pending blocks requests
idle_for_blocks: HashSet<PeerIndex>,
/// Pending headers requests sent to peers
headers_requests: LinkedHashMap<PeerIndex, HeadersRequest>,
/// Pending blocks requests sent to peers
blocks_requests: LinkedHashMap<PeerIndex, BlocksRequest>,
/// Peers statistics
stats: HashMap<PeerIndex, PeerStats>,
/// Blocks statistics
blocks_stats: HashMap<H256, BlockStats>,
}
/// Pending headers request
#[derive(Debug, Clone)]
pub struct HeadersRequest {
/// Time when request has been sent
pub timestamp: f64,
}
/// Pending blocks request
#[derive(Debug, Clone)]
pub struct BlocksRequest {
/// Time when request has been sent
pub timestamp: f64,
/// Hashes of blocks that have been requested
pub blocks: HashSet<H256>,
}
/// Peer statistics
#[derive(Debug, Default)]
struct PeerStats {
/// Number of blocks requests failures
failures: usize,
/// Average block response time meter
speed: AverageSpeedMeter,
}
/// Block statistics
#[derive(Debug, Default)]
struct BlockStats {
/// Number of block request failures
failures: usize,
}
impl PeersTasks {
/// Get information on synchronization peers
#[cfg(test)]
pub fn information(&self) -> Information {
let active_for_headers: HashSet<_> = self.headers_requests.keys().cloned().collect();
Information {
idle: self.idle_for_blocks.difference(&active_for_headers).count(),
unuseful: self.unuseful.len(),
active: active_for_headers.union(&self.blocks_requests.keys().cloned().collect()).count(),
}
}
/// Get all peers
pub fn all_peers(&self) -> &HashSet<PeerIndex> {
&self.all
}
/// Get useful peers
pub fn useful_peers(&self) -> Vec<PeerIndex> {
self.all.difference(&self.unuseful).cloned().collect()
}
/// Get idle peers for headers request.
pub fn idle_peers_for_headers(&self) -> &HashSet<PeerIndex> {
&self.idle_for_headers
}
/// Get idle peers for blocks request.
pub fn idle_peers_for_blocks(&self) -> &HashSet<PeerIndex> {
&self.idle_for_blocks
}
/// Sort peers for blocks request
pub fn sort_peers_for_blocks(&self, peers: &mut Vec<PeerIndex>) {
peers.sort_by(|left, right| {
let left_speed = self.stats.get(&left).map(|s| s.speed.speed()).unwrap_or(0f64);
let right_speed = self.stats.get(&right).map(|s| s.speed.speed()).unwrap_or(0f64);
// larger speed => better
right_speed.partial_cmp(&left_speed).unwrap_or(Ordering::Equal)
})
}
/// Get active headers requests, sorted by last response time (oldest first).
pub fn ordered_headers_requests(&self) -> &LinkedHashMap<PeerIndex, HeadersRequest> {
&self.headers_requests
}
/// Get active blocks requests, sorted by last response time (oldest first).
pub fn ordered_blocks_requests(&self) -> &LinkedHashMap<PeerIndex, BlocksRequest> {
&self.blocks_requests
}
/// Get peer tasks
pub fn get_blocks_tasks(&self, peer_index: PeerIndex) -> Option<&HashSet<H256>> {
self.blocks_requests
.get(&peer_index)
.map(|br| &br.blocks)
}
/// Mark peer as useful.
pub fn useful_peer(&mut self, peer_index: PeerIndex) {
// if peer is unknown => insert to idle queue
// if peer is known && not useful => insert to idle queue
if self.all.insert(peer_index)
|| self.unuseful.remove(&peer_index) {
self.idle_for_headers.insert(peer_index);
self.idle_for_blocks.insert(peer_index);
self.stats.insert(peer_index, PeerStats::new());
}
}
/// Mark peer as unuseful.
pub fn unuseful_peer(&mut self, peer_index: PeerIndex) {
// blocks should be re-requested from other peers
assert!(!self.blocks_requests.contains_key(&peer_index));
if self.all.insert(peer_index) {
self.stats.insert(peer_index, PeerStats::new());
}
self.unuseful.insert(peer_index);
self.idle_for_headers.remove(&peer_index);
self.idle_for_blocks.remove(&peer_index);
}
/// Peer has been disconnected
pub fn disconnect(&mut self, peer_index: PeerIndex) {
// blocks should be re-requested from other peers
assert!(!self.blocks_requests.contains_key(&peer_index));
self.all.remove(&peer_index);
self.unuseful.remove(&peer_index);
self.idle_for_headers.remove(&peer_index);
self.idle_for_blocks.remove(&peer_index);
self.headers_requests.remove(&peer_index);
self.blocks_requests.remove(&peer_index);
self.stats.remove(&peer_index);
}
/// Block is received from peer.
pub fn on_block_received(&mut self, peer_index: PeerIndex, block_hash: &H256) {
// block received => reset failures
self.blocks_stats.remove(&block_hash);
let is_last_requested_block_received = if let Some(blocks_request) = self.blocks_requests.get_mut(&peer_index) {
// if block hasn't been requested => do nothing
if !blocks_request.blocks.remove(&block_hash) {
return;
}
blocks_request.blocks.is_empty()
} else {
// this peer hasn't been requested for blocks at all
return;
};
// it was a requested block => update the block response time
self.stats.get_mut(&peer_index).map(|br| br.speed.checkpoint());
// if it wasn't the last requested block => just return
if !is_last_requested_block_received {
return;
}
// no more requested blocks => pause requests speed meter
self.stats.get_mut(&peer_index).map(|br| br.speed.stop());
// mark this peer as idle for blocks request
self.blocks_requests.remove(&peer_index);
self.idle_for_blocks.insert(peer_index);
// also mark as available for headers request if not yet
if !self.headers_requests.contains_key(&peer_index) {
self.idle_for_headers.insert(peer_index);
}
}
/// Headers received from peer.
pub fn on_headers_received(&mut self, peer_index: PeerIndex) {
self.headers_requests.remove(&peer_index);
// we only ask for new headers when peer is also not asked for blocks
// => only insert to idle queue if no active blocks requests
if !self.blocks_requests.contains_key(&peer_index) {
self.idle_for_headers.insert(peer_index);
}
}
/// Blocks have been requested from peer.
pub fn on_blocks_requested(&mut self, peer_index: PeerIndex, blocks_hashes: &[H256]) {
if !self.all.contains(&peer_index) {
self.unuseful_peer(peer_index);
}
self.unuseful.remove(&peer_index);
self.idle_for_blocks.remove(&peer_index);
if !self.blocks_requests.contains_key(&peer_index) {
self.blocks_requests.insert(peer_index, BlocksRequest::new());
}
self.blocks_requests.get_mut(&peer_index)
.expect("inserted one line above")
.blocks.extend(blocks_hashes.iter().cloned());
// blocks have been requested => start/resume the requests speed meter
self.stats.get_mut(&peer_index).map(|br| br.speed.start());
}
/// Headers have been requested from peer.
pub fn on_headers_requested(&mut self, peer_index: PeerIndex) {
if !self.all.contains(&peer_index) {
self.unuseful_peer(peer_index);
}
self.idle_for_headers.remove(&peer_index);
self.headers_requests.remove(&peer_index);
self.headers_requests.insert(peer_index, HeadersRequest::new());
}
/// We have failed to get blocks
pub fn on_blocks_failure(&mut self, hashes: Vec<H256>) -> (Vec<H256>, Vec<H256>) {
let mut failed_blocks: Vec<H256> = Vec::new();
let mut normal_blocks: Vec<H256> = Vec::with_capacity(hashes.len());
for hash in hashes {
let is_failed_block = {
let block_stats = self.blocks_stats.entry(hash.clone()).or_insert(BlockStats::default());
block_stats.failures += 1;
block_stats.failures > MAX_BLOCKS_FAILURES
};
if is_failed_block {
self.blocks_stats.remove(&hash);
failed_blocks.push(hash);
} else {
normal_blocks.push(hash);
}
}
(normal_blocks, failed_blocks)
}
/// We have failed to get a block from the peer during the given period
pub fn on_peer_block_failure(&mut self, peer_index: PeerIndex) -> bool {
self.stats.get_mut(&peer_index)
.map(|s| {
s.failures += 1;
s.failures > MAX_PEER_FAILURES
})
.unwrap_or_default()
}
/// We have failed to get headers from the peer during the given period
pub fn on_peer_headers_failure(&mut self, peer_index: PeerIndex) {
// we never penalize peers for header requests failures
self.headers_requests.remove(&peer_index);
self.idle_for_headers.insert(peer_index);
}
/// Reset all peers state to the unuseful
pub fn reset(&mut self) {
self.unuseful.clear();
self.unuseful.extend(self.all.iter().cloned());
self.idle_for_headers.clear();
self.idle_for_blocks.clear();
self.headers_requests.clear();
self.blocks_requests.clear();
}
/// Reset peer tasks && move peer to idle state
pub fn reset_blocks_tasks(&mut self, peer_index: PeerIndex) -> Vec<H256> {
self.idle_for_blocks.insert(peer_index);
self.blocks_requests.remove(&peer_index)
.map(|mut br| br.blocks.drain().collect())
.unwrap_or_default()
}
}
impl HeadersRequest {
pub fn new() -> Self {
HeadersRequest {
timestamp: precise_time_s(),
}
}
}
impl BlocksRequest {
pub fn new() -> Self {
BlocksRequest {
timestamp: precise_time_s(),
blocks: HashSet::new(),
}
}
}
impl PeerStats {
pub fn new() -> Self {
PeerStats {
failures: 0,
speed: AverageSpeedMeter::with_inspect_items(BLOCKS_TO_INSPECT),
}
}
}
#[cfg(test)]
mod tests {
use primitives::hash::H256;
use super::{PeersTasks, MAX_PEER_FAILURES, MAX_BLOCKS_FAILURES};
use types::PeerIndex;
#[test]
fn peers_empty_on_start() {
let peers = PeersTasks::default();
assert_eq!(peers.idle_peers_for_blocks().len(), 0);
assert_eq!(peers.idle_peers_for_headers().len(), 0);
let info = peers.information();
assert_eq!(info.idle, 0);
assert_eq!(info.active, 0);
}
#[test]
fn peers_all_unuseful_after_reset() {
let mut peers = PeersTasks::default();
peers.on_blocks_requested(7, &vec![H256::default()]);
peers.on_blocks_requested(8, &vec![H256::default()]);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 2);
peers.reset();
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 2);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peer_idle_after_reset_tasks() {
let mut peers = PeersTasks::default();
peers.on_blocks_requested(7, &vec![H256::default()]);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
assert_eq!(peers.reset_blocks_tasks(7), vec![H256::default()]);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peers_active_after_headers_request() {
let mut peers = PeersTasks::default();
peers.useful_peer(5);
peers.useful_peer(7);
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
peers.on_headers_requested(5);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
}
#[test]
fn peers_insert_remove_idle() {
let mut peers = PeersTasks::default();
peers.useful_peer(0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
assert_eq!(peers.idle_peers_for_blocks().len(), 1);
assert!(peers.idle_peers_for_blocks().contains(&0));
peers.useful_peer(5);
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().active, 0);
let idle_peers: Vec<_> = peers.idle_peers_for_blocks().iter().cloned().collect();
assert!(idle_peers[0] == 0 || idle_peers[0] == 5);
assert!(idle_peers[1] == 0 || idle_peers[1] == 5);
peers.disconnect(7);
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().active, 0);
let idle_peers: Vec<_> = peers.idle_peers_for_blocks().iter().cloned().collect();
assert!(idle_peers[0] == 0 || idle_peers[0] == 5);
assert!(idle_peers[1] == 0 || idle_peers[1] == 5);
peers.disconnect(0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().active, 0);
assert_eq!(peers.idle_peers_for_blocks().len(), 1);
assert!(peers.idle_peers_for_blocks().contains(&5));
}
#[test]
fn peers_request_blocks() {
let mut peers = PeersTasks::default();
peers.useful_peer(5);
peers.on_blocks_requested(7, &vec![H256::default()]);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_blocks_requested(8, &vec![H256::default()]);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 2);
peers.on_block_received(7, &H256::default());
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_block_received(9, &H256::default());
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_block_received(8, &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into());
assert_eq!(peers.information().idle, 2);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
peers.on_block_received(8, &H256::default());
assert_eq!(peers.information().idle, 3);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peers_worst() {
let mut peers = PeersTasks::default();
peers.useful_peer(1);
peers.useful_peer(2);
assert_eq!(peers.ordered_blocks_requests().len(), 0);
peers.on_blocks_requested(1, &vec![H256::default()]);
assert_eq!(peers.ordered_blocks_requests().len(), 1);
assert_eq!(*peers.ordered_blocks_requests().keys().nth(0).unwrap(), 1);
peers.on_blocks_requested(2, &vec![H256::default()]);
assert_eq!(peers.ordered_blocks_requests().len(), 2);
assert_eq!(*peers.ordered_blocks_requests().keys().nth(0).unwrap(), 1);
assert_eq!(*peers.ordered_blocks_requests().keys().nth(1).unwrap(), 2);
assert_eq!(peers.information().idle, 0);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 2);
peers.reset_blocks_tasks(1);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 0);
assert_eq!(peers.information().active, 1);
assert_eq!(peers.ordered_blocks_requests().len(), 1);
assert_eq!(*peers.ordered_blocks_requests().keys().nth(0).unwrap(), 2);
for _ in 0..(MAX_PEER_FAILURES + 1) {
if peers.on_peer_block_failure(2) {
peers.reset_blocks_tasks(2);
peers.unuseful_peer(2);
}
}
assert_eq!(peers.ordered_blocks_requests().len(), 0);
assert_eq!(peers.information().idle, 1);
assert_eq!(peers.information().unuseful, 1);
assert_eq!(peers.information().active, 0);
}
#[test]
fn peer_not_inserted_when_known() {
let mut peers = PeersTasks::default();
peers.useful_peer(1);
peers.useful_peer(1);
assert_eq!(peers.information().active + peers.information().idle + peers.information().unuseful, 1);
peers.on_blocks_requested(1, &vec![H256::default()]);
peers.useful_peer(1);
assert_eq!(peers.information().active + peers.information().idle + peers.information().unuseful, 1);
for _ in 0..MAX_PEER_FAILURES {
if peers.on_peer_block_failure(1) {
peers.reset_blocks_tasks(1);
peers.unuseful_peer(1);
}
}
peers.useful_peer(1);
assert_eq!(peers.information().active + peers.information().idle + peers.information().unuseful, 1);
}
#[test]
fn peer_block_failures() {
let mut peers = PeersTasks::default();
peers.useful_peer(1);
peers.on_blocks_requested(1, &vec![H256::from(1)]);
for _ in 0..MAX_BLOCKS_FAILURES {
let requested_blocks = peers.reset_blocks_tasks(1);
let (blocks_to_request, blocks_to_forget) = peers.on_blocks_failure(requested_blocks);
assert_eq!(blocks_to_request, vec![H256::from(1)]);
assert_eq!(blocks_to_forget, vec![]);
peers.on_blocks_requested(1, &vec![H256::from(1)]);
}
let requested_blocks = peers.reset_blocks_tasks(1);
let (blocks_to_request, blocks_to_forget) = peers.on_blocks_failure(requested_blocks);
assert_eq!(blocks_to_request, vec![]);
assert_eq!(blocks_to_forget, vec![H256::from(1)]);
}
#[test]
fn peer_sort_peers_for_blocks() {
let mut peers = PeersTasks::default();
peers.on_blocks_requested(1, &vec![H256::from(1), H256::from(2)]);
peers.on_blocks_requested(2, &vec![H256::from(3), H256::from(4)]);
peers.on_block_received(2, &H256::from(3));
peers.on_block_received(2, &H256::from(4));
use std::thread;
use std::time::Duration;
thread::park_timeout(Duration::from_millis(50));
peers.on_block_received(1, &H256::from(1));
peers.on_block_received(1, &H256::from(2));
let mut peers_for_blocks: Vec<PeerIndex> = vec![1, 2];
peers.sort_peers_for_blocks(&mut peers_for_blocks);
assert_eq!(peers_for_blocks[0], 2);
assert_eq!(peers_for_blocks[1], 1);
}
}
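
One flow the tests above do not spell out end-to-end is re-assigning a timed-out peer's blocks to another peer. A hedged, test-style sketch using only the `PeersTasks` API from this file (peer ids 1 and 2 are arbitrary; this is not part of the original test suite):

use primitives::hash::H256;
use super::PeersTasks;

#[test]
fn blocks_reassigned_after_peer_failure() {
	let mut peers = PeersTasks::default();
	peers.useful_peer(1);
	peers.useful_peer(2);
	// peer#1 is asked for a block, but the request times out
	peers.on_blocks_requested(1, &vec![H256::from(1)]);
	// the management loop takes the tasks back from peer#1...
	let returned = peers.reset_blocks_tasks(1);
	let (to_request, to_forget) = peers.on_blocks_failure(returned);
	assert_eq!(to_forget, vec![]);
	// ...and re-requests the still-wanted blocks from peer#2
	peers.on_blocks_requested(2, &to_request);
	assert!(peers.get_blocks_tasks(2).map(|blocks| blocks.contains(&H256::from(1))).unwrap_or(false));
}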

File diff suppressed because it is too large

View File

@ -1,15 +1,16 @@
use std::thread;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::sync::Arc; use std::sync::Arc;
use std::sync::mpsc::{channel, Sender, Receiver}; use std::sync::mpsc::{channel, Sender, Receiver};
use chain::{Transaction, OutPoint, TransactionOutput, IndexedBlock}; use std::thread;
use network::Magic; use parking_lot::Mutex;
use miner::{DoubleSpendCheckResult, NonFinalDoubleSpendSet};
use primitives::hash::H256;
use synchronization_chain::ChainRef;
use verification::{self, BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain};
use db::{SharedStore, PreviousTransactionOutputProvider, TransactionOutputObserver};
use time::get_time; use time::get_time;
use chain::{IndexedBlock, IndexedTransaction};
use db::{PreviousTransactionOutputProvider, TransactionOutputObserver};
use network::Magic;
use primitives::hash::H256;
use verification::{BackwardsCompatibleChainVerifier as ChainVerifier, Verify as VerificationVerify, Chain};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
use utils::{MemoryPoolTransactionOutputProvider, StorageTransactionOutputProvider};
/// Block verification events sink /// Block verification events sink
pub trait BlockVerificationSink : Send + Sync + 'static { pub trait BlockVerificationSink : Send + Sync + 'static {
@ -22,7 +23,7 @@ pub trait BlockVerificationSink : Send + Sync + 'static {
/// Transaction verification events sink /// Transaction verification events sink
pub trait TransactionVerificationSink : Send + Sync + 'static { pub trait TransactionVerificationSink : Send + Sync + 'static {
/// When transaction verification has completed successfully. /// When transaction verification has completed successfully.
fn on_transaction_verification_success(&self, transaction: Transaction); fn on_transaction_verification_success(&self, transaction: IndexedTransaction);
/// When transaction verification has failed. /// When transaction verification has failed.
fn on_transaction_verification_error(&self, err: &str, hash: &H256); fn on_transaction_verification_error(&self, err: &str, hash: &H256);
} }
@ -37,47 +38,30 @@ pub enum VerificationTask {
/// Verify single block /// Verify single block
VerifyBlock(IndexedBlock), VerifyBlock(IndexedBlock),
/// Verify single transaction /// Verify single transaction
VerifyTransaction(u32, Transaction), VerifyTransaction(BlockHeight, IndexedTransaction),
/// Stop verification thread /// Stop verification thread
Stop, Stop,
} }
/// Synchronization verifier /// Synchronization verifier
pub trait Verifier : Send + 'static { pub trait Verifier : Send + Sync + 'static {
/// Verify block /// Verify block
fn verify_block(&self, block: IndexedBlock); fn verify_block(&self, block: IndexedBlock);
/// Verify transaction /// Verify transaction
fn verify_transaction(&self, height: u32, transaction: Transaction); fn verify_transaction(&self, height: BlockHeight, transaction: IndexedTransaction);
} }
/// Asynchronous synchronization verifier /// Asynchronous synchronization verifier
pub struct AsyncVerifier { pub struct AsyncVerifier {
/// Verification work transmission channel. /// Verification work transmission channel.
verification_work_sender: Sender<VerificationTask>, verification_work_sender: Mutex<Sender<VerificationTask>>,
/// Verification thread. /// Verification thread.
verification_worker_thread: Option<thread::JoinHandle<()>>, verification_worker_thread: Option<thread::JoinHandle<()>>,
} }
/// Transaction output observer, which looks into storage && into memory pool
struct ChainMemoryPoolTransactionOutputProvider {
/// In-storage checker
storage_provider: StorageTransactionOutputProvider,
/// Chain reference
chain: ChainRef,
/// Previous outputs, for which we should return 'Not spent' value.
/// These are used when new version of transaction is received.
nonfinal_spends: Option<NonFinalDoubleSpendSet>,
}
/// Transaction output observer, which looks into storage
struct StorageTransactionOutputProvider {
/// Storage reference
storage: SharedStore,
}
impl VerificationTask { impl VerificationTask {
/// Returns transaction reference if it is transaction verification task /// Returns transaction reference if it is transaction verification task
pub fn transaction(&self) -> Option<&Transaction> { pub fn transaction(&self) -> Option<&IndexedTransaction> {
match self { match self {
&VerificationTask::VerifyTransaction(_, ref transaction) => Some(&transaction), &VerificationTask::VerifyTransaction(_, ref transaction) => Some(&transaction),
_ => None, _ => None,
@ -87,35 +71,35 @@ impl VerificationTask {
impl AsyncVerifier { impl AsyncVerifier {
/// Create new async verifier /// Create new async verifier
pub fn new<T: VerificationSink>(verifier: Arc<ChainVerifier>, chain: ChainRef, sink: Arc<T>) -> Self { pub fn new<T: VerificationSink>(verifier: Arc<ChainVerifier>, storage: StorageRef, memory_pool: MemoryPoolRef, sink: Arc<T>) -> Self {
let (verification_work_sender, verification_work_receiver) = channel(); let (verification_work_sender, verification_work_receiver) = channel();
AsyncVerifier { AsyncVerifier {
verification_work_sender: verification_work_sender, verification_work_sender: Mutex::new(verification_work_sender),
verification_worker_thread: Some(thread::Builder::new() verification_worker_thread: Some(thread::Builder::new()
.name("Sync verification thread".to_string()) .name("Sync verification thread".to_string())
.spawn(move || { .spawn(move || {
AsyncVerifier::verification_worker_proc(sink, chain, verifier, verification_work_receiver) AsyncVerifier::verification_worker_proc(sink, storage, memory_pool, verifier, verification_work_receiver)
}) })
.expect("Error creating verification thread")) .expect("Error creating verification thread"))
} }
} }
/// Thread procedure for handling verification tasks /// Thread procedure for handling verification tasks
fn verification_worker_proc<T: VerificationSink>(sink: Arc<T>, chain: ChainRef, verifier: Arc<ChainVerifier>, work_receiver: Receiver<VerificationTask>) { fn verification_worker_proc<T: VerificationSink>(sink: Arc<T>, storage: StorageRef, memory_pool: MemoryPoolRef, verifier: Arc<ChainVerifier>, work_receiver: Receiver<VerificationTask>) {
while let Ok(task) = work_receiver.recv() { while let Ok(task) = work_receiver.recv() {
match task { match task {
VerificationTask::Stop => break, VerificationTask::Stop => break,
_ => { _ => {
let prevout_provider = if let Some(ref transaction) = task.transaction() { let prevout_provider = if let Some(ref transaction) = task.transaction() {
match ChainMemoryPoolTransactionOutputProvider::for_transaction(chain.clone(), transaction) { match MemoryPoolTransactionOutputProvider::for_transaction(storage.clone(), &memory_pool, &transaction.raw) {
Err(e) => { Err(e) => {
sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash()); sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash);
return; return;
}, },
Ok(prevout_provider) => prevout_provider, Ok(prevout_provider) => prevout_provider,
} }
} else { } else {
ChainMemoryPoolTransactionOutputProvider::for_block(chain.clone()) MemoryPoolTransactionOutputProvider::for_block(storage.clone())
}; };
execute_verification_task(&sink, &prevout_provider, &verifier, task) execute_verification_task(&sink, &prevout_provider, &verifier, task)
}, },
@ -128,8 +112,11 @@ impl AsyncVerifier {
impl Drop for AsyncVerifier { impl Drop for AsyncVerifier {
fn drop(&mut self) { fn drop(&mut self) {
if let Some(join_handle) = self.verification_worker_thread.take() { if let Some(join_handle) = self.verification_worker_thread.take() {
// ignore send error here <= we are being dropped anyway {
let _ = self.verification_work_sender.send(VerificationTask::Stop); let verification_work_sender = self.verification_work_sender.lock();
// ignore send error here <= we are being dropped anyway
let _ = verification_work_sender.send(VerificationTask::Stop);
}
join_handle.join().expect("Clean shutdown."); join_handle.join().expect("Clean shutdown.");
} }
} }
@ -138,14 +125,14 @@ impl Drop for AsyncVerifier {
impl Verifier for AsyncVerifier { impl Verifier for AsyncVerifier {
/// Verify block /// Verify block
fn verify_block(&self, block: IndexedBlock) { fn verify_block(&self, block: IndexedBlock) {
self.verification_work_sender self.verification_work_sender.lock()
.send(VerificationTask::VerifyBlock(block)) .send(VerificationTask::VerifyBlock(block))
.expect("Verification thread have the same lifetime as `AsyncVerifier`"); .expect("Verification thread have the same lifetime as `AsyncVerifier`");
} }
/// Verify transaction /// Verify transaction
fn verify_transaction(&self, height: u32, transaction: Transaction) { fn verify_transaction(&self, height: BlockHeight, transaction: IndexedTransaction) {
self.verification_work_sender self.verification_work_sender.lock()
.send(VerificationTask::VerifyTransaction(height, transaction)) .send(VerificationTask::VerifyTransaction(height, transaction))
.expect("Verification thread have the same lifetime as `AsyncVerifier`"); .expect("Verification thread have the same lifetime as `AsyncVerifier`");
} }
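
The switch to `Mutex<Sender<..>>` is what makes `AsyncVerifier` `Sync` while keeping the plain mpsc channel. A stripped-down, self-contained sketch of the same pattern (std `Mutex` instead of parking_lot, and a `String` payload standing in for the real verification tasks):

use std::sync::Mutex;
use std::sync::mpsc::{channel, Sender, Receiver};
use std::thread;

enum Task {
	Verify(String), // stands in for VerifyBlock / VerifyTransaction
	Stop,
}

struct Worker {
	sender: Mutex<Sender<Task>>,
	thread: Option<thread::JoinHandle<()>>,
}

impl Worker {
	fn new() -> Self {
		let (sender, receiver): (Sender<Task>, Receiver<Task>) = channel();
		Worker {
			sender: Mutex::new(sender),
			thread: Some(thread::spawn(move || {
				// worker drains the channel until a Stop task arrives
				while let Ok(task) = receiver.recv() {
					match task {
						Task::Stop => break,
						Task::Verify(item) => println!("verifying {}", item),
					}
				}
			})),
		}
	}

	fn verify(&self, item: String) {
		self.sender.lock().unwrap()
			.send(Task::Verify(item))
			.expect("worker thread has the same lifetime as `Worker`");
	}
}

impl Drop for Worker {
	fn drop(&mut self) {
		if let Some(join_handle) = self.thread.take() {
			// ignore send error: we are shutting down anyway
			let _ = self.sender.lock().unwrap().send(Task::Stop);
			join_handle.join().expect("clean shutdown");
		}
	}
}

fn main() {
	let worker = Worker::new();
	worker.verify("block#1".to_string());
} // Drop sends Stop and joins the thread here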
@ -154,7 +141,7 @@ impl Verifier for AsyncVerifier {
/// Synchronous synchronization verifier /// Synchronous synchronization verifier
pub struct SyncVerifier<T: VerificationSink> { pub struct SyncVerifier<T: VerificationSink> {
/// Storage reference /// Storage reference
storage: SharedStore, storage: StorageRef,
/// Verifier /// Verifier
verifier: ChainVerifier, verifier: ChainVerifier,
/// Verification sink /// Verification sink
@ -163,7 +150,7 @@ pub struct SyncVerifier<T: VerificationSink> {
impl<T> SyncVerifier<T> where T: VerificationSink { impl<T> SyncVerifier<T> where T: VerificationSink {
/// Create new sync verifier /// Create new sync verifier
pub fn new(network: Magic, storage: SharedStore, sink: Arc<T>) -> Self { pub fn new(network: Magic, storage: StorageRef, sink: Arc<T>) -> Self {
let verifier = ChainVerifier::new(storage.clone(), network); let verifier = ChainVerifier::new(storage.clone(), network);
SyncVerifier { SyncVerifier {
storage: storage, storage: storage,
@ -181,7 +168,7 @@ impl<T> Verifier for SyncVerifier<T> where T: VerificationSink {
} }
/// Verify transaction /// Verify transaction
fn verify_transaction(&self, _height: u32, _transaction: Transaction) { fn verify_transaction(&self, _height: BlockHeight, _transaction: IndexedTransaction) {
unimplemented!() // sync verifier is currently only used for blocks verification unimplemented!() // sync verifier is currently only used for blocks verification
} }
} }
@ -192,6 +179,9 @@ fn execute_verification_task<T: VerificationSink, U: TransactionOutputObserver +
tasks_queue.push_back(task); tasks_queue.push_back(task);
while let Some(task) = tasks_queue.pop_front() { while let Some(task) = tasks_queue.pop_front() {
// TODO: a different output provider must be created for each task
// reorg => txes from storage are re-verified + mempool txes are re-verified
// => some mempool txes can be invalid after re-verification
match task { match task {
VerificationTask::VerifyBlock(block) => { VerificationTask::VerifyBlock(block) => {
// verify block // verify block
@ -212,9 +202,9 @@ fn execute_verification_task<T: VerificationSink, U: TransactionOutputObserver +
}, },
VerificationTask::VerifyTransaction(height, transaction) => { VerificationTask::VerifyTransaction(height, transaction) => {
let time: u32 = get_time().sec as u32; let time: u32 = get_time().sec as u32;
match verifier.verify_mempool_transaction(tx_output_provider, height, time, &transaction) { match verifier.verify_mempool_transaction(tx_output_provider, height, time, &transaction.raw) {
Ok(_) => sink.on_transaction_verification_success(transaction), Ok(_) => sink.on_transaction_verification_success(transaction.into()),
Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash()), Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash),
} }
}, },
_ => unreachable!("must be checked by caller"), _ => unreachable!("must be checked by caller"),
@ -222,117 +212,16 @@ fn execute_verification_task<T: VerificationSink, U: TransactionOutputObserver +
} }
} }
impl StorageTransactionOutputProvider {
pub fn with_storage(storage: SharedStore) -> Self {
StorageTransactionOutputProvider {
storage: storage,
}
}
}
impl TransactionOutputObserver for StorageTransactionOutputProvider {
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
self.storage
.transaction_meta(&prevout.hash)
.and_then(|tm| tm.is_spent(prevout.index as usize))
}
}
impl PreviousTransactionOutputProvider for StorageTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.storage.as_previous_transaction_output_provider().previous_transaction_output(prevout)
}
}
impl ChainMemoryPoolTransactionOutputProvider {
pub fn for_transaction(chain: ChainRef, transaction: &Transaction) -> Result<Self, verification::TransactionError> {
// we have to check if there are other in-mempool transactions which spend the same outputs here
let check_result = chain.read().memory_pool().check_double_spend(transaction);
ChainMemoryPoolTransactionOutputProvider::for_double_spend_check_result(chain, check_result)
}
pub fn for_block(chain: ChainRef) -> Self {
// we have to check if there are other in-mempool transactions which spend the same outputs here
let check_result = DoubleSpendCheckResult::NoDoubleSpend;
ChainMemoryPoolTransactionOutputProvider::for_double_spend_check_result(chain, check_result)
.expect("check_result is NoDoubleSpend; NoDoubleSpend means no error; qed")
}
fn for_double_spend_check_result(chain: ChainRef, check_result: DoubleSpendCheckResult) -> Result<Self, verification::TransactionError> {
match check_result {
DoubleSpendCheckResult::DoubleSpend(_, hash, index) => Err(verification::TransactionError::UsingSpentOutput(hash, index)),
DoubleSpendCheckResult::NoDoubleSpend => Ok(ChainMemoryPoolTransactionOutputProvider {
storage_provider: StorageTransactionOutputProvider::with_storage(chain.read().storage()),
chain: chain.clone(),
nonfinal_spends: None,
}),
DoubleSpendCheckResult::NonFinalDoubleSpend(nonfinal_spends) => Ok(ChainMemoryPoolTransactionOutputProvider {
storage_provider: StorageTransactionOutputProvider::with_storage(chain.read().storage()),
chain: chain.clone(),
nonfinal_spends: Some(nonfinal_spends),
}),
}
}
}
impl TransactionOutputObserver for ChainMemoryPoolTransactionOutputProvider {
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
if let Some(ref nonfinal_spends) = self.nonfinal_spends {
let prevout = prevout.clone().into();
// check if this output is 'locked' by mempool transaction
if nonfinal_spends.double_spends.contains(&prevout) {
return Some(false);
}
// check if this output is an output of a transaction which depends on a locked mempool transaction
if nonfinal_spends.dependent_spends.contains(&prevout) {
return Some(false);
}
}
// we can omit the memory_pool check here when we're verifying new transactions, because this
// check has already been completed in the `for_transaction` method
// BUT if transactions are being verified because of reorganization, we should check the mempool,
// because while reorganizing, we can get new transactions into the mempool
let chain = self.chain.read();
if chain.memory_pool().is_spent(prevout) {
return Some(true);
}
self.storage_provider.is_spent(prevout)
}
}
impl PreviousTransactionOutputProvider for ChainMemoryPoolTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
// check if that is an output of some transaction which is virtually removed from the memory pool
if let Some(ref nonfinal_spends) = self.nonfinal_spends {
if nonfinal_spends.dependent_spends.contains(&prevout.clone().into()) {
// transaction is trying to replace some nonfinal transaction
// + it also depends on this transaction
// => this is definitely an error
return None;
}
}
let chain = self.chain.read();
chain.memory_pool().previous_transaction_output(prevout)
.or_else(|| chain.storage().as_previous_transaction_output_provider().previous_transaction_output(prevout))
}
}
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use std::sync::Arc; use std::sync::Arc;
use std::collections::HashMap; use std::collections::HashMap;
use chain::{Transaction, OutPoint}; use synchronization_client_core::CoreVerificationSink;
use synchronization_chain::{Chain, ChainRef};
use synchronization_client::CoreVerificationSink;
use synchronization_executor::tests::DummyTaskExecutor; use synchronization_executor::tests::DummyTaskExecutor;
use primitives::hash::H256; use primitives::hash::H256;
use chain::IndexedBlock; use chain::{IndexedBlock, IndexedTransaction};
use super::{Verifier, BlockVerificationSink, TransactionVerificationSink, ChainMemoryPoolTransactionOutputProvider}; use super::{Verifier, BlockVerificationSink, TransactionVerificationSink};
use db::{self, TransactionOutputObserver, PreviousTransactionOutputProvider}; use types::BlockHeight;
use test_data;
use parking_lot::RwLock;
#[derive(Default)] #[derive(Default)]
pub struct DummyVerifier { pub struct DummyVerifier {
@ -364,59 +253,14 @@ pub mod tests {
} }
} }
fn verify_transaction(&self, _height: u32, transaction: Transaction) { fn verify_transaction(&self, _height: BlockHeight, transaction: IndexedTransaction) {
match self.sink { match self.sink {
Some(ref sink) => match self.errors.get(&transaction.hash()) { Some(ref sink) => match self.errors.get(&transaction.hash) {
Some(err) => sink.on_transaction_verification_error(&err, &transaction.hash()), Some(err) => sink.on_transaction_verification_error(&err, &transaction.hash),
None => sink.on_transaction_verification_success(transaction), None => sink.on_transaction_verification_success(transaction.into()),
}, },
None => panic!("call set_sink"), None => panic!("call set_sink"),
} }
} }
} }
#[test]
fn when_transaction_spends_output_twice() {
let tx1: Transaction = test_data::TransactionBuilder::with_default_input(0).into();
let tx2: Transaction = test_data::TransactionBuilder::with_default_input(1).into();
let out1 = tx1.inputs[0].previous_output.clone();
let out2 = tx2.inputs[0].previous_output.clone();
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
chain.memory_pool_mut().insert_verified(tx1);
assert!(chain.memory_pool().is_spent(&out1));
assert!(!chain.memory_pool().is_spent(&out2));
}
#[test]
fn when_transaction_depends_on_removed_nonfinal_transaction() {
let dchain = &mut test_data::ChainBuilder::new();
test_data::TransactionBuilder::with_output(10).store(dchain) // t0
.reset().set_input(&dchain.at(0), 0).add_output(20).lock().store(dchain) // nonfinal: t0[0] -> t1
.reset().set_input(&dchain.at(1), 0).add_output(30).store(dchain) // dependent: t0[0] -> t1[0] -> t2
.reset().set_input(&dchain.at(0), 0).add_output(40).store(dchain); // good replacement: t0[0] -> t3
let mut chain = Chain::new(Arc::new(db::TestStorage::with_genesis_block()));
chain.memory_pool_mut().insert_verified(dchain.at(0));
chain.memory_pool_mut().insert_verified(dchain.at(1));
chain.memory_pool_mut().insert_verified(dchain.at(2));
// when inserting t3:
// check that is_spent(t0[0]) == Some(false) (as it is spent by nonfinal t1)
// check that is_spent(t1[0]) == Some(false) (as t1 is virtually removed)
// check that is_spent(t2[0]) == Some(false) (as t2 is virtually removed)
// check that previous_transaction_output(t0[0]) = Some(_)
// check that previous_transaction_output(t1[0]) = None (as t1 is virtually removed)
// check that previous_transaction_output(t2[0]) = None (as t2 is virtually removed)
// =>
// if t3 is also depending on t1[0] || t2[0], it will be rejected by verification as missing inputs
let chain = ChainRef::new(RwLock::new(chain));
let provider = ChainMemoryPoolTransactionOutputProvider::for_transaction(chain, &dchain.at(3)).unwrap();
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(false));
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), Some(false));
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), Some(false));
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(dchain.at(0).outputs[0].clone()));
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None);
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None);
}
} }

sync/src/types.rs Normal file

@ -0,0 +1,50 @@
use std::sync::Arc;
use futures::BoxFuture;
use parking_lot::{Mutex, RwLock};
use db;
use local_node::LocalNode;
use miner::MemoryPool;
use synchronization_client::SynchronizationClient;
use synchronization_executor::LocalSynchronizationTaskExecutor;
use synchronization_peers::Peers;
use synchronization_server::ServerImpl;
use synchronization_verifier::AsyncVerifier;
use utils::SynchronizationState;
pub use utils::BlockHeight;
/// Network request id
pub type RequestId = u32;
/// Peer is indexed using this type
pub type PeerIndex = usize;
/// No-error, no-result future
pub type EmptyBoxFuture = BoxFuture<(), ()>;
/// Reference to storage
pub type StorageRef = db::SharedStore;
/// Reference to memory pool
pub type MemoryPoolRef = Arc<RwLock<MemoryPool>>;
/// Shared synchronization state reference
pub type SynchronizationStateRef = Arc<SynchronizationState>;
/// Reference to peers
pub type PeersRef = Arc<Peers>;
/// Reference to synchronization tasks executor
pub type ExecutorRef<T> = Arc<T>;
/// Reference to synchronization client
pub type ClientRef<T> = Arc<T>;
/// Reference to synchronization client core
pub type ClientCoreRef<T> = Arc<Mutex<T>>;
/// Reference to synchronization server
pub type ServerRef<T> = Arc<T>;
/// Reference to local node
pub type LocalNodeRef = Arc<LocalNode<LocalSynchronizationTaskExecutor, ServerImpl, SynchronizationClient<LocalSynchronizationTaskExecutor, AsyncVerifier>>>;


@ -0,0 +1,61 @@
use std::collections::VecDeque;
use time;
/// Speed meter over a given number of items
#[derive(Debug, Default)]
pub struct AverageSpeedMeter {
/// Number of items to inspect
inspect_items: usize,
/// Number of items currently inspected
inspected_items: VecDeque<f64>,
/// Current speed
speed: f64,
/// Last timestamp
last_timestamp: Option<f64>,
}
impl AverageSpeedMeter {
pub fn with_inspect_items(inspect_items: usize) -> Self {
assert!(inspect_items > 0);
AverageSpeedMeter {
inspect_items: inspect_items,
inspected_items: VecDeque::with_capacity(inspect_items),
speed: 0_f64,
last_timestamp: None,
}
}
pub fn speed(&self) -> f64 {
let items_per_second = 1_f64 / self.speed;
if items_per_second.is_normal() { items_per_second } else { 0_f64 }
}
pub fn inspected_items_len(&self) -> usize {
self.inspected_items.len()
}
pub fn checkpoint(&mut self) {
// if inspected_items is already full => remove oldest item from average
if self.inspected_items.len() == self.inspect_items {
let oldest = self.inspected_items.pop_front().expect("len() is not zero; qed");
self.speed = (self.inspect_items as f64 * self.speed - oldest) / (self.inspect_items as f64 - 1_f64);
}
// add new item
let now = time::precise_time_s();
if let Some(last_timestamp) = self.last_timestamp {
let newest = now - last_timestamp;
self.speed = (self.inspected_items.len() as f64 * self.speed + newest) / (self.inspected_items.len() as f64 + 1_f64);
self.inspected_items.push_back(newest);
}
self.last_timestamp = Some(now);
}
pub fn start(&mut self) {
self.last_timestamp = Some(time::precise_time_s());
}
pub fn stop(&mut self) {
self.last_timestamp = None;
}
}
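For orientation: `checkpoint` maintains a running mean of the last `inspect_items` inter-arrival intervals without re-summing the queue, and `speed()` inverts that mean into items per second. A minimal standalone sketch of the same update rule, with hypothetical intervals (not part of this change):

use std::collections::VecDeque;

fn main() {
	let inspect_items = 3usize;
	let mut inspected: VecDeque<f64> = VecDeque::new();
	let mut speed = 0f64; // running mean interval, in seconds per item
	for &interval in [0.5f64, 0.25, 0.2, 0.1].iter() {
		if inspected.len() == inspect_items {
			// drop the oldest interval from the mean of `inspect_items` items
			let oldest = inspected.pop_front().expect("len() is not zero; qed");
			speed = (inspect_items as f64 * speed - oldest) / (inspect_items as f64 - 1_f64);
		}
		// fold the newest interval into the mean
		speed = (inspected.len() as f64 * speed + interval) / (inspected.len() as f64 + 1_f64);
		inspected.push_back(interval);
		println!("mean interval {:.3}s => {:.2} items/s", speed, 1_f64 / speed);
	}
}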


@ -1,7 +1,7 @@
use std::collections::HashMap; use std::collections::HashMap;
use chain::BlockHeader; use chain::IndexedBlockHeader;
use primitives::hash::H256; use primitives::hash::H256;
use hash_queue::{HashQueue, HashPosition}; use super::{HashQueue, HashPosition};
/// Best headers chain information /// Best headers chain information
#[derive(Debug)] #[derive(Debug)]
@ -19,12 +19,13 @@ pub struct BestHeadersChain {
/// Best hash in storage /// Best hash in storage
storage_best_hash: H256, storage_best_hash: H256,
/// Headers by hash /// Headers by hash
headers: HashMap<H256, BlockHeader>, headers: HashMap<H256, IndexedBlockHeader>,
/// Best chain /// Best chain
best: HashQueue, best: HashQueue,
} }
impl BestHeadersChain { impl BestHeadersChain {
/// Create new best headers chain
pub fn new(storage_best_hash: H256) -> Self { pub fn new(storage_best_hash: H256) -> Self {
BestHeadersChain { BestHeadersChain {
storage_best_hash: storage_best_hash, storage_best_hash: storage_best_hash,
@ -33,6 +34,7 @@ impl BestHeadersChain {
} }
} }
/// Get information on headers chain
pub fn information(&self) -> Information { pub fn information(&self) -> Information {
Information { Information {
best: self.best.len(), best: self.best.len(),
@ -40,19 +42,23 @@ impl BestHeadersChain {
} }
} }
pub fn at(&self, height: u32) -> Option<BlockHeader> { /// Get header from main chain at given position
pub fn at(&self, height: u32) -> Option<IndexedBlockHeader> {
self.best.at(height) self.best.at(height)
.and_then(|hash| self.headers.get(&hash).cloned()) .and_then(|hash| self.headers.get(&hash).cloned())
} }
pub fn by_hash(&self, hash: &H256) -> Option<BlockHeader> { /// Get header by given hash
pub fn by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
self.headers.get(hash).cloned() self.headers.get(hash).cloned()
} }
/// Get height of block with given hash in the main chain
pub fn height(&self, hash: &H256) -> Option<u32> { pub fn height(&self, hash: &H256) -> Option<u32> {
self.best.position(hash) self.best.position(hash)
} }
/// Get hashes of all direct children of the given block
pub fn children(&self, hash: &H256) -> Vec<H256> { pub fn children(&self, hash: &H256) -> Vec<H256> {
self.best.position(hash) self.best.position(hash)
.and_then(|pos| self.best.at(pos + 1)) .and_then(|pos| self.best.at(pos + 1))
@ -60,26 +66,32 @@ impl BestHeadersChain {
.unwrap_or_default() .unwrap_or_default()
} }
/// Get hash of best block
pub fn best_block_hash(&self) -> H256 { pub fn best_block_hash(&self) -> H256 {
self.best.back().or_else(|| Some(self.storage_best_hash.clone())).expect("storage_best_hash is always known") self.best.back()
.or_else(|| Some(self.storage_best_hash.clone()))
.expect("storage_best_hash is always known")
} }
pub fn insert(&mut self, header: BlockHeader) { /// Insert new block header
pub fn insert(&mut self, header: IndexedBlockHeader) {
// append to the best chain // append to the best chain
if self.best_block_hash() == header.previous_header_hash { if self.best_block_hash() == header.raw.previous_header_hash {
let header_hash = header.hash(); let header_hash = header.hash.clone();
self.headers.insert(header_hash.clone(), header); self.headers.insert(header_hash.clone(), header);
self.best.push_back(header_hash); self.best.push_back(header_hash);
return; return;
} }
} }
pub fn insert_n(&mut self, headers: Vec<BlockHeader>) { /// Insert new block headers
pub fn insert_n(&mut self, headers: Vec<IndexedBlockHeader>) {
for header in headers { for header in headers {
self.insert(header); self.insert(header);
} }
} }
/// Remove block header with given hash and all its children
pub fn remove(&mut self, hash: &H256) { pub fn remove(&mut self, hash: &H256) {
if self.headers.remove(hash).is_some() { if self.headers.remove(hash).is_some() {
match self.best.remove(hash) { match self.best.remove(hash) {
@ -90,12 +102,14 @@ impl BestHeadersChain {
} }
} }
/// Remove block headers with given hashes and all their children
pub fn remove_n<I: IntoIterator<Item=H256>> (&mut self, hashes: I) { pub fn remove_n<I: IntoIterator<Item=H256>> (&mut self, hashes: I) {
for hash in hashes { for hash in hashes {
self.remove(&hash); self.remove(&hash);
} }
} }
/// Called when a new block is inserted into storage
pub fn block_inserted_to_storage(&mut self, hash: &H256, storage_best_hash: &H256) { pub fn block_inserted_to_storage(&mut self, hash: &H256, storage_best_hash: &H256) {
if self.best.front().map(|h| &h == hash).unwrap_or(false) { if self.best.front().map(|h| &h == hash).unwrap_or(false) {
self.best.pop_front(); self.best.pop_front();
@ -104,11 +118,13 @@ impl BestHeadersChain {
self.storage_best_hash = storage_best_hash.clone(); self.storage_best_hash = storage_best_hash.clone();
} }
/// Clear headers chain
pub fn clear(&mut self) { pub fn clear(&mut self) {
self.headers.clear(); self.headers.clear();
self.best.clear(); self.best.clear();
} }
/// Remove headers after position
fn clear_after(&mut self, position: u32) { fn clear_after(&mut self, position: u32) {
if position == 0 { if position == 0 {
self.clear() self.clear()
@ -122,9 +138,9 @@ impl BestHeadersChain {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::BestHeadersChain;
use primitives::hash::H256; use primitives::hash::H256;
use test_data; use test_data;
use super::BestHeadersChain;
#[test] #[test]
fn best_chain_empty() { fn best_chain_empty() {
@ -143,23 +159,23 @@ mod tests {
let b2 = test_data::block_h2().block_header; let b2 = test_data::block_h2().block_header;
let b181 = test_data::block_h181().block_header; let b181 = test_data::block_h181().block_header;
let b182 = test_data::block_h182().block_header; let b182 = test_data::block_h182().block_header;
chain.insert(b1); chain.insert(b1.into());
chain.insert(b181.clone()); chain.insert(b181.clone().into());
assert_eq!(chain.information().best, 1); assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1); assert_eq!(chain.information().total, 1);
chain.insert(b2); chain.insert(b2.into());
assert_eq!(chain.information().best, 2); assert_eq!(chain.information().best, 2);
assert_eq!(chain.information().total, 2); assert_eq!(chain.information().total, 2);
chain.clear(); chain.clear();
assert_eq!(chain.information().best, 0); assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0); assert_eq!(chain.information().total, 0);
chain.insert(b181.clone()); chain.insert(b181.clone().into());
assert_eq!(chain.information().best, 0); assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0); assert_eq!(chain.information().total, 0);
chain.block_inserted_to_storage(&b181.hash(), &b181.hash()); chain.block_inserted_to_storage(&b181.hash(), &b181.hash());
assert_eq!(chain.information().best, 0); assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0); assert_eq!(chain.information().total, 0);
chain.insert(b182); chain.insert(b182.into());
assert_eq!(chain.information().best, 1); assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1); assert_eq!(chain.information().total, 1);
} }
@ -173,14 +189,14 @@ mod tests {
let b4 = test_data::block_builder().header().parent(b3.hash()).build().build().block_header; let b4 = test_data::block_builder().header().parent(b3.hash()).build().build().block_header;
let mut chain = BestHeadersChain::new(b0.hash()); let mut chain = BestHeadersChain::new(b0.hash());
chain.insert_n(vec![b1.clone(), b2.clone(), b3.clone(), b4.clone()]); chain.insert_n(vec![b1.clone().into(), b2.clone().into(), b3.clone().into(), b4.clone().into()]);
assert_eq!(chain.information().best, 4); assert_eq!(chain.information().best, 4);
assert_eq!(chain.information().total, 4); assert_eq!(chain.information().total, 4);
chain.remove(&b2.hash()); chain.remove(&b2.hash());
assert_eq!(chain.information().best, 1); assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1); assert_eq!(chain.information().total, 1);
chain.insert_n(vec![b2.clone(), b3.clone(), b4.clone()]); chain.insert_n(vec![b2.clone().into(), b3.clone().into(), b4.clone().into()]);
assert_eq!(chain.information().best, 4); assert_eq!(chain.information().best, 4);
assert_eq!(chain.information().total, 4); assert_eq!(chain.information().total, 4);
chain.remove(&H256::default()); chain.remove(&H256::default());
@ -196,16 +212,16 @@ mod tests {
fn best_chain_insert_to_db_no_reorg() { fn best_chain_insert_to_db_no_reorg() {
let mut chain = BestHeadersChain::new(test_data::genesis().hash()); let mut chain = BestHeadersChain::new(test_data::genesis().hash());
let b1 = test_data::block_h1().block_header; let b1 = test_data::block_h1().block_header;
chain.insert(b1.clone()); chain.insert(b1.clone().into());
assert_eq!(chain.at(0), Some(b1.clone())); assert_eq!(chain.at(0), Some(b1.clone().into()));
let b2 = test_data::block_h2().block_header; let b2 = test_data::block_h2().block_header;
chain.insert(b2.clone()); chain.insert(b2.clone().into());
assert_eq!(chain.at(0), Some(b1.clone())); assert_eq!(chain.at(0), Some(b1.clone().into()));
assert_eq!(chain.at(1), Some(b2.clone())); assert_eq!(chain.at(1), Some(b2.clone().into()));
chain.block_inserted_to_storage(&b1.hash(), &b1.hash()); chain.block_inserted_to_storage(&b1.hash(), &b1.hash());
assert_eq!(chain.at(0), Some(b2)); assert_eq!(chain.at(0), Some(b2.into()));
assert_eq!(chain.at(1), None); assert_eq!(chain.at(1), None);
assert_eq!(chain.information().best, 1); assert_eq!(chain.information().best, 1);


@ -0,0 +1,286 @@
use parking_lot::Mutex;
use bit_vec::BitVec;
use murmur3::murmur3_32;
use chain::{IndexedTransaction, OutPoint};
use message::types;
use ser::serialize;
use script::Script;
/// Constant optimized to create large differences in the seed for different values of `hash_functions_num`.
const SEED_OFFSET: u32 = 0xFBA4C795;
/// Connection bloom filter
#[derive(Debug)]
pub struct BloomFilter {
/// Bloom data. Filter can be updated when transaction is matched => we have to use some kind of lock here.
/// Mutex is the only choice, because:
/// 1) we do not know if transaction matches the filter in advance
/// 2) RwLock is non-upgradeable in Rust
bloom: Option<Mutex<BloomFilterData>>,
/// Filter update type.
filter_flags: types::FilterFlags,
}
/// Bloom filter data implemented as described in:
/// https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
#[derive(Debug, Default)]
struct BloomFilterData {
/// Filter storage
filter: BitVec,
/// Number of hash functions to use in bloom filter
hash_functions_num: u32,
/// Value to add to Murmur3 hash seed when calculating hash
tweak: u32,
}
impl Default for BloomFilter {
fn default() -> Self {
BloomFilter {
bloom: None,
filter_flags: types::FilterFlags::None,
}
}
}
impl BloomFilter {
/// Create with given parameters
#[cfg(test)]
pub fn with_filter_load(message: types::FilterLoad) -> Self {
BloomFilter {
filter_flags: message.flags,
bloom: Some(Mutex::new(BloomFilterData::with_filter_load(message))),
}
}
/// Returns true if bloom filter is set
pub fn is_set(&self) -> bool {
self.bloom.is_some()
}
/// Sets bloom filter to given value
pub fn set_bloom_filter(&mut self, message: types::FilterLoad) {
self.bloom = Some(Mutex::new(BloomFilterData::with_filter_load(message)));
}
/// Adds given data to the current filter, so that transactions containing it will match
pub fn update_bloom_filter(&mut self, message: types::FilterAdd) {
if let Some(ref mut bloom) = self.bloom {
bloom.lock().insert(&message.data);
}
}
/// Removes bloom filter, so that all transactions are now accepted by this filter
pub fn remove_bloom_filter(&mut self) {
self.bloom = None;
}
/// Filters transaction using bloom filter data
pub fn filter_transaction(&self, tx: &IndexedTransaction) -> bool {
// check with bloom filter, if set
match self.bloom {
// if no filter is set for the connection => match everything
None => true,
// filter using bloom filter, then update
Some(ref bloom) => {
let mut bloom = bloom.lock();
let mut is_match = false;
// match if filter contains any arbitrary script data element in any scriptPubKey in tx
for (output_index, output) in tx.raw.outputs.iter().enumerate() {
let script = Script::new(output.script_pubkey.clone());
let is_update_needed = self.filter_flags == types::FilterFlags::All
|| (self.filter_flags == types::FilterFlags::PubKeyOnly && (script.is_pay_to_public_key() || script.is_multisig_script()));
for instruction in script.iter().filter_map(|i| i.ok()) {
if let Some(instruction_data) = instruction.data {
if bloom.contains(instruction_data) {
is_match = true;
if is_update_needed {
bloom.insert(&serialize(&OutPoint {
hash: tx.hash.clone(),
index: output_index as u32,
}));
}
}
}
}
}
// filter is only updated above => we can early-return from here on
if is_match {
return is_match;
}
// match if filter contains transaction itself
if bloom.contains(&*tx.hash) {
return true;
}
// match if filter contains an outpoint this transaction spends
for input in &tx.raw.inputs {
// check if match previous output
let previous_output = serialize(&input.previous_output);
is_match = bloom.contains(&*previous_output);
if is_match {
return true;
}
// check if match any arbitrary script data element in any scriptSig in tx
let script = Script::new(input.script_sig.clone());
for instruction in script.iter().filter_map(|i| i.ok()) {
if let Some(instruction_data) = instruction.data {
is_match = bloom.contains(&*instruction_data);
if is_match {
return true;
}
}
}
}
// no matches
false
},
}
}
}
impl BloomFilterData {
/// Create with given parameters
pub fn with_filter_load(message: types::FilterLoad) -> Self {
BloomFilterData {
filter: BitVec::from_bytes(&message.filter),
hash_functions_num: message.hash_functions,
tweak: message.tweak,
}
}
/// True if filter contains given bytes
pub fn contains(&self, data: &[u8]) -> bool {
for hash_function_idx in 0..self.hash_functions_num {
let murmur_seed = hash_function_idx.overflowing_mul(SEED_OFFSET).0.overflowing_add(self.tweak).0;
let murmur_hash = murmur3_32(&mut data.as_ref(), murmur_seed) as usize % self.filter.len();
if !self.filter.get(murmur_hash).expect("murmur_hash is result of mod operation by filter len; qed") {
return false;
}
}
true
}
/// Add bytes to the filter
pub fn insert(&mut self, data: &[u8]) {
for hash_function_idx in 0..self.hash_functions_num {
let murmur_seed = hash_function_idx.overflowing_mul(SEED_OFFSET).0.overflowing_add(self.tweak).0;
let murmur_hash = murmur3_32(&mut data.as_ref(), murmur_seed) as usize % self.filter.len();
self.filter.set(murmur_hash, true);
}
}
}
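For reference, both `contains` and `insert` derive one Murmur3 seed per hash function as `i * SEED_OFFSET + tweak`, with wrapping arithmetic; this is the BIP 37 seed schedule. A standalone sketch of the schedule, with a hypothetical tweak value:

fn main() {
	const SEED_OFFSET: u32 = 0xFBA4C795; // same constant as above
	let tweak: u32 = 5; // hypothetical nTweak from a filterload message
	let hash_functions_num = 4u32;
	for i in 0..hash_functions_num {
		// wrapping arithmetic matches the overflowing_mul/overflowing_add above
		let seed = i.wrapping_mul(SEED_OFFSET).wrapping_add(tweak);
		println!("hash function {} => murmur3 seed 0x{:08x}", i, seed);
	}
}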
#[cfg(test)]
mod tests {
use std::iter::repeat;
use chain::IndexedTransaction;
use message::types;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use ser::serialize;
use test_data;
use super::{BloomFilter, BloomFilterData};
fn default_filterload() -> types::FilterLoad {
types::FilterLoad {
filter: Bytes::from(repeat(0u8).take(1024).collect::<Vec<_>>()),
hash_functions: 10,
tweak: 5,
flags: types::FilterFlags::None,
}
}
fn make_filteradd(data: &[u8]) -> types::FilterAdd {
types::FilterAdd {
data: data.into(),
}
}
#[test]
fn bloom_insert_data() {
let mut bloom = BloomFilterData::with_filter_load(default_filterload());
assert!(!bloom.contains(&*H256::default()));
bloom.insert(&*H256::default());
assert!(bloom.contains(&*H256::default()));
}
#[test]
fn bloom_filter_matches_transaction_by_hash() {
let tx1: IndexedTransaction = test_data::TransactionBuilder::with_output(10).into();
let tx2: IndexedTransaction = test_data::TransactionBuilder::with_output(20).into();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&*tx1.hash));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
#[test]
fn bloom_filter_matches_transaction_by_output_script_data_element() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
// output script: OP_DUP OP_HASH160 380cb3c594de4e7e9b8e18db182987bebb5a4f70 OP_EQUALVERIFY OP_CHECKSIG
let tx1: IndexedTransaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_out_data: Bytes = "380cb3c594de4e7e9b8e18db182987bebb5a4f70".into();
let tx2 = IndexedTransaction::default();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&tx1_out_data));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
#[test]
fn bloom_filter_matches_transaction_by_previous_output_point() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
let tx1: IndexedTransaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_previous_output: Bytes = serialize(&tx1.raw.inputs[0].previous_output);
let tx2 = IndexedTransaction::default();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&tx1_previous_output));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
#[test]
fn connection_filter_matches_transaction_by_input_script_data_element() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
// input script: PUSH DATA 304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b01
let tx1: IndexedTransaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_input_data: Bytes = "304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b01".into();
let tx2 = IndexedTransaction::default();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&tx1_input_data));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
}


@ -10,7 +10,7 @@ use ser::{Stream, Serializable};
/// Maximum size of prefilled transactions in compact block /// Maximum size of prefilled transactions in compact block
const MAX_COMPACT_BLOCK_PREFILLED_SIZE: usize = 10 * 1024; const MAX_COMPACT_BLOCK_PREFILLED_SIZE: usize = 10 * 1024;
pub fn build_compact_block(block: IndexedBlock, prefilled_transactions_indexes: HashSet<usize>) -> BlockHeaderAndIDs { pub fn build_compact_block(block: &IndexedBlock, prefilled_transactions_indexes: HashSet<usize>) -> BlockHeaderAndIDs {
let nonce: u64 = thread_rng().gen(); let nonce: u64 = thread_rng().gen();
let prefilled_transactions_len = prefilled_transactions_indexes.len(); let prefilled_transactions_len = prefilled_transactions_indexes.len();
@ -19,14 +19,14 @@ pub fn build_compact_block(block: IndexedBlock, prefilled_transactions_indexes:
let mut prefilled_transactions_size: usize = 0; let mut prefilled_transactions_size: usize = 0;
let (key0, key1) = short_transaction_id_keys(nonce, &block.header.raw); let (key0, key1) = short_transaction_id_keys(nonce, &block.header.raw);
for (transaction_index, transaction) in block.transactions.into_iter().enumerate() { for (transaction_index, transaction) in block.transactions.iter().enumerate() {
let transaction_size = transaction.raw.serialized_size(); let transaction_size = transaction.raw.serialized_size();
if prefilled_transactions_size + transaction_size < MAX_COMPACT_BLOCK_PREFILLED_SIZE if prefilled_transactions_size + transaction_size < MAX_COMPACT_BLOCK_PREFILLED_SIZE
&& prefilled_transactions_indexes.contains(&transaction_index) { && prefilled_transactions_indexes.contains(&transaction_index) {
prefilled_transactions_size += transaction_size; prefilled_transactions_size += transaction_size;
prefilled_transactions.push(PrefilledTransaction { prefilled_transactions.push(PrefilledTransaction {
index: transaction_index, index: transaction_index,
transaction: transaction.raw, transaction: transaction.raw.clone(),
}) })
} else { } else {
short_ids.push(short_transaction_id(key0, key1, &transaction.hash)); short_ids.push(short_transaction_id(key0, key1, &transaction.hash));
@ -34,7 +34,7 @@ pub fn build_compact_block(block: IndexedBlock, prefilled_transactions_indexes:
} }
BlockHeaderAndIDs { BlockHeaderAndIDs {
header: block.header.raw, header: block.header.raw.clone(),
nonce: nonce, nonce: nonce,
short_ids: short_ids, short_ids: short_ids,
prefilled_transactions: prefilled_transactions, prefilled_transactions: prefilled_transactions,
@ -99,7 +99,7 @@ mod tests {
.transaction().output().value(30).build().build() .transaction().output().value(30).build().build()
.build(); // genesis -> block .build(); // genesis -> block
let prefilled: HashSet<_> = vec![1].into_iter().collect(); let prefilled: HashSet<_> = vec![1].into_iter().collect();
let compact_block = build_compact_block(block.clone().into(), prefilled); let compact_block = build_compact_block(&block.clone().into(), prefilled);
let (key0, key1) = short_transaction_id_keys(compact_block.nonce, &block.block_header); let (key0, key1) = short_transaction_id_keys(compact_block.nonce, &block.block_header);
let short_ids = vec![ let short_ids = vec![
short_transaction_id(key0, key1, &block.transactions[0].hash()), short_transaction_id(key0, key1, &block.transactions[0].hash()),


@ -0,0 +1,189 @@
use bit_vec::BitVec;
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use synchronization_peers::MerkleBlockArtefacts;
use utils::{KnownHashFilter, KnownHashType, BloomFilter, FeeRateFilter, build_compact_block, build_partial_merkle_tree};
/// Filter, which controls data relayed over connection.
#[derive(Debug, Default)]
pub struct ConnectionFilter {
/// Known hashes filter
known_hash_filter: KnownHashFilter,
/// Fee rate filter
fee_rate_filter: FeeRateFilter,
/// Bloom filter
bloom_filter: BloomFilter,
}
impl ConnectionFilter {
/// Add known item hash
pub fn hash_known_as(&mut self, hash: H256, hash_type: KnownHashType) {
self.known_hash_filter.insert(hash, hash_type);
}
/// Is item with given hash && type known by peer
pub fn is_hash_known_as(&self, hash: &H256, hash_type: KnownHashType) -> bool {
self.known_hash_filter.contains(hash, hash_type)
}
/// Check if block should be sent to this connection
pub fn filter_block(&self, block_hash: &H256) -> bool {
self.known_hash_filter.filter_block(block_hash)
}
/// Check if transaction should be sent to this connection && optionally update filter
pub fn filter_transaction(&self, transaction: &IndexedTransaction, transaction_fee_rate: Option<u64>) -> bool {
self.known_hash_filter.filter_transaction(&transaction.hash)
&& self.fee_rate_filter.filter_transaction(transaction_fee_rate)
&& self.bloom_filter.filter_transaction(transaction)
}
/// Load filter
pub fn load(&mut self, message: types::FilterLoad) {
self.bloom_filter.set_bloom_filter(message);
}
/// Add filter
pub fn add(&mut self, message: types::FilterAdd) {
self.bloom_filter.update_bloom_filter(message);
}
/// Clear filter
pub fn clear(&mut self) {
self.bloom_filter.remove_bloom_filter();
}
/// Limit transaction announcing by transaction fee
pub fn set_fee_rate(&mut self, message: types::FeeFilter) {
self.fee_rate_filter.set_min_fee_rate(message);
}
/// Convert block to compact block using this filter
pub fn build_compact_block(&self, block: &IndexedBlock) -> types::CompactBlock {
let unknown_transaction_indexes = block.transactions.iter().enumerate()
.filter(|&(_, tx)| !self.known_hash_filter.contains(&tx.hash, KnownHashType::Transaction))
.map(|(idx, _)| idx)
.collect();
types::CompactBlock {
header: build_compact_block(&block, unknown_transaction_indexes),
}
}
/// Convert `Block` to `MerkleBlock` using this filter
pub fn build_merkle_block(&self, block: &IndexedBlock) -> Option<MerkleBlockArtefacts> {
if !self.bloom_filter.is_set() {
// only respond when bloom filter is set
return None;
}
// prepare result
let all_len = block.transactions.len();
let mut result = MerkleBlockArtefacts {
merkleblock: types::MerkleBlock {
block_header: block.header.raw.clone(),
total_transactions: all_len as u32,
hashes: Vec::default(),
flags: Bytes::default(),
},
matching_transactions: Vec::new(),
};
// calculate hashes && match flags for all transactions
let (all_hashes, all_flags) = block.transactions.iter()
.fold((Vec::<H256>::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| {
let flag = self.bloom_filter.filter_transaction(&t);
all_flags.push(flag);
all_hashes.push(t.hash.clone());
if flag {
result.matching_transactions.push(t.clone());
}
(all_hashes, all_flags)
});
// build partial merkle tree
let partial_merkle_tree = build_partial_merkle_tree(all_hashes, all_flags);
result.merkleblock.hashes.extend(partial_merkle_tree.hashes);
// to_bytes() converts [true, false, true] to 0b10100000
// while protocol requires [true, false, true] to be serialized as 0x00000101
result.merkleblock.flags = partial_merkle_tree.flags.to_bytes().into_iter()
.map(|b|
((b & 0b10000000) >> 7) |
((b & 0b01000000) >> 5) |
((b & 0b00100000) >> 3) |
((b & 0b00010000) >> 1) |
((b & 0b00001000) << 1) |
((b & 0b00000100) << 3) |
((b & 0b00000010) << 5) |
((b & 0b00000001) << 7)).collect::<Vec<u8>>().into();
Some(result)
}
}
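The flags serialization above is a per-byte bit reversal: `BitVec::to_bytes()` packs bits MSB-first, while the protocol expects LSB-first. A standalone check of the example from the comment ([true, false, true] => 0x05):

// per-byte bit reversal used for merkleblock flags
fn reverse_bits(b: u8) -> u8 {
	((b & 0b1000_0000) >> 7) | ((b & 0b0100_0000) >> 5) |
	((b & 0b0010_0000) >> 3) | ((b & 0b0001_0000) >> 1) |
	((b & 0b0000_1000) << 1) | ((b & 0b0000_0100) << 3) |
	((b & 0b0000_0010) << 5) | ((b & 0b0000_0001) << 7)
}

fn main() {
	// [true, false, true] packed MSB-first by to_bytes() is 0b1010_0000
	assert_eq!(reverse_bits(0b1010_0000), 0b0000_0101);
}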
#[cfg(test)]
pub mod tests {
use std::iter::repeat;
use chain::IndexedTransaction;
use message::types;
use primitives::bytes::Bytes;
use test_data;
use super::ConnectionFilter;
use utils::KnownHashType;
#[test]
fn filter_default_accepts_block() {
assert!(ConnectionFilter::default().filter_block(&test_data::genesis().hash()));
}
#[test]
fn filter_default_accepts_transaction() {
assert!(ConnectionFilter::default().filter_transaction(&test_data::genesis().transactions[0].clone().into(), Some(0)));
}
#[test]
fn filter_rejects_block_known() {
let mut filter = ConnectionFilter::default();
filter.hash_known_as(test_data::block_h1().hash(), KnownHashType::Block);
filter.hash_known_as(test_data::block_h2().hash(), KnownHashType::CompactBlock);
assert!(!filter.filter_block(&test_data::block_h1().hash()));
assert!(!filter.filter_block(&test_data::block_h2().hash()));
assert!(filter.filter_block(&test_data::genesis().hash()));
}
#[test]
fn filter_rejects_transaction_known() {
let mut filter = ConnectionFilter::default();
filter.hash_known_as(test_data::block_h1().transactions[0].hash(), KnownHashType::Transaction);
assert!(!filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), None));
assert!(filter.filter_transaction(&test_data::block_h2().transactions[0].clone().into(), None));
}
#[test]
fn filter_rejects_transaction_feerate() {
let mut filter = ConnectionFilter::default();
filter.set_fee_rate(types::FeeFilter::with_fee_rate(1000));
assert!(filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), None));
assert!(filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), Some(1500)));
assert!(!filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), Some(500)));
}
#[test]
fn filter_rejects_transaction_bloomfilter() {
let mut filter = ConnectionFilter::default();
let tx: IndexedTransaction = test_data::block_h1().transactions[0].clone().into();
filter.load(types::FilterLoad {
filter: Bytes::from(repeat(0u8).take(1024).collect::<Vec<_>>()),
hash_functions: 10,
tweak: 5,
flags: types::FilterFlags::None,
});
assert!(!filter.filter_transaction(&tx, None));
filter.add(types::FilterAdd {
data: (&*tx.hash as &[u8]).into(),
});
assert!(filter.filter_transaction(&tx, None));
filter.clear();
assert!(filter.filter_transaction(&tx, None));
}
}


@ -0,0 +1,49 @@
use message::types;
/// Connection fee rate filter
#[derive(Debug, Default)]
pub struct FeeRateFilter {
/// Minimal fee in satoshis per 1000 bytes
fee_rate: u64,
}
impl FeeRateFilter {
/// Set minimal fee rate accepted by this filter
pub fn set_min_fee_rate(&mut self, message: types::FeeFilter) {
self.fee_rate = message.fee_rate;
}
/// Filter transaction using its fee rate
pub fn filter_transaction(&self, tx_fee_rate: Option<u64>) -> bool {
tx_fee_rate
.map(|tx_fee_rate| tx_fee_rate >= self.fee_rate)
.unwrap_or(true)
}
}
#[cfg(test)]
mod tests {
use message::types;
use super::FeeRateFilter;
#[test]
fn fee_rate_filter_empty() {
assert!(FeeRateFilter::default().filter_transaction(Some(0)));
assert!(FeeRateFilter::default().filter_transaction(None));
}
#[test]
fn fee_rate_filter_accepts() {
let mut filter = FeeRateFilter::default();
filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000));
assert!(filter.filter_transaction(Some(1000)));
assert!(filter.filter_transaction(Some(2000)));
}
#[test]
fn fee_rate_filter_rejects() {
let mut filter = FeeRateFilter::default();
filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000));
assert!(!filter.filter_transaction(Some(500)));
}
}


@ -0,0 +1,147 @@
use linked_hash_map::LinkedHashMap;
use primitives::hash::H256;
/// Maximal number of hashes to store in known-hashes filter
pub const MAX_KNOWN_HASHES_LEN: usize = 2048;
/// Hash-knowledge type
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum KnownHashType {
/// Peer knows transaction with this hash
Transaction,
/// Peer knows block with this hash
Block,
/// Peer knows compact block with this hash
CompactBlock,
}
/// Known-hashes filter
#[derive(Debug, Default)]
pub struct KnownHashFilter {
/// Insertion-time ordered known hashes
known_hashes: LinkedHashMap<H256, KnownHashType>,
}
impl KnownHashFilter {
/// Insert known hash
pub fn insert(&mut self, hash: H256, hash_type: KnownHashType) {
if !self.known_hashes.contains_key(&hash) {
self.known_hashes.insert(hash, hash_type);
// remove oldest-known hash if the limit is exceeded
if self.known_hashes.len() > MAX_KNOWN_HASHES_LEN {
self.known_hashes.pop_front();
}
}
}
/// Returns number of known hashes
#[cfg(test)]
pub fn len(&self) -> usize {
self.known_hashes.len()
}
/// Returns true if peer knows about this hash with this type
pub fn contains(&self, hash: &H256, hash_type: KnownHashType) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type == hash_type)
.unwrap_or(false)
}
/// Filter block using its hash
pub fn filter_block(&self, hash: &H256) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type != KnownHashType::Block
&& *stored_hash_type != KnownHashType::CompactBlock)
.unwrap_or(true)
}
/// Filter transaction using its hash
pub fn filter_transaction(&self, hash: &H256) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type != KnownHashType::Transaction)
.unwrap_or(true)
}
}
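Note that eviction is insertion-ordered rather than LRU: re-inserting an already-known hash does not refresh its position in the queue. A dependency-free sketch of the same bounded-set behavior using only std types (u32 stands in for H256 here):

use std::collections::{HashSet, VecDeque};

struct BoundedSet {
	cap: usize,
	order: VecDeque<u32>, // insertion order, oldest in front
	known: HashSet<u32>,
}

impl BoundedSet {
	fn insert(&mut self, hash: u32) {
		if self.known.insert(hash) {
			self.order.push_back(hash);
			// evict the oldest-inserted hash once the cap is exceeded
			if self.order.len() > self.cap {
				let oldest = self.order.pop_front().expect("len > cap >= 1; qed");
				self.known.remove(&oldest);
			}
		}
	}
}

fn main() {
	let mut set = BoundedSet { cap: 2, order: VecDeque::new(), known: HashSet::new() };
	set.insert(1); set.insert(2); set.insert(3); // 1 is evicted
	assert!(!set.known.contains(&1));
	assert!(set.known.contains(&2) && set.known.contains(&3));
}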
#[cfg(test)]
mod tests {
use primitives::hash::H256;
use super::{KnownHashFilter, KnownHashType, MAX_KNOWN_HASHES_LEN};
#[test]
fn known_hash_filter_empty() {
assert!(KnownHashFilter::default().filter_transaction(&H256::from(0)));
assert!(KnownHashFilter::default().filter_block(&H256::from(0)));
}
#[test]
fn known_hash_filter_block() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(!filter.filter_block(&H256::from(0)));
assert!(!filter.filter_block(&H256::from(1)));
assert!(filter.filter_block(&H256::from(2)));
assert!(filter.filter_block(&H256::from(3)));
}
#[test]
fn known_hash_filter_transaction() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(filter.filter_transaction(&H256::from(0)));
assert!(filter.filter_transaction(&H256::from(1)));
assert!(!filter.filter_transaction(&H256::from(2)));
assert!(filter.filter_transaction(&H256::from(3)));
}
#[test]
fn known_hash_filter_contains() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(filter.contains(&H256::from(0), KnownHashType::Block));
assert!(!filter.contains(&H256::from(0), KnownHashType::CompactBlock));
assert!(filter.contains(&H256::from(1), KnownHashType::CompactBlock));
assert!(!filter.contains(&H256::from(1), KnownHashType::Block));
assert!(filter.contains(&H256::from(2), KnownHashType::Transaction));
assert!(!filter.contains(&H256::from(2), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::CompactBlock));
assert!(!filter.contains(&H256::from(3), KnownHashType::Transaction));
}
#[test]
fn known_hash_filter_insert() {
let mut hash_data = [0u8; 32];
let mut filter = KnownHashFilter::default();
assert_eq!(filter.len(), 0);
// insert new hash
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), 1);
// insert already known hash => nothing should change
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), 1);
// insert MAX_KNOWN_HASHES_LEN
for i in 1..MAX_KNOWN_HASHES_LEN {
hash_data[0] = (i % 255) as u8;
hash_data[1] = ((i / 255) % 255) as u8;
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), i + 1);
}
// insert new unknown hash => len stays the same, as the oldest hash is evicted
hash_data[0] = ((MAX_KNOWN_HASHES_LEN + 1) % 255) as u8;
hash_data[1] = (((MAX_KNOWN_HASHES_LEN + 1) / 255) % 255) as u8;
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), MAX_KNOWN_HASHES_LEN);
// check that oldest known hash has been removed
hash_data[0] = 0; hash_data[1] = 0;
assert!(!filter.contains(&H256::from(hash_data.clone()), KnownHashType::Block));
hash_data[0] = 1; hash_data[1] = 0;
assert!(filter.contains(&H256::from(hash_data.clone()), KnownHashType::Block));
}
}


@ -0,0 +1,148 @@
use std::collections::HashMap;
use chain::{Transaction, TransactionOutput, OutPoint};
use db::{PreviousTransactionOutputProvider, TransactionOutputObserver};
use miner::{DoubleSpendCheckResult, HashedOutPoint, NonFinalDoubleSpendSet};
use verification::TransactionError;
use super::super::types::{MemoryPoolRef, StorageRef};
use super::StorageTransactionOutputProvider;
/// Transaction output observer, which looks into both storage && into memory pool.
/// It also allows replacing non-final transactions in the memory pool.
pub struct MemoryPoolTransactionOutputProvider {
/// Storage provider
storage_provider: StorageTransactionOutputProvider,
/// Transaction inputs from memory pool transactions
mempool_inputs: HashMap<HashedOutPoint, Option<TransactionOutput>>,
/// Previous outputs for which we should return a 'not spent' value.
/// These are used when a new version of a transaction is received.
nonfinal_spends: Option<NonFinalDoubleSpendSet>,
}
impl MemoryPoolTransactionOutputProvider {
/// Create new provider for verifying given transaction
pub fn for_transaction(storage: StorageRef, memory_pool: &MemoryPoolRef, transaction: &Transaction) -> Result<Self, TransactionError> {
// we have to check if there are other in-mempool transactions which spend the same outputs here
let memory_pool = memory_pool.read();
let check_result = memory_pool.check_double_spend(transaction);
match check_result {
// an input of the transaction is already spent by another final transaction from the memory pool
DoubleSpendCheckResult::DoubleSpend(_, hash, index) => Err(TransactionError::UsingSpentOutput(hash, index)),
// there are no transactions in the memory pool spending the same inputs
DoubleSpendCheckResult::NoDoubleSpend => Ok(MemoryPoolTransactionOutputProvider {
storage_provider: StorageTransactionOutputProvider::with_storage(storage),
mempool_inputs: transaction.inputs.iter()
.map(|input| (
input.previous_output.clone().into(),
memory_pool.previous_transaction_output(&input.previous_output),
)).collect(),
nonfinal_spends: None,
}),
// there are non-final transactions in the memory pool spending the same inputs
DoubleSpendCheckResult::NonFinalDoubleSpend(nonfinal_spends) => Ok(MemoryPoolTransactionOutputProvider {
storage_provider: StorageTransactionOutputProvider::with_storage(storage),
mempool_inputs: transaction.inputs.iter()
.map(|input| (
input.previous_output.clone().into(),
memory_pool.previous_transaction_output(&input.previous_output),
)).collect(),
nonfinal_spends: Some(nonfinal_spends),
}),
}
}
/// Create new provider for verifying given block
pub fn for_block(storage: StorageRef) -> Self {
MemoryPoolTransactionOutputProvider {
storage_provider: StorageTransactionOutputProvider::with_storage(storage),
mempool_inputs: HashMap::new(),
nonfinal_spends: None,
}
}
}
impl TransactionOutputObserver for MemoryPoolTransactionOutputProvider {
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
// check if this output is spent by some non-final mempool transaction
if let Some(ref nonfinal_spends) = self.nonfinal_spends {
if nonfinal_spends.double_spends.contains(&prevout.clone().into()) {
return Some(false);
}
}
// we can omit memory_pool check here, because it has been completed in `for_transaction` method
// => just check spending in storage
self.storage_provider.is_spent(prevout)
}
}
impl PreviousTransactionOutputProvider for MemoryPoolTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
let hashed_prevout: HashedOutPoint = prevout.clone().into();
// check if that is an output of some transaction which is virtually removed from the memory pool
if let Some(ref nonfinal_spends) = self.nonfinal_spends {
if nonfinal_spends.dependent_spends.contains(&hashed_prevout) {
// transaction is trying to replace some nonfinal transaction
// + it also depends on this transaction
// => this is definitely an error
return None;
}
}
// check if this is output from memory pool transaction
if let Some(ref output) = self.mempool_inputs.get(&hashed_prevout) {
if let Some(ref output) = **output {
return Some(output.clone());
}
}
// now check in storage
self.storage_provider.previous_transaction_output(prevout)
}
}
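Read together, the two impls give `previous_transaction_output` a fixed resolution order: dependent spends of virtually-removed transactions resolve to None, then inputs cached from the memory pool, then storage. A condensed standalone sketch of that precedence (simplified types, hypothetical names):

// Option<Option<T>> models a mempool_inputs entry: the outer Option is
// "is this an input of the verified transaction?", the inner one is
// "does the memory pool actually know the previous output?".
fn resolve(
	dependent_spend: bool,
	mempool_input: Option<Option<&'static str>>,
	storage: Option<&'static str>,
) -> Option<&'static str> {
	if dependent_spend {
		return None; // depends on a virtually-removed nonfinal transaction
	}
	if let Some(Some(output)) = mempool_input {
		return Some(output); // output of an in-mempool transaction
	}
	storage // finally, fall back to storage
}

fn main() {
	assert_eq!(resolve(true, Some(Some("mempool")), Some("storage")), None);
	assert_eq!(resolve(false, Some(Some("mempool")), Some("storage")), Some("mempool"));
	assert_eq!(resolve(false, Some(None), Some("storage")), Some("storage"));
}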
#[cfg(test)]
mod tests {
use std::sync::Arc;
use parking_lot::RwLock;
use chain::OutPoint;
use db::{self, TransactionOutputObserver, PreviousTransactionOutputProvider};
use miner::MemoryPool;
use test_data;
use super::MemoryPoolTransactionOutputProvider;
#[test]
fn when_transaction_depends_on_removed_nonfinal_transaction() {
let dchain = &mut test_data::ChainBuilder::new();
test_data::TransactionBuilder::with_output(10).store(dchain) // t0
.reset().set_input(&dchain.at(0), 0).add_output(20).lock().store(dchain) // nonfinal: t0[0] -> t1
.reset().set_input(&dchain.at(1), 0).add_output(30).store(dchain) // dependent: t0[0] -> t1[0] -> t2
.reset().set_input(&dchain.at(0), 0).add_output(40).store(dchain); // good replacement: t0[0] -> t3
let storage = Arc::new(db::TestStorage::with_genesis_block());
let memory_pool = Arc::new(RwLock::new(MemoryPool::new()));
{
memory_pool.write().insert_verified(dchain.at(0).into());
memory_pool.write().insert_verified(dchain.at(1).into());
memory_pool.write().insert_verified(dchain.at(2).into());
}
// when inserting t3:
// check that is_spent(t0[0]) == Some(false) (as it is spent by nonfinal t1)
// check that is_spent(t1[0]) == None (as t1 is virtually removed)
// check that is_spent(t2[0]) == None (as t2 is virtually removed)
// check that previous_transaction_output(t0[0]) = Some(_)
// check that previous_transaction_output(t1[0]) = None (as t1 is virtually removed)
// check that previous_transaction_output(t2[0]) = None (as t2 is virtually removed)
// =>
// if t3 is also depending on t1[0] || t2[0], it will be rejected by verification as missing inputs
let provider = MemoryPoolTransactionOutputProvider::for_transaction(storage, &memory_pool, &dchain.at(3)).unwrap();
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(false));
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None);
assert_eq!(provider.is_spent(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None);
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(0).hash(), index: 0, }), Some(dchain.at(0).outputs[0].clone()));
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(1).hash(), index: 0, }), None);
assert_eq!(provider.previous_transaction_output(&OutPoint { hash: dchain.at(2).hash(), index: 0, }), None);
}
}
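For orientation, a caller verifying a replacement transaction would combine the two traits roughly as follows. This is only a sketch: the helper is hypothetical, while the two provider methods are the ones implemented above.

use chain::Transaction;
use db::{TransactionOutputObserver, PreviousTransactionOutputProvider};

// hypothetical helper: accept a replacement only if every input resolves to a
// known output that is not reported as spent
fn can_accept_replacement(provider: &MemoryPoolTransactionOutputProvider, tx: &Transaction) -> bool {
	tx.inputs.iter().all(|input| {
		let prevout = &input.previous_output;
		// None => the input depends on a virtually removed non-final transaction
		provider.previous_transaction_output(prevout).is_some()
			// Some(false) also covers outputs spent only by replaced non-final transactions
			&& provider.is_spent(prevout) != Some(true)
	})
}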

View File

@ -0,0 +1,82 @@
use std::collections::HashMap;
use chain::BlockHeader;
use db::{BlockRef, BlockHeaderProvider};
use primitives::bytes::Bytes;
use primitives::hash::H256;
/// Block headers provider from `headers` message
pub struct MessageBlockHeadersProvider<'a> {
/// Synchronization chain headers provider
chain_provider: &'a BlockHeaderProvider,
/// headers offset
first_header_number: u32,
/// headers by hash
headers: HashMap<H256, BlockHeader>,
/// headers by order
headers_order: Vec<H256>,
}
impl<'a> MessageBlockHeadersProvider<'a> {
pub fn new(chain_provider: &'a BlockHeaderProvider, best_block_header_height: u32) -> Self {
MessageBlockHeadersProvider {
chain_provider: chain_provider,
first_header_number: best_block_header_height + 1,
headers: HashMap::new(),
headers_order: Vec::new(),
}
}
pub fn append_header(&mut self, hash: H256, header: BlockHeader) {
self.headers.insert(hash.clone(), header);
self.headers_order.push(hash);
}
}
impl<'a> BlockHeaderProvider for MessageBlockHeadersProvider<'a> {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
use ser::serialize;
self.block_header(block_ref).map(|h| serialize(&h))
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
self.chain_provider.block_header(block_ref.clone())
.or_else(move || match block_ref {
BlockRef::Hash(h) => self.headers.get(&h).cloned(),
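// headers received with the message are addressed by height: chain height
// first_header_number + i corresponds to headers_order[i]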
BlockRef::Number(n) => if n >= self.first_header_number && n - self.first_header_number < self.headers_order.len() as u32 {
let ref header_hash = self.headers_order[(n - self.first_header_number) as usize];
Some(self.headers[header_hash].clone())
} else {
None
},
})
}
}
#[cfg(test)]
mod tests {
use db::{self, AsSubstore, BlockHeaderProvider};
use test_data;
use primitives::hash::H256;
use super::MessageBlockHeadersProvider;
#[test]
fn test_message_block_headers_provider() {
let storage = db::TestStorage::with_genesis_block();
let storage_provider = storage.as_block_header_provider();
let mut headers_provider = MessageBlockHeadersProvider::new(storage_provider, 0);
assert_eq!(headers_provider.block_header(db::BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(db::BlockRef::Number(0)), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(db::BlockRef::Hash(H256::from(1))), None);
assert_eq!(headers_provider.block_header(db::BlockRef::Number(1)), None);
headers_provider.append_header(test_data::block_h1().hash(), test_data::block_h1().block_header);
assert_eq!(headers_provider.block_header(db::BlockRef::Hash(test_data::genesis().hash())), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(db::BlockRef::Number(0)), Some(test_data::genesis().block_header));
assert_eq!(headers_provider.block_header(db::BlockRef::Hash(test_data::block_h1().hash())), Some(test_data::block_h1().block_header));
assert_eq!(headers_provider.block_header(db::BlockRef::Number(1)), Some(test_data::block_h1().block_header));
assert_eq!(headers_provider.block_header(db::BlockRef::Hash(H256::from(1))), None);
assert_eq!(headers_provider.block_header(db::BlockRef::Number(2)), None);
}
}
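This provider is meant to back verification of a received `headers` message: message headers are layered on top of the storage view and are addressable both by hash and by height. A minimal usage sketch, assuming the `chain` provider, `best_height` and the `headers` vector come from the caller:

let mut provider = MessageBlockHeadersProvider::new(chain, best_height);
for header in headers {
	// headers are expected in message order, continuing the best header chain
	provider.append_header(header.hash(), header);
}
// lookups now resolve stored headers first, then the just-received ones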

sync/src/utils/mod.rs Normal file
View File

@ -0,0 +1,34 @@
mod average_speed_meter;
mod best_headers_chain;
mod bloom_filter;
mod compact_block_builder;
mod connection_filter;
mod fee_rate_filter;
mod hash_queue;
mod known_hash_filter;
mod memory_pool_transaction_provider;
mod message_block_headers_provider;
mod orphan_blocks_pool;
mod orphan_transactions_pool;
mod partial_merkle_tree;
mod storage_transaction_provider;
mod synchronization_state;
pub use self::average_speed_meter::AverageSpeedMeter;
pub use self::best_headers_chain::{BestHeadersChain, Information as BestHeadersChainInformation};
pub use self::bloom_filter::BloomFilter;
pub use self::compact_block_builder::build_compact_block;
pub use self::connection_filter::ConnectionFilter;
pub use self::fee_rate_filter::FeeRateFilter;
pub use self::hash_queue::{HashQueue, HashQueueChain, HashPosition};
pub use self::known_hash_filter::{KnownHashType, KnownHashFilter};
pub use self::memory_pool_transaction_provider::MemoryPoolTransactionOutputProvider;
pub use self::message_block_headers_provider::MessageBlockHeadersProvider;
pub use self::orphan_blocks_pool::OrphanBlocksPool;
pub use self::orphan_transactions_pool::{OrphanTransactionsPool, OrphanTransaction};
pub use self::partial_merkle_tree::{PartialMerkleTree, build_partial_merkle_tree};
pub use self::storage_transaction_provider::StorageTransactionOutputProvider;
pub use self::synchronization_state::SynchronizationState;
/// Block height type
pub type BlockHeight = u32;

View File

@ -40,19 +40,19 @@ impl OrphanBlocksPool
 	}

 	/// Insert orphaned block, for which we have already requested its parent block
-	pub fn insert_orphaned_block(&mut self, hash: H256, block: IndexedBlock) {
+	pub fn insert_orphaned_block(&mut self, block: IndexedBlock) {
 		self.orphaned_blocks
 			.entry(block.header.raw.previous_header_hash.clone())
 			.or_insert_with(HashMap::new)
-			.insert(hash, block);
+			.insert(block.header.hash.clone(), block);
 	}

 	/// Insert unknown block, for which we know nothing about its parent block
-	pub fn insert_unknown_block(&mut self, hash: H256, block: IndexedBlock) {
-		let previous_value = self.unknown_blocks.insert(hash.clone(), time::precise_time_s());
+	pub fn insert_unknown_block(&mut self, block: IndexedBlock) {
+		let previous_value = self.unknown_blocks.insert(block.header.hash.clone(), time::precise_time_s());
 		assert_eq!(previous_value, None);
-		self.insert_orphaned_block(hash, block);
+		self.insert_orphaned_block(block);
 	}

 	/// Remove all blocks, which are not-unknown
@ -66,11 +66,11 @@ impl OrphanBlocksPool
 	}

 	/// Remove all blocks, depending on this parent
-	pub fn remove_blocks_for_parent(&mut self, hash: &H256) -> Vec<(H256, IndexedBlock)> {
+	pub fn remove_blocks_for_parent(&mut self, hash: &H256) -> VecDeque<IndexedBlock> {
 		let mut queue: VecDeque<H256> = VecDeque::new();
 		queue.push_back(hash.clone());

-		let mut removed: Vec<(H256, IndexedBlock)> = Vec::new();
+		let mut removed: VecDeque<IndexedBlock> = VecDeque::new();
 		while let Some(parent_hash) = queue.pop_front() {
 			if let Entry::Occupied(entry) = self.orphaned_blocks.entry(parent_hash) {
 				let (_, orphaned) = entry.remove_entry();
@ -78,16 +78,16 @@ impl OrphanBlocksPool
 					self.unknown_blocks.remove(orphaned_hash);
 				}
 				queue.extend(orphaned.keys().cloned());
-				removed.extend(orphaned.into_iter());
+				removed.extend(orphaned.into_iter().map(|(_, b)| b));
 			}
 		}
 		removed
 	}

 	/// Remove blocks with given hashes + all dependent blocks
-	pub fn remove_blocks(&mut self, hashes: &HashSet<H256>) -> Vec<(H256, IndexedBlock)> {
+	pub fn remove_blocks(&mut self, hashes: &HashSet<H256>) -> Vec<IndexedBlock> {
 		// TODO: excess clone
-		let mut removed: Vec<(H256, IndexedBlock)> = Vec::new();
+		let mut removed: Vec<IndexedBlock> = Vec::new();
 		let parent_orphan_keys: Vec<_> = self.orphaned_blocks.keys().cloned().collect();
 		for parent_orphan_key in parent_orphan_keys {
 			if let Entry::Occupied(mut orphan_entry) = self.orphaned_blocks.entry(parent_orphan_key) {
@ -96,9 +96,10 @@ impl OrphanBlocksPool
 				let orphans_keys: HashSet<H256> = orphans.keys().cloned().collect();
 				for orphan_to_remove in orphans_keys.intersection(hashes) {
 					self.unknown_blocks.remove(orphan_to_remove);
-					removed.push((orphan_to_remove.clone(),
-						orphans.remove(orphan_to_remove).expect("iterating by intersection of orphans keys with hashes; removing from orphans; qed")
-					));
+					removed.push(
+						orphans.remove(orphan_to_remove)
+							.expect("iterating by intersection of orphans keys with hashes; removing from orphans; qed")
+					);
 				}
 				orphans.is_empty()
 			};
@ -137,7 +138,7 @@ mod tests
 		let b1 = test_data::block_h1();
 		let b1_hash = b1.hash();

-		pool.insert_orphaned_block(b1_hash.clone(), b1.into());
+		pool.insert_orphaned_block(b1.into());

 		assert_eq!(pool.len(), 1);
 		assert!(!pool.contains_unknown_block(&b1_hash));
@ -150,7 +151,7 @@ mod tests
 		let b1 = test_data::block_h1();
 		let b1_hash = b1.hash();

-		pool.insert_unknown_block(b1_hash.clone(), b1.into());
+		pool.insert_unknown_block(b1.into());

 		assert_eq!(pool.len(), 1);
 		assert!(pool.contains_unknown_block(&b1_hash));
@ -165,8 +166,8 @@ mod tests
 		let b2 = test_data::block_h169();
 		let b2_hash = b2.hash();

-		pool.insert_orphaned_block(b1_hash.clone(), b1.into());
-		pool.insert_unknown_block(b2_hash.clone(), b2.into());
+		pool.insert_orphaned_block(b1.into());
+		pool.insert_unknown_block(b2.into());

 		assert_eq!(pool.len(), 2);
 		assert!(!pool.contains_unknown_block(&b1_hash));
@ -191,14 +192,14 @@ mod tests
 		let b3 = test_data::block_h2();
 		let b3_hash = b3.hash();

-		pool.insert_orphaned_block(b1_hash.clone(), b1.into());
-		pool.insert_unknown_block(b2_hash.clone(), b2.into());
-		pool.insert_orphaned_block(b3_hash.clone(), b3.into());
+		pool.insert_orphaned_block(b1.into());
+		pool.insert_unknown_block(b2.into());
+		pool.insert_orphaned_block(b3.into());

 		let removed = pool.remove_blocks_for_parent(&test_data::genesis().hash());
 		assert_eq!(removed.len(), 2);
-		assert_eq!(removed[0].0, b1_hash);
-		assert_eq!(removed[1].0, b3_hash);
+		assert_eq!(removed[0].hash(), &b1_hash);
+		assert_eq!(removed[1].hash(), &b3_hash);

 		assert_eq!(pool.len(), 1);
 		assert!(!pool.contains_unknown_block(&b1_hash));
@ -219,13 +220,12 @@ mod tests
 		let b4 = test_data::block_h170();
 		let b4_hash = b4.hash();
 		let b5 = test_data::block_h181();
-		let b5_hash = b5.hash();

-		pool.insert_orphaned_block(b1_hash.clone(), b1.into());
-		pool.insert_orphaned_block(b2_hash.clone(), b2.into());
-		pool.insert_orphaned_block(b3_hash.clone(), b3.into());
-		pool.insert_orphaned_block(b4_hash.clone(), b4.into());
-		pool.insert_orphaned_block(b5_hash.clone(), b5.into());
+		pool.insert_orphaned_block(b1.into());
+		pool.insert_orphaned_block(b2.into());
+		pool.insert_orphaned_block(b3.into());
+		pool.insert_orphaned_block(b4.into());
+		pool.insert_orphaned_block(b5.into());

 		let mut blocks_to_remove: HashSet<H256> = HashSet::new();
 		blocks_to_remove.insert(b1_hash.clone());
@ -233,10 +233,10 @@ mod tests
 		let removed = pool.remove_blocks(&blocks_to_remove);
 		assert_eq!(removed.len(), 4);
-		assert!(removed.iter().any(|&(ref h, _)| h == &b1_hash));
-		assert!(removed.iter().any(|&(ref h, _)| h == &b2_hash));
-		assert!(removed.iter().any(|&(ref h, _)| h == &b3_hash));
-		assert!(removed.iter().any(|&(ref h, _)| h == &b4_hash));
+		assert!(removed.iter().any(|ref b| b.hash() == &b1_hash));
+		assert!(removed.iter().any(|ref b| b.hash() == &b2_hash));
+		assert!(removed.iter().any(|ref b| b.hash() == &b3_hash));
+		assert!(removed.iter().any(|ref b| b.hash() == &b4_hash));

 		assert_eq!(pool.len(), 1);
 	}
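After this change the pool derives the hash from the `IndexedBlock` itself, so call sites no longer thread a separate hash through. A brief sketch of the new API, assuming a pool, an indexed block and a parent hash in scope:

// the key is read from block.header.hash internally
pool.insert_orphaned_block(block);
// removal yields blocks directly; each hash is available via IndexedBlock::hash()
for orphan in pool.remove_blocks_for_parent(&parent_hash) {
	process_unorphaned_block(orphan); // hypothetical follow-up step
}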

View File

@ -2,7 +2,7 @@ use std::collections::{HashMap, HashSet, VecDeque};
 use std::collections::hash_map::Entry;
 use linked_hash_map::LinkedHashMap;
 use time;
-use chain::Transaction;
+use chain::IndexedTransaction;
 use primitives::hash::H256;

 #[derive(Debug)]
@ -21,7 +21,7 @@ pub struct OrphanTransaction
 	/// Time when this transaction was inserted to the pool
 	pub insertion_time: f64,
 	/// Transaction itself
-	pub transaction: Transaction,
+	pub transaction: IndexedTransaction,
 	/// Parent transactions, which are still unknown to us
 	pub unknown_parents: HashSet<H256>,
 }
@ -47,31 +47,33 @@ impl OrphanTransactionsPool
 	}

 	/// Check if pool contains this transaction
-	pub fn contains(&mut self, hash: &H256) -> bool {
+	pub fn contains(&self, hash: &H256) -> bool {
 		self.by_hash.contains_key(hash)
 	}

 	/// Insert orphan transaction
-	pub fn insert(&mut self, hash: H256, transaction: Transaction, unknown_parents: HashSet<H256>) {
-		assert!(!self.by_hash.contains_key(&hash));
-		assert!(unknown_parents.iter().all(|h| transaction.inputs.iter().any(|i| &i.previous_output.hash == h)));
+	pub fn insert(&mut self, transaction: IndexedTransaction, unknown_parents: HashSet<H256>) {
+		assert!(!self.by_hash.contains_key(&transaction.hash));
+		assert!(unknown_parents.iter().all(|h| transaction.raw.inputs.iter().any(|i| &i.previous_output.hash == h)));

 		for unknown_parent in &unknown_parents {
 			self.by_parent.entry(unknown_parent.clone())
 				.or_insert_with(HashSet::new)
-				.insert(hash.clone());
+				.insert(transaction.hash.clone());
 		}
+
+		let hash = transaction.hash.clone();
 		self.by_hash.insert(hash, OrphanTransaction::new(transaction, unknown_parents));
 	}

 	/// Remove all transactions, depending on this parent
-	pub fn remove_transactions_for_parent(&mut self, hash: &H256) -> Vec<(H256, Transaction)> {
+	pub fn remove_transactions_for_parent(&mut self, hash: &H256) -> Vec<IndexedTransaction> {
 		assert!(!self.by_hash.contains_key(hash));

 		let mut removal_queue: VecDeque<H256> = VecDeque::new();
 		removal_queue.push_back(hash.clone());

-		let mut removed_orphans: Vec<(H256, Transaction)> = Vec::new();
+		let mut removed_orphans: Vec<IndexedTransaction> = Vec::new();
 		while let Some(hash) = removal_queue.pop_front() {
 			// remove direct children of hash
 			let mut removed_orphans_hashes: Vec<H256> = Vec::new();
@ -84,7 +86,7 @@ impl OrphanTransactionsPool
 				if all_parents_are_known {
 					removed_orphans_hashes.push(child.clone());
-					removed_orphans.push((child.clone(), self.by_hash.remove(child).expect("checked couple of lines above").transaction));
+					removed_orphans.push(self.by_hash.remove(child).expect("checked couple of lines above").transaction);
 				}
 			}
@ -99,11 +101,11 @@ impl OrphanTransactionsPool
 	}

 	/// Remove transactions with given hashes + all dependent transactions
-	pub fn remove_transactions(&mut self, hashes: &[H256]) -> Vec<(H256, Transaction)> {
-		let mut removed: Vec<(H256, Transaction)> = Vec::new();
+	pub fn remove_transactions(&mut self, hashes: &[H256]) -> Vec<IndexedTransaction> {
+		let mut removed: Vec<IndexedTransaction> = Vec::new();
 		for hash in hashes {
 			if let Some(transaction) = self.by_hash.remove(hash) {
-				removed.push((hash.clone(), transaction.transaction));
+				removed.push(transaction.transaction);
 			}

 			removed.extend(self.remove_transactions_for_parent(hash));
 		}
@ -113,7 +115,7 @@ impl OrphanTransaction
 impl OrphanTransaction {
 	/// Create new orphaned transaction
-	pub fn new(transaction: Transaction, unknown_parents: HashSet<H256>) -> Self {
+	pub fn new(transaction: IndexedTransaction, unknown_parents: HashSet<H256>) -> Self {
 		OrphanTransaction {
 			insertion_time: time::precise_time_s(),
 			transaction: transaction,
@ -154,19 +156,19 @@ mod tests
 		let t5_unknown: HashSet<H256> = chain.at(4).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();

 		let mut pool = OrphanTransactionsPool::new();
-		pool.insert(chain.at(1).hash(), chain.at(1), t2_unknown); // t2
-		pool.insert(chain.at(2).hash(), chain.at(2), t3_unknown); // t3
-		pool.insert(chain.at(4).hash(), chain.at(4), t5_unknown); // t5
+		pool.insert(chain.at(1).into(), t2_unknown); // t2
+		pool.insert(chain.at(2).into(), t3_unknown); // t3
+		pool.insert(chain.at(4).into(), t5_unknown); // t5
 		assert_eq!(pool.len(), 3);

 		let removed = pool.remove_transactions_for_parent(&chain.at(0).hash());
 		assert_eq!(pool.len(), 1);
-		let removed: Vec<H256> = removed.into_iter().map(|(h, _)| h).collect();
+		let removed: Vec<H256> = removed.into_iter().map(|tx| tx.hash).collect();
 		assert_eq!(removed, vec![chain.at(1).hash(), chain.at(2).hash()]);

 		let removed = pool.remove_transactions_for_parent(&chain.at(3).hash());
 		assert_eq!(pool.len(), 0);
-		let removed: Vec<H256> = removed.into_iter().map(|(h, _)| h).collect();
+		let removed: Vec<H256> = removed.into_iter().map(|tx| tx.hash).collect();
 		assert_eq!(removed, vec![chain.at(4).hash()]);
 	}
@ -186,20 +188,20 @@ mod tests
 		let t7_unknown: HashSet<H256> = chain.at(6).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();

 		let mut pool = OrphanTransactionsPool::new();
-		pool.insert(chain.at(1).hash(), chain.at(1), t2_unknown); // t2
-		pool.insert(chain.at(2).hash(), chain.at(2), t3_unknown); // t3
-		pool.insert(chain.at(4).hash(), chain.at(4), t5_unknown); // t5
-		pool.insert(chain.at(6).hash(), chain.at(6), t7_unknown); // t7
+		pool.insert(chain.at(1).into(), t2_unknown); // t2
+		pool.insert(chain.at(2).into(), t3_unknown); // t3
+		pool.insert(chain.at(4).into(), t5_unknown); // t5
+		pool.insert(chain.at(6).into(), t7_unknown); // t7
 		assert_eq!(pool.len(), 4);

 		let removed = pool.remove_transactions(&vec![chain.at(1).hash(), chain.at(3).hash()]);
 		assert_eq!(pool.len(), 1);
-		let removed: Vec<H256> = removed.into_iter().map(|(h, _)| h).collect();
+		let removed: Vec<H256> = removed.into_iter().map(|tx| tx.hash).collect();
 		assert_eq!(removed, vec![chain.at(1).hash(), chain.at(2).hash(), chain.at(4).hash()]);

 		let removed = pool.remove_transactions(&vec![chain.at(6).hash()]);
 		assert_eq!(pool.len(), 0);
-		let removed: Vec<H256> = removed.into_iter().map(|(h, _)| h).collect();
+		let removed: Vec<H256> = removed.into_iter().map(|tx| tx.hash).collect();
 		assert_eq!(removed, vec![chain.at(6).hash()]);
 	}
 }
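The same `IndexedTransaction` migration applies here: the hash now travels with the transaction. A sketch of inserting an orphan with its unknown parents, where `is_known_transaction` is a hypothetical stand-in for a chain/mempool lookup:

use std::collections::HashSet;
use primitives::hash::H256;

// unknown parents must be a subset of the input prevout hashes;
// the pool asserts this on insert
let unknown_parents: HashSet<H256> = tx.raw.inputs.iter()
	.map(|input| input.previous_output.hash.clone())
	.filter(|hash| !is_known_transaction(hash))
	.collect();
pool.insert(tx, unknown_parents);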

View File

@ -0,0 +1,281 @@
use std::cmp::min;
use bit_vec::BitVec;
use chain::merkle_node_hash;
use primitives::hash::H256;
/// Partial merkle tree
pub struct PartialMerkleTree {
/// Total number of transactions
pub tx_count: usize,
/// Nodes hashes
pub hashes: Vec<H256>,
/// Match flags
pub flags: BitVec,
}
/// Partial merkle tree parse result
#[cfg(test)]
pub struct ParsedPartialMerkleTree {
/// Merkle root
pub root: H256,
/// Matched hashes
pub hashes: Vec<H256>,
/// Match flags
pub flags: BitVec,
}
/// Build partial merkle tree
pub fn build_partial_merkle_tree(tx_hashes: Vec<H256>, tx_matches: BitVec) -> PartialMerkleTree {
PartialMerkleTreeBuilder::build(tx_hashes, tx_matches)
}
/// Parse partial merkle tree
#[cfg(test)]
pub fn parse_partial_merkle_tree(tree: PartialMerkleTree) -> Result<ParsedPartialMerkleTree, String> {
PartialMerkleTreeBuilder::parse(tree)
}
/// Service structure to construct `merkleblock` message.
struct PartialMerkleTreeBuilder {
/// All transactions length.
all_len: usize,
/// All transactions hashes.
all_hashes: Vec<H256>,
/// Match flags for all transactions.
all_matches: BitVec,
/// Partial hashes.
hashes: Vec<H256>,
/// Partial match flags.
matches: BitVec,
}
impl PartialMerkleTree {
/// Create new merkle tree with given data
pub fn new(tx_count: usize, hashes: Vec<H256>, flags: BitVec) -> Self {
PartialMerkleTree {
tx_count: tx_count,
hashes: hashes,
flags: flags,
}
}
}
#[cfg(test)]
impl ParsedPartialMerkleTree {
pub fn new(root: H256, hashes: Vec<H256>, flags: BitVec) -> Self {
ParsedPartialMerkleTree {
root: root,
hashes: hashes,
flags: flags,
}
}
}
impl PartialMerkleTreeBuilder {
/// Build partial merkle tree as described here:
/// https://bitcoin.org/en/developer-reference#creating-a-merkleblock-message
pub fn build(all_hashes: Vec<H256>, all_matches: BitVec) -> PartialMerkleTree {
let mut partial_merkle_tree = PartialMerkleTreeBuilder {
all_len: all_hashes.len(),
all_hashes: all_hashes,
all_matches: all_matches,
hashes: Vec::new(),
matches: BitVec::new(),
};
partial_merkle_tree.build_tree();
PartialMerkleTree::new(partial_merkle_tree.all_len, partial_merkle_tree.hashes, partial_merkle_tree.matches)
}
#[cfg(test)]
/// Parse partial merkle tree as described here:
/// https://bitcoin.org/en/developer-reference#parsing-a-merkleblock-message
pub fn parse(tree: PartialMerkleTree) -> Result<ParsedPartialMerkleTree, String> {
let mut partial_merkle_tree = PartialMerkleTreeBuilder {
all_len: tree.tx_count,
all_hashes: Vec::new(),
all_matches: BitVec::from_elem(tree.tx_count, false),
hashes: tree.hashes,
matches: tree.flags,
};
let merkle_root = try!(partial_merkle_tree.parse_tree());
Ok(ParsedPartialMerkleTree::new(merkle_root, partial_merkle_tree.all_hashes, partial_merkle_tree.all_matches))
}
fn build_tree(&mut self) {
let tree_height = self.tree_height();
self.build_branch(tree_height, 0)
}
#[cfg(test)]
fn parse_tree(&mut self) -> Result<H256, String> {
if self.all_len == 0 {
return Err("no transactions".into());
}
if self.hashes.len() > self.all_len {
return Err("too many hashes".into());
}
if self.matches.len() < self.hashes.len() {
return Err("too few matches".into());
}
// parse tree
let mut matches_used = 0usize;
let mut hashes_used = 0usize;
let tree_height = self.tree_height();
let merkle_root = try!(self.parse_branch(tree_height, 0, &mut matches_used, &mut hashes_used));
if matches_used != self.matches.len() {
return Err("not all matches used".into());
}
if hashes_used != self.hashes.len() {
return Err("not all hashes used".into());
}
Ok(merkle_root)
}
fn build_branch(&mut self, height: usize, pos: usize) {
// determine whether this node is the parent of at least one matched txid
let transactions_begin = pos << height;
let transactions_end = min(self.all_len, (pos + 1) << height);
let flag = (transactions_begin..transactions_end).any(|idx| self.all_matches[idx]);
// remember flag
self.matches.push(flag);
// proceed with descendants
if height == 0 || !flag {
// we're at the leaf level || there is no match
let hash = self.branch_hash(height, pos);
self.hashes.push(hash);
} else {
// proceed with left child
self.build_branch(height - 1, pos << 1);
// proceed with right child if any
if (pos << 1) + 1 < self.level_width(height - 1) {
self.build_branch(height - 1, (pos << 1) + 1);
}
}
}
#[cfg(test)]
fn parse_branch(&mut self, height: usize, pos: usize, matches_used: &mut usize, hashes_used: &mut usize) -> Result<H256, String> {
if *matches_used >= self.matches.len() {
return Err("all matches used".into());
}
let flag = self.matches[*matches_used];
*matches_used += 1;
if height == 0 || !flag {
// we're at the leaf level || there is no match
if *hashes_used >= self.hashes.len() {
return Err("all hashes used".into());
}
// get node hash
let ref hash = self.hashes[*hashes_used];
*hashes_used += 1;
// on leaf level && matched flag set => mark transaction as matched
if height == 0 && flag {
self.all_hashes.push(hash.clone());
self.all_matches.set(pos, true);
}
Ok(hash.clone())
} else {
// proceed with left child
let left = try!(self.parse_branch(height - 1, pos << 1, matches_used, hashes_used));
// proceed with right child if any
let has_right_child = (pos << 1) + 1 < self.level_width(height - 1);
let right = if has_right_child {
try!(self.parse_branch(height - 1, (pos << 1) + 1, matches_used, hashes_used))
} else {
left.clone()
};
if has_right_child && left == right {
Err("met same hash twice".into())
} else {
Ok(merkle_node_hash(&left, &right))
}
}
}
fn tree_height(&self) -> usize {
let mut height = 0usize;
while self.level_width(height) > 1 {
height += 1;
}
height
}
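/// Width of the tree level at the given height, i.e. ceil(all_len / 2^height);
/// e.g. for 5 transactions the widths are 5, 3, 2, 1 at heights 0, 1, 2, 3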
fn level_width(&self, height: usize) -> usize {
(self.all_len + (1 << height) - 1) >> height
}
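/// Hash of the node at (height, pos); when a level has odd width, the last
/// hash is paired with itself, following Bitcoin's merkle tree convention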
fn branch_hash(&self, height: usize, pos: usize) -> H256 {
if height == 0 {
self.all_hashes[pos].clone()
} else {
let left = self.branch_hash(height - 1, pos << 1);
let right = if (pos << 1) + 1 < self.level_width(height - 1) {
self.branch_hash(height - 1, (pos << 1) + 1)
} else {
left.clone()
};
merkle_node_hash(&left, &right)
}
}
}
#[cfg(test)]
mod tests {
use chain::{Transaction, merkle_root};
use primitives::hash::H256;
use test_data;
use super::{build_partial_merkle_tree, parse_partial_merkle_tree};
#[test]
// test from core implementation (slow)
// https://github.com/bitcoin/bitcoin/blob/master/src/test/pmt_tests.cpp
fn test_build_merkle_block() {
use bit_vec::BitVec;
use rand::{Rng, SeedableRng, StdRng};
let rng_seed: &[_] = &[0, 0, 0, 0];
let mut rng: StdRng = SeedableRng::from_seed(rng_seed);
// for various transaction counts
let tx_counts: Vec<usize> = vec![1, 4, 7, 17, 56, 100, 127, 256, 312, 513, 1000, 4095];
for tx_count in tx_counts {
// build block with given transactions number
let transactions: Vec<Transaction> = (0..tx_count).map(|n| test_data::TransactionBuilder::with_version(n as i32).into()).collect();
let hashes: Vec<_> = transactions.iter().map(|t| t.hash()).collect();
let merkle_root = merkle_root(&hashes);
// mark different transactions as matched
for seed_tweak in 1..15 {
let mut matches: BitVec = BitVec::with_capacity(tx_count);
let mut matched_hashes: Vec<H256> = Vec::with_capacity(tx_count);
for i in 0usize..tx_count {
let is_match = (rng.gen::<u32>() & ((1 << (seed_tweak / 2)) - 1)) == 0;
matches.push(is_match);
if is_match {
matched_hashes.push(hashes[i].clone());
}
}
// build partial merkle tree
let partial_tree = build_partial_merkle_tree(hashes.clone(), matches.clone());
// parse tree back
let parsed_tree = parse_partial_merkle_tree(partial_tree).expect("no error");
assert_eq!(matched_hashes, parsed_tree.hashes);
assert_eq!(matches, parsed_tree.flags);
assert_eq!(merkle_root, parsed_tree.root);
}
}
}
}

View File

@ -0,0 +1,31 @@
use chain::{TransactionOutput, OutPoint};
use db::{PreviousTransactionOutputProvider, TransactionOutputObserver};
use super::super::types::StorageRef;
/// Transaction output observer, which looks into storage
pub struct StorageTransactionOutputProvider {
/// Storage reference
storage: StorageRef,
}
impl StorageTransactionOutputProvider {
pub fn with_storage(storage: StorageRef) -> Self {
StorageTransactionOutputProvider {
storage: storage,
}
}
}
impl TransactionOutputObserver for StorageTransactionOutputProvider {
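// None means the transaction (or the requested output index) is unknown to
// the storage; Some(flag) is the spent flag taken from the transaction meta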
fn is_spent(&self, prevout: &OutPoint) -> Option<bool> {
self.storage
.transaction_meta(&prevout.hash)
.and_then(|tm| tm.is_spent(prevout.index as usize))
}
}
impl PreviousTransactionOutputProvider for StorageTransactionOutputProvider {
fn previous_transaction_output(&self, prevout: &OutPoint) -> Option<TransactionOutput> {
self.storage.as_previous_transaction_output_provider().previous_transaction_output(prevout)
}
}

View File

@ -0,0 +1,40 @@
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use super::super::types::{StorageRef, BlockHeight};
// AtomicU32 is unstable => using AtomicUsize here
/// Shared synchronization client state.
/// It can be slightly inaccurate, but precise values are not required here
#[derive(Debug)]
pub struct SynchronizationState {
/// Is synchronization in progress?
is_synchronizing: AtomicBool,
/// Height of best block in the storage
best_storage_block_height: AtomicUsize,
}
impl SynchronizationState {
pub fn with_storage(storage: StorageRef) -> Self {
let best_storage_block_height = storage.best_block().map(|b| b.number).unwrap_or(0);
SynchronizationState {
is_synchronizing: AtomicBool::new(false),
best_storage_block_height: AtomicUsize::new(best_storage_block_height as usize),
}
}
pub fn synchronizing(&self) -> bool {
self.is_synchronizing.load(Ordering::SeqCst)
}
pub fn update_synchronizing(&self, synchronizing: bool) {
self.is_synchronizing.store(synchronizing, Ordering::SeqCst);
}
pub fn best_storage_block_height(&self) -> BlockHeight {
self.best_storage_block_height.load(Ordering::SeqCst) as BlockHeight
}
pub fn update_best_storage_block_height(&self, height: BlockHeight) {
self.best_storage_block_height.store(height as usize, Ordering::SeqCst);
}
}
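A short usage sketch for the shared state; the surrounding Arc plumbing and the exact call sites are assumptions about the calling code:

use std::sync::Arc;

let state = Arc::new(SynchronizationState::with_storage(storage.clone()));
// sync client: record the new best block height once a block is accepted
state.update_best_storage_block_height(new_best_height);
// message handlers: cheap atomic reads, no locks required
if state.synchronizing() {
	// e.g. postpone relaying announcements until synchronization finishes
}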

View File

@ -1,7 +1,7 @@
 use primitives::hash::H256;
 use ser::Serializable;
 use primitives::bytes::Bytes;
-use chain::{Transaction, TransactionInput, TransactionOutput, OutPoint};
+use chain::{Transaction, IndexedTransaction, TransactionInput, TransactionOutput, OutPoint};

 #[derive(Debug, Default, Clone)]
 pub struct ChainBuilder {
@ -39,6 +39,15 @@ impl Into<Transaction> for TransactionBuilder
 	}
 }

+impl Into<IndexedTransaction> for TransactionBuilder {
+	fn into(self) -> IndexedTransaction {
+		IndexedTransaction {
+			hash: self.transaction.hash(),
+			raw: self.transaction,
+		}
+	}
+}
+
 impl TransactionBuilder {
 	pub fn with_version(version: i32) -> TransactionBuilder {
 		let builder = TransactionBuilder::default();