deleted obsolete sync_v2

Svyatoslav Nikolsky 2016-12-23 16:14:49 +03:00
parent d696dcfdaf
commit cee9ce38f9
35 changed files with 0 additions and 5020 deletions

View File

@@ -1,7 +0,0 @@
1. block_locator_hashes
2. known_hash_filter is currently never populated anywhere
3. relay new *
4. &block.header.hash -> block.hash()
5. IndexedBlock && IndexedTransaction everywhere
6. verifier should be replaced with single-thread mempool + futures to avoid sinks + custom sinks + local accept transaction
7. fix error - for_transaction must be called for each transaction verification

View File

@@ -25,7 +25,6 @@ extern crate ethcore_devtools as devtools;
extern crate rand;
extern crate network;
//mod v2;
mod best_headers_chain;
mod blocks_writer;
mod connection_filter;

View File

@@ -1,163 +0,0 @@
use message::types;
use p2p::{InboundSyncConnection, InboundSyncConnectionRef};
use types::{LocalNodeRef, PeersRef, PeerIndex, RequestId};
/// Inbound synchronization connection
#[derive(Debug)]
pub struct InboundConnection {
/// Index of peer for this connection
peer_index: PeerIndex,
/// Peers reference
peers: PeersRef,
/// Reference to synchronization node
node: LocalNodeRef,
}
impl InboundConnection {
/// Create new inbound connection
pub fn new(peer_index: PeerIndex, peers: PeersRef, node: LocalNodeRef) -> InboundConnection {
InboundConnection {
peer_index: peer_index,
peers: peers,
node: node,
}
}
/// Box inbound connection
pub fn boxed(self) -> InboundSyncConnectionRef {
Box::new(self)
}
}
impl InboundSyncConnection for InboundConnection {
fn start_sync_session(&self, version: u32) {
self.node.on_connect(self.peer_index, version);
}
fn close_session(&self) {
self.node.on_disconnect(self.peer_index);
}
fn on_inventory(&self, message: types::Inv) {
// if inventory is empty - just ignore this message
if message.inventory.is_empty() {
return;
}
// if inventory length is too big => possible DOS
if message.inventory.len() > types::INV_MAX_INVENTORY_LEN {
self.peers.dos(self.peer_index, format!("'inv' message contains {} entries", message.inventory.len()));
return;
}
self.node.on_inventory(self.peer_index, message);
}
fn on_getdata(&self, message: types::GetData) {
// if inventory is empty - just ignore this message
if message.inventory.is_empty() {
return;
}
// if inventory length is too big => possible DOS
if message.inventory.len() > types::GETDATA_MAX_INVENTORY_LEN {
self.peers.dos(self.peer_index, format!("'getdata' message contains {} entries", message.inventory.len()));
return;
}
self.node.on_getdata(self.peer_index, message);
}
fn on_getblocks(&self, message: types::GetBlocks) {
self.node.on_getblocks(self.peer_index, message);
}
fn on_getheaders(&self, message: types::GetHeaders, id: RequestId) {
self.node.on_getheaders(self.peer_index, message, id);
}
fn on_transaction(&self, message: types::Tx) {
self.node.on_transaction(self.peer_index, message.transaction.into());
}
fn on_block(&self, message: types::Block) {
self.node.on_block(self.peer_index, message.block.into());
}
fn on_headers(&self, message: types::Headers) {
// if headers are empty - just ignore this message
if message.headers.is_empty() {
return;
}
// if there are too many headers => possible DOS
if message.headers.len() > types::HEADERS_MAX_HEADERS_LEN {
self.peers.dos(self.peer_index, format!("'headers' message contains {} headers", message.headers.len()));
return;
}
self.node.on_headers(self.peer_index, message);
}
fn on_mempool(&self, message: types::MemPool) {
self.node.on_peer_mempool(self.peer_index, message);
}
fn on_filterload(&self, message: types::FilterLoad) {
// if filter is too big => possible DOS
if message.filter.0.len() > types::FILTERLOAD_MAX_FILTER_LEN {
self.peers.dos(self.peer_index, format!("'filterload' message contains {}-len filter", message.filter.0.len()));
return;
}
// if too many hash functions => possible DOS
if message.hash_functions > types::FILTERLOAD_MAX_HASH_FUNCS {
self.peers.dos(self.peer_index, format!("'filterload' message contains {} hash functions", message.hash_functions));
return;
}
self.node.on_peer_filterload(self.peer_index, message);
}
fn on_filteradd(&self, message: types::FilterAdd) {
// if filter item is too big => possible DOS
if message.data.0.len() > types::FILTERADD_MAX_DATA_LEN {
self.peers.dos(self.peer_index, format!("'filteradd' message contains {}-len data item", message.data.0.len()));
return;
}
self.node.on_peer_filteradd(self.peer_index, message);
}
fn on_filterclear(&self, message: types::FilterClear) {
self.node.on_peer_filterclear(self.peer_index, message);
}
fn on_merkleblock(&self, message: types::MerkleBlock) {
self.node.on_peer_merkleblock(self.peer_index, message);
}
fn on_sendheaders(&self, message: types::SendHeaders) {
self.node.on_peer_sendheaders(self.peer_index, message);
}
fn on_feefilter(&self, message: types::FeeFilter) {
self.node.on_peer_feefilter(self.peer_index, message);
}
fn on_send_compact(&self, message: types::SendCompact) {
self.node.on_peer_send_compact(self.peer_index, message);
}
fn on_compact_block(&self, message: types::CompactBlock) {
self.node.on_peer_compact_block(self.peer_index, message);
}
fn on_get_block_txn(&self, message: types::GetBlockTxn) {
self.node.on_peer_get_block_txn(self.peer_index, message);
}
fn on_block_txn(&self, message: types::BlockTxn) {
self.node.on_peer_block_txn(self.peer_index, message);
}
fn on_notfound(&self, message: types::NotFound) {
self.node.on_peer_notfound(self.peer_index, message);
}
}

View File

@@ -1,46 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use p2p::{LocalSyncNode, LocalSyncNodeRef, OutboundSyncConnectionRef, InboundSyncConnectionRef};
use inbound_connection::InboundConnection;
use types::{LocalNodeRef, PeersRef};
/// Inbound synchronization connection factory
#[derive(Debug)]
pub struct InboundConnectionFactory {
/// Peers reference
peers: PeersRef,
/// Reference to synchronization node
node: LocalNodeRef,
/// Throughout counter of synchronization peers
counter: AtomicUsize,
}
impl InboundConnectionFactory {
/// Create new inbound connection factory
pub fn new(peers: PeersRef, node: LocalNodeRef) -> Self {
InboundConnectionFactory {
peers: peers,
node: node,
counter: AtomicUsize::new(0),
}
}
/// Box inbound connection factory
pub fn boxed(self) -> LocalSyncNodeRef {
Box::new(self)
}
}
impl LocalSyncNode for InboundConnectionFactory {
fn start_height(&self) -> i32 {
// this is not used currently
0
}
fn create_sync_session(&self, _best_block_height: i32, outbound_connection: OutboundSyncConnectionRef) -> InboundSyncConnectionRef {
let peer_index = self.counter.fetch_add(1, Ordering::SeqCst) + 1;
trace!(target: "sync", "Creating new sync session with peer#{}", peer_index);
// remember outbound connection
self.peers.insert(peer_index, outbound_connection);
// create new inbound connection
InboundConnection::new(peer_index, self.peers.clone(), self.node.clone()).boxed()
}
}
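// Illustrative wiring (assumed API on the p2p side; not part of this diff):
// the p2p layer owns a `LocalSyncNodeRef` and calls `create_sync_session`
// once per new connection, getting the boxed `InboundConnection` back:
//
// let local_node: LocalSyncNodeRef =
//     InboundConnectionFactory::new(peers.clone(), node.clone()).boxed();
// let inbound = local_node.create_sync_session(0, outbound_connection);
// inbound.start_sync_session(70014); // protocol version is a placeholder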

View File

@@ -1,16 +0,0 @@
mod inbound_connection;
mod inbound_connection_factory;
mod synchronization_blocks_queue;
mod synchronization_client;
mod synchronization_client_core;
mod synchronization_executor;
mod synchronization_filter;
mod synchronization_manager;
mod synchronization_node;
mod synchronization_peers;
mod synchronization_peers_tasks;
mod synchronization_server;
mod synchronization_transactions_queue;
mod synchronization_verifier;
mod types;
mod utils;

View File

@@ -1,126 +0,0 @@
use std::collections::HashSet;
use chain::{IndexedTransaction, IndexedBlockHeader, IndexedBlock};
use db::BestBlock;
use primitives::hash::H256;
// NOTE: assumed import paths; the original sketch used these names without
// importing them:
use best_headers_chain::{BestHeadersChain as BlockHeaderChain, Information as BestHeadersInformation};
use miner::MemoryPoolInformation;
use types::{BlockHeight, StorageRef};
use utils::{HashPosition, HashQueueChain, OrphanBlocksPool, UnknownBlocksPool};
/// Index of 'verifying' queue
const VERIFYING_QUEUE: usize = 0;
/// Index of 'requested' queue
const REQUESTED_QUEUE: usize = 1;
/// Index of 'scheduled' queue
const SCHEDULED_QUEUE: usize = 2;
/// Number of hash queues
const NUMBER_OF_QUEUES: usize = 3;
/// Block synchronization state
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum BlockState {
/// Block is unknown
Unknown,
/// Block is orphan
Orphan,
/// This block has been marked as dead-end block
DeadEnd,
/// Scheduled for requesting
Scheduled,
/// Requested from peers
Requested,
/// Currently verifying
Verifying,
/// In storage
Stored,
}
/// Block insertion result
#[derive(Debug, Default, PartialEq)]
pub struct BlockInsertionResult {
/// Hashes of blocks, which were canonized during this insertion procedure. Order matters
pub canonized_blocks_hashes: Vec<H256>,
/// Transaction to 'reverify'. Order matters
pub transactions_to_reverify: Vec<IndexedTransaction>,
}
/// Synchronization chain information
pub struct Information {
/// Number of blocks hashes currently scheduled for requesting
pub scheduled: BlockHeight,
/// Number of blocks hashes currently requested from peers
pub requested: BlockHeight,
/// Number of blocks currently verifying
pub verifying: BlockHeight,
/// Number of blocks in the storage
pub stored: BlockHeight,
/// Information on memory pool
pub transactions: MemoryPoolInformation,
/// Information on headers chain
pub headers: BestHeadersInformation,
}
/// Blockchain blocks from the synchronization point of view, consisting of:
/// 1) all blocks from the `storage` [oldest blocks]
/// 2) all blocks currently verifying by `synchronization_verifier`
/// 3) all blocks currently requested from peers
/// 4) all blocks currently scheduled for requesting [newest blocks]
pub trait BlocksQueue {
/// Returns queue information
fn information(&self) -> Information;
/// Returns state of given block
fn state(&self, hash: &H256) -> BlockState;
/// Insert dead-end block
fn dead_end_block(&mut self, hash: H256);
/// Schedule blocks for retrieval
fn schedule(&mut self, headers: &[IndexedBlockHeader]);
/// Forget block
fn forget(&mut self, hash: &H256);
/// Forget block, but leave header
fn forget_leave_header(&mut self, hash: &H256) -> HashPosition;
/// Append blocks header to verification queue
fn verify(&mut self, header: IndexedBlockHeader);
/// Remove orphan blocks for header
fn remove_blocks_for_parent(&mut self, hash: &H256) -> Vec<IndexedBlock>;
/*
/// Returns best known block information
fn best_block() -> BestBlock;
/// Returns number of blocks with given state
fn state_length(&self, state: BlockState) -> BlockHeight;
/// Returns `n` best blocks of the given state
fn front_n_of_state(&self, state: BlockState, n: BlockHeight) -> Vec<H256>;
/// Returns block locator hashes
fn block_locator_hashes(&self) -> Vec<H256>;
/// Schedule blocks headers for retrieval
fn schedule_blocks_headers(&mut self, headers: Vec<IndexedBlockHeader>);
/// Request `n` blocks headers
fn request_blocks_hashes(&mut self, n: u32) -> Vec<H256>;
/// Append blocks header to verification queue
fn verify_block(&mut self, header: IndexedBlockHeader);
/// Insert verified block to storage
fn insert_verified_block(&mut self, block: IndexedBlock) -> Result<BlockInsertionResult, DbError>;
/// Forget block hash
fn forget(&mut self, hash: &H256) -> HashPosition;
/// Forget block hash && header
fn forget_leave_header(&mut self, hash: &H256) -> HashPosition;
/// Forget block with children
fn forget_with_children(&mut self, hash: &H256);*/
}
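// A minimal sketch (assumed, not part of the original file) of how the three
// hash-queue indexes above are meant to map onto `BlockState`:
fn queue_index_to_state(queue_index: usize) -> BlockState {
match queue_index {
VERIFYING_QUEUE => BlockState::Verifying,
REQUESTED_QUEUE => BlockState::Requested,
SCHEDULED_QUEUE => BlockState::Scheduled,
_ => BlockState::Unknown,
}
}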
/// Blocks queue implementation
pub struct BlocksQueueImpl {
/// Genesis block hash
genesis_block_hash: H256,
/// Best storage block (stored for optimizations)
best_storage_block: BestBlock,
/// Dead-end blocks
dead_end: HashSet<H256>,
/// Unknown blocks
unknown_blocks: UnknownBlocksPool,
/// Orphaned blocks
orphan_pool: OrphanBlocksPool,
/// Hashes chain
hash_queue: HashQueueChain,
/// Headers chain
headers_queue: BlockHeaderChain,
/// Storage reference
storage: StorageRef,
}

View File

@@ -1,88 +0,0 @@
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use synchronization_client_core::ClientCore;
// NOTE: assumed import path; the original sketch used these names without
// importing them:
use types::{ClientCoreRef, ClientStateRef, PeerIndex, VerifierRef};
/// Shared client state
pub struct ClientState {
/// Is synchronizing flag
is_synchronizing: Arc<AtomicBool>,
}
/// Synchronization client trait
pub trait Client {
/// Are we currently synchronizing
fn is_synchronizing(&self) -> bool;
/// Called upon receiving inventory message
fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv);
/// Called upon receiving headers message
fn on_headers(&self, peer_index: PeerIndex, message: types::Headers);
/// Called upon receiving notfound message
fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound);
/// Called upon receiving block message
fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock);
/// Called upon receiving transaction message
fn on_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction);
/// Called when connection is closed
fn on_disconnect(&self, peer_index: PeerIndex);
}
/// Synchronization client implementation
pub struct ClientImpl {
/// State reference
state: ClientStateRef,
/// Core reference
core: ClientCoreRef,
/// Verifier reference (assumed field: `on_block`/`on_transaction` below
/// dispatch verification tasks through `self.verifier`, which the original
/// sketch never declared)
verifier: VerifierRef,
}
// `ClientState` only wraps an `Arc<AtomicBool>`, which is itself `Sync`, so
// this marker impl is safe (note that such an impl must be `unsafe`)
unsafe impl Sync for ClientState {
}
impl ClientState {
fn is_synchronizing(&self) -> bool {
self.is_synchronizing.load(Ordering::Relaxed)
}
}
impl Client for ClientImpl {
fn is_synchronizing(&self) -> bool {
self.state.is_synchronizing()
}
fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv) {
self.core.lock().on_inventory(peer_index, message);
}
fn on_headers(&self, peer_index: PeerIndex, message: types::Headers) {
self.core.lock().on_headers(peer_index, message);
}
fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound) {
self.core.lock().on_notfound(peer_index, message);
}
fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
if let Some(mut verification_tasks) = self.core.lock().on_block(peer_index, block) {
while let Some(verification_task) = verification_tasks.pop_front() {
self.verifier.execute(verification_task);
}
}
// try to switch to saturated state OR execute sync tasks
let mut core = self.core.lock();
if !core.try_switch_to_saturated_state() {
core.execute_synchronization_tasks(None, None);
}
}
fn on_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
// `true`: transactions received from the network are relayed further by default
if let Some(mut verification_tasks) = self.core.lock().on_transaction(peer_index, transaction, true) {
while let Some(verification_task) = verification_tasks.pop_front() {
self.verifier.execute(verification_task);
}
}
}
fn on_disconnect(&self, peer_index: PeerIndex) {
self.core.lock().on_disconnect(peer_index);
}
}

View File

@@ -1,635 +0,0 @@
use std::cmp::{min, max};
use std::collections::{VecDeque, HashSet};
use std::mem::swap;
use time;
use chain::{IndexedBlock, IndexedBlockHeader, IndexedTransaction};
use message::common;
use message::types;
use primitives::hash::H256;
use synchronization_blocks_queue::{BlocksQueue, BlockState};
use synchronization_executor::Task;
use synchronization_verifier::{VerificationTask, VerificationTasks};
// NOTE: assumed import paths; the original sketch used these names without
// importing them:
use synchronization_peers_tasks::PeersTasks;
use synchronization_transactions_queue::{TransactionsQueue, TransactionState};
use types::{ExecutorRef, PeerIndex, PeersRef};
use utils::{AverageSpeedMeter, OrphanBlocksPool, OrphanTransactionsPool};
use verification::ChainVerifier;
// TODO: refactor DeadEnd blocks
/// Approximate maximal number of blocks hashes in scheduled queue.
const MAX_SCHEDULED_HASHES: u32 = 4 * 1024;
/// Approximate maximal number of blocks hashes in requested queue.
const MAX_REQUESTED_BLOCKS: u32 = 256;
/// Approximate maximal number of blocks in verifying queue.
const MAX_VERIFYING_BLOCKS: u32 = 256;
/// Minimum number of blocks to request from peer
const MIN_BLOCKS_IN_REQUEST: u32 = 32;
/// Maximum number of blocks to request from peer
const MAX_BLOCKS_IN_REQUEST: u32 = 128;
/// Number of blocks to receive since synchronization start to begin duplicating blocks requests
const NEAR_EMPTY_VERIFICATION_QUEUE_THRESHOLD_BLOCKS: usize = 20;
/// Number of seconds left before verification queue will be empty to count it as 'near empty queue'
const NEAR_EMPTY_VERIFICATION_QUEUE_THRESHOLD_S: f64 = 20_f64;
/// Number of blocks to inspect when calculating average sync speed
const SYNC_SPEED_BLOCKS_TO_INSPECT: usize = 512;
/// Number of blocks to inspect when calculating average blocks speed
const BLOCKS_SPEED_BLOCKS_TO_INSPECT: usize = 512;
/// Synchronization client core config
pub struct ClientCoreConfig {
/// Receiving dead-end block from peer leads to disconnect
pub dead_end_fatal: bool,
/// Verify headers before scheduling blocks for retrieval (field assumed:
/// `on_headers` below reads it)
pub verify_headers: bool,
}
/// Synchronization client core
pub trait ClientCore {
/// Called upon receiving inventory message
fn on_inventory(&mut self, peer_index: PeerIndex, message: types::Inv);
/// Called upon receiving headers message
fn on_headers(&mut self, peer_index: PeerIndex, message: types::Headers);
/// Called upon receiving notfound message
fn on_notfound(&mut self, peer_index: PeerIndex, message: types::NotFound);
/// Called upon receiving block message
fn on_block(&mut self, peer_index: PeerIndex, block: IndexedBlock) -> Option<VerificationTasks>;
/// Called upon receiving transaction message
fn on_transaction(&mut self, peer_index: PeerIndex, transaction: IndexedTransaction, relay: bool) -> Option<VerificationTasks>;
/// Called when connection is closed
fn on_disconnect(&mut self, peer_index: PeerIndex);
/// Try switch to saturated state
fn try_switch_to_saturated_state(&mut self) -> bool;
/// Execute pending synchronization tasks
fn execute_synchronization_tasks(&mut self, forced_blocks_requests: Option<Vec<H256>>, final_blocks_requests: Option<Vec<H256>>);
}
/// Synchronization state
#[derive(Debug, Clone, Copy)]
enum State {
/// We know that there is more than one block in the blockchain that is still unknown to us
Synchronizing(f64, u32),
/// There is only one unknown block in the blockchain
NearlySaturated,
/// We have downloaded all blocks of the blockchain of which we have ever heard
Saturated,
}
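// Helper methods assumed by the code below (`is_synchronizing` is called in
// `on_inventory`/`on_block`, `is_saturated` in `switch_to_saturated_state`),
// but never defined in the original sketch:
impl State {
fn is_synchronizing(&self) -> bool {
match *self {
State::Synchronizing(_, _) => true,
_ => false,
}
}
fn is_saturated(&self) -> bool {
match *self {
State::Saturated => true,
_ => false,
}
}
}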
/// Synchronization client core implementation
pub struct ClientCoreImpl {
/// Core configuration (field assumed: methods below read `self.config`)
config: ClientCoreConfig,
/// Synchronization state
state: State,
/// Synchronization peers
peers: PeersRef,
/// Synchronization peers tasks
peers_tasks: PeersTasks,
/// Synchronization executor (field assumed: methods below schedule tasks
/// through `self.executor`)
executor: ExecutorRef,
/// Transactions queue
tx_queue: TransactionsQueue,
/// Blocks queue
blocks_queue: BlocksQueue,
/// Orphaned blocks pool (field assumed: used by `on_block` and
/// `switch_to_saturated_state` below)
orphaned_blocks_pool: OrphanBlocksPool,
/// Orphaned transactions pool (field assumed: used by `on_transaction` below)
orphaned_transactions_pool: OrphanTransactionsPool,
/// Chain verifier
chain_verifier: ChainVerifier,
/// Core stats
stats: Stats,
}
/// Synchronization client core stats
struct Stats {
/// Block processing speed meter
block_speed_meter: AverageSpeedMeter,
/// Block synchronization speed meter
sync_speed_meter: AverageSpeedMeter,
}
/// Transaction append error
enum AppendTransactionError {
/// Cannot append when synchronization is in progress
Synchronizing,
/// Some inputs of transaction are unknown
Orphan(IndexedTransaction, HashSet<H256>),
}
impl ClientCore for ClientCoreImpl {
fn on_inventory(&mut self, peer_index: PeerIndex, message: types::Inv) {
// we are synchronizing => we ask only for blocks with known headers => there are no useful blocks hashes for us
// we are synchronizing => we ignore all transactions until it is completed => there are no useful transactions hashes for us
if self.state.is_synchronizing() {
return;
}
// else ask for all unknown transactions and blocks
let inventory: Vec<_> = message.inventory.into_iter()
.filter(|item| {
match item.inv_type {
common::InventoryType::MessageTx => self.tx_queue.state(&item.hash) == TransactionState::Unknown,
common::InventoryType::MessageBlock => match self.blocks_queue.state(&item.hash) {
BlockState::Unknown => true,
BlockState::DeadEnd if self.config.dead_end_fatal => {
self.peers.misbehaving(peer_index, format!("Provided dead-end block {:?}", item.hash.to_reversed_str()));
false
},
_ => false,
},
_ => false,
}
})
.collect();
if inventory.is_empty() {
return;
}
let message = types::GetData::with_inventory(inventory);
self.executor.execute(Task::GetData(peer_index, message));
}
fn on_headers(&mut self, peer_index: PeerIndex, message: types::Headers) {
assert!(!message.headers.is_empty(), "This must be checked in incoming connection");
// transform to indexed headers
let headers: Vec<_> = message.headers.into_iter().map(IndexedBlockHeader::from).collect();
// update peers to select next tasks
self.peers_tasks.on_headers_received(peer_index);
// headers are ordered
// => if we know nothing about headers[0].parent
// => all headers are also unknown to us
let header0 = &headers[0].raw;
if self.blocks_queue.state(&header0.previous_header_hash) == BlockState::Unknown {
warn!(target: "sync", "Previous header of the first header from peer#{} `headers` message is unknown. First: {}. Previous: {}", peer_index, header0.hash().to_reversed_str(), header0.previous_header_hash.to_reversed_str());
return;
}
// find first unknown header position
// optimization: normally, the first header will be unknown
let num_headers = headers.len();
let first_unknown_index = match self.blocks_queue.state(&headers[0].hash) {
BlockState::Unknown => 0,
_ => {
// optimization: if last header is known, then all headers are also known
let header_last = &headers[num_headers - 1];
match self.blocks_queue.state(&header_last.hash) {
BlockState::Unknown => 1 + headers.iter().skip(1)
.position(|header| self.blocks_queue.state(&header.hash) == BlockState::Unknown)
.expect("last header has UnknownState; we are searching for first unknown header; qed"),
// else all headers are known
_ => {
trace!(target: "sync", "Ignoring {} known headers from peer#{}", headers.len(), peer_index);
// but this peer is still useful for synchronization
self.peers_tasks.mark_useful(peer_index);
return;
},
}
}
};
// if first unknown header is preceded by a dead-end header => misbehaving/warn
let mut last_known_hash = if first_unknown_index > 0 { &headers[first_unknown_index - 1].hash } else { &header0.previous_header_hash };
if self.blocks_queue.state(last_known_hash) == BlockState::DeadEnd {
if self.config.dead_end_fatal {
self.peers.misbehaving(peer_index, format!("Provided dead-end block {:?}", last_known_hash.to_reversed_str()));
return;
} else {
warn!(target: "sync", "Peer#{} has provided us with dead-end block {:?}", peer_index, last_known_hash.to_reversed_str());
}
}
// validate blocks headers before scheduling
let mut headers_provider = InMemoryHeadersProvider::new(&self.blocks_queue);
for header in &headers[first_unknown_index..num_headers] {
// check that this header is a direct child of the previous header
if &header.raw.previous_header_hash != last_known_hash {
self.peers.misbehaving(peer_index, format!("Neighbour headers in peer#{} `headers` message are unlinked: Prev: {}, PrevLink: {}, Curr: {}", peer_index, last_known_hash.to_reversed_str(), header.raw.previous_header_hash.to_reversed_str(), header.hash.to_reversed_str()));
return;
}
// check that we do not know all blocks in range [first_unknown_index..]
// if we know some block => there has been verification error => all headers should be ignored
// see when_previous_block_verification_failed_fork_is_not_requested for details
match self.blocks_queue.state(&header.hash) {
BlockState::Unknown => (),
BlockState::DeadEnd if self.config.dead_end_fatal => {
self.peers.misbehaving(peer_index, format!("Provided dead-end block {:?}", header.raw.to_reversed_str()));
return;
},
_ => {
trace!(target: "sync", "Ignoring {} headers from peer#{} - known header in the middle", message.headers.len(), peer_index);
self.peers_tasks.mark_useful(peer_index);
return;
},
}
// verify header
if self.config.verify_headers {
if let Err(error) = self.chain_verifier.verify_block_header(&headers_provider, &header.hash, &header.raw) {
if self.config.dead_end_fatal {
self.peers.misbehaving(peer_index, format!("Verification of header {} from peer#{} `headers` message has failed: {:?}", header.hash.to_reversed_str(), peer_index, error));
} else {
warn!(target: "sync", "Verification of header {} from peer#{} `headers` message has failed: {:?}", header.hash.to_reversed_str(), peer_index, error);
}
self.blocks_queue.dead_end_block(header.hash.clone());
return;
}
}
last_known_hash = &header.hash;
headers_provider.append_header(header);
}
// append headers to the queue
trace!(target: "sync", "New {} headers from peer#{}. First {:?}, last: {:?}",
num_headers - first_unknown_index,
peer_index,
headers[0].hash.to_reversed_str(),
headers[num_headers - 1].hash.to_reversed_str()
);
self.blocks_queue.schedule_headers(&headers[first_unknown_index..num_headers]);
// this peer has supplied us with new headers => useful indeed
self.peers_tasks.mark_useful(peer_index);
// and execute tasks
self.execute_synchronization_tasks(None, None);
}
fn on_notfound(&mut self, peer_index: PeerIndex, message: types::NotFound) {
let notfound_blocks: HashSet<_> = message.inventory.into_iter()
.filter(|item| item.inv_type == common::InventoryType::MessageBlock)
.map(|item| item.hash)
.collect();
if notfound_blocks.is_empty() {
// it is not about blocks => just ignore it
return;
}
if let Some(blocks_tasks) = self.peers_tasks.blocks_tasks(peer_index) {
// check if peer has responded with notfound to requested blocks
if blocks_tasks.intersection(&notfound_blocks).next().is_none() {
// if notfound some other blocks => just ignore the message
return;
}
// for now, let's exclude peer from synchronization - we are relying on full nodes for synchronization
trace!(target: "sync", "Peer#{} is excluded from synchronization as it has not found blocks", peer_index);
let removed_tasks = self.peers_tasks.reset_blocks_tasks(peer_index);
self.peers_tasks.mark_unuseful(peer_index);
// if peer has had some blocks tasks, rerequest these blocks
self.execute_synchronization_tasks(Some(removed_tasks), None);
}
}
fn on_block(&mut self, peer_index: PeerIndex, block: IndexedBlock) -> Option<VerificationTasks> {
// update peers to select next tasks
self.peers_tasks.on_block_received(peer_index, &block.hash);
// prepare list of blocks to verify + make all required changes to the chain
let block_state = self.blocks_queue.state(&block.hash);
match block_state {
BlockState::Verifying | BlockState::Stored => {
// remember peer as useful
self.peers_tasks.mark_useful(peer_index);
// already verifying || stored => no new verification tasks
None
},
BlockState::Unknown | BlockState::Scheduled | BlockState::Requested | BlockState::DeadEnd => {
// if configured => drop connection on dead-end block
if block_state == BlockState::DeadEnd {
if self.config.dead_end_fatal {
self.peers.misbehaving(peer_index, format!("Provided dead-end block {:?}", block.hash.to_reversed_str()));
return None;
} else {
warn!(target: "sync", "Peer#{} has provided us with dead-end block {:?}", peer_index, block.hash.to_reversed_str());
}
}
// new block received => update synchronization speed
self.stats.sync_speed_meter.checkpoint();
// check parent block state
let parent_block_state = self.blocks_queue.state(&block.header.raw.previous_header_hash);
match parent_block_state {
BlockState::Unknown | BlockState::DeadEnd => {
// if configured => drop connection on dead-end block
if parent_block_state == BlockState::DeadEnd {
if self.config.dead_end_fatal {
self.peers.misbehaving(peer_index, format!("Provided dead-end block {:?}", block.hash.to_reversed_str()));
return None;
} else {
warn!(target: "sync", "Peer#{} has provided us with after-dead-end block {:?}", peer_index, block.hash.to_reversed_str());
}
}
if self.state.is_synchronizing() {
// when synchronizing, we tend to receive all blocks in-order
trace!(
target: "sync",
"Ignoring block {} from peer#{}, because its parent is unknown and we are synchronizing",
block.hash.to_reversed_str(),
peer_index
);
// remove block from current queue
self.blocks_queue.forget(&block.hash);
// remove orphaned blocks
for orphaned_block in self.orphaned_blocks_pool.remove_blocks_for_parent(&block.hash) {
self.blocks_queue.forget(&orphaned_block.hash);
}
} else {
// remove this block from the queue
self.blocks_queue.forget(&block.hash);
// remember this block as unknown
if !self.orphaned_blocks_pool.contains_unknown_block(&block.hash) {
self.orphaned_blocks_pool.insert_unknown_block(block);
}
}
// no verification tasks, as we have either ignored, or postponed verification
None
},
BlockState::Verifying | BlockState::Stored => {
// remember peer as useful
self.peers_tasks.mark_useful(peer_index);
// forget blocks we are going to verify
// + remember that we are verifying these blocks
// + remember that we are verifying these blocks by message from this peer
let orphan_blocks = self.blocks_queue.remove_blocks_for_parent(&block.hash);
self.blocks_queue.forget_leave_header(&block.hash);
self.blocks_queue.verify(block.header.clone());
//self.verifying_blocks_by_peer.insert(block.hash.clone(), peer_index);
for orphan_block in &orphan_blocks {
self.blocks_queue.forget_leave_header(&orphan_block.hash);
self.blocks_queue.verify(orphan_block.header.clone());
//self.verifying_blocks_by_peer.insert(orphan_block.hash.clone(), peer_index);
}
// update
/*match self.verifying_blocks_futures.entry(peer_index) {
Entry::Occupied(mut entry) => {
entry.get_mut().0.extend(blocks_to_verify.iter().map(|&(ref h, _)| h.clone()));
},
Entry::Vacant(entry) => {
let block_hashes: HashSet<_> = blocks_to_verify.iter().map(|&(ref h, _)| h.clone()).collect();
entry.insert((block_hashes, Vec::new()));
}
}*/
// schedule verification of this block && all dependent orphans
let mut verification_tasks: VecDeque<_> = VecDeque::with_capacity(orphan_blocks.len() + 1);
verification_tasks.push_back(VerificationTask::VerifyBlock(block));
verification_tasks.extend(orphan_blocks.into_iter().map(|block| VerificationTask::VerifyBlock(block)));
// we have blocks to verify
Some(verification_tasks)
},
BlockState::Requested | BlockState::Scheduled => {
// remember peer as useful
self.peers_tasks.mark_useful(peer_index);
// remember as orphan block
self.blocks_queue.insert_orphaned_block(block);
// no new blocks to verify
None
}
}
},
}
}
fn on_transaction(&mut self, peer_index: PeerIndex, transaction: IndexedTransaction, relay: bool) -> Option<VerificationTasks> {
match self.try_append_transaction(transaction, relay) {
Err(AppendTransactionError::Orphan(transaction, unknown_parents)) => {
self.orphaned_transactions_pool.insert(transaction, unknown_parents);
None
},
Err(AppendTransactionError::Synchronizing) => None,
Ok(transactions) => Some(transactions.into_iter().map(|transaction| VerificationTask::VerifyTransaction(transaction)).collect()),
}
}
fn on_disconnect(&mut self, peer_index: PeerIndex) {
// when last peer is disconnected, reset, but let verifying blocks be verified
let peer_tasks = self.peers_tasks.on_peer_disconnected(peer_index);
if !self.peers_tasks.has_any_useful() {
self.switch_to_saturated_state();
} else if peer_tasks.is_some() {
self.execute_synchronization_tasks(peer_tasks, None);
}
}
fn try_switch_to_saturated_state(&mut self) -> bool {
// move to saturated state if there are no more blocks in scheduled || requested state
let in_saturated_state = self.blocks_queue.state_len(BlockState::Scheduled) == 0
&& self.blocks_queue.state_len(BlockState::Requested) == 0;
if in_saturated_state {
self.switch_to_saturated_state();
}
in_saturated_state
}
fn execute_synchronization_tasks(&mut self, forced_blocks_requests: Option<Vec<H256>>, final_blocks_requests: Option<Vec<H256>>) {
let mut tasks: Vec<Task> = Vec::new();
// display information if processed many blocks || enough time has passed since sync start
self.print_synchronization_information();
// if some blocks requests are forced => we should ask peers even if there are no idle peers
if let Some(forced_blocks_requests) = forced_blocks_requests {
let useful_peers = self.peers_tasks.useful_peers();
// if we have to request blocks && there are no useful peers at all => switch to saturated state
if useful_peers.is_empty() {
warn!(target: "sync", "Last peer was marked as non-useful. Moving to saturated state.");
self.switch_to_saturated_state();
return;
}
let forced_tasks = self.prepare_blocks_requests_tasks(useful_peers, forced_blocks_requests);
tasks.extend(forced_tasks);
}
// if some blocks requests are marked as last [i.e. blocks are potentially wrong] => ask peers anyway
if let Some(final_blocks_requests) = final_blocks_requests {
let useful_peers = self.peers_tasks.useful_peers();
if !useful_peers.is_empty() { // if empty => not a problem, just forget these blocks
let forced_tasks = self.prepare_blocks_requests_tasks(useful_peers, final_blocks_requests);
tasks.extend(forced_tasks);
}
}
let mut blocks_requests: Option<Vec<H256>> = None;
let blocks_idle_peers = self.peers_tasks.idle_peers_for_blocks();
{
// check if we can query some blocks hashes
let inventory_idle_peers = self.peers_tasks.idle_peers_for_inventory();
if !inventory_idle_peers.is_empty() {
let scheduled_hashes_len = self.blocks_queue.state_len(BlockState::Scheduled);
if scheduled_hashes_len < MAX_SCHEDULED_HASHES {
for inventory_peer in &inventory_idle_peers {
self.peers_tasks.on_inventory_requested(*inventory_peer);
}
let inventory_tasks = inventory_idle_peers.into_iter().map(Task::RequestBlocksHeaders);
tasks.extend(inventory_tasks);
}
}
let blocks_idle_peers_len = blocks_idle_peers.len() as u32;
if blocks_idle_peers_len != 0 {
// check if verification queue is empty/almost empty
// && there are pending blocks requests
// && there are idle block peers
// => we may need to duplicate pending blocks requests to idle peers
// this will result in additional network load, but verification queue will be filled up earlier
// it is very useful when dealing with large blocks + some peer is responding, but with very low speed:
// requested: [B1, B2, B3, B4] from peer1
// orphans: [B5, B6, B7, B8, ... B1024] ===> 1GB of RAM
// verifying: None <=== we are waiting for B1 to come
// idle: [peer2]
// peer1 responds with single block in ~20 seconds
// => we could ask idle peer2 about [B1, B2, B3, B4]
// these requests have priority over new blocks requests below
let requested_hashes_len = self.blocks_queue.state_len(BlockState::Requested);
let verifying_hashes_len = self.blocks_queue.state_len(BlockState::Verifying);
if requested_hashes_len != 0 {
let verification_speed: f64 = self.stats.block_speed_meter.speed();
let synchronization_speed: f64 = self.stats.sync_speed_meter.speed();
// estimate time when verification queue will be empty
let verification_queue_will_be_empty_in = if verifying_hashes_len == 0 {
// verification queue is already empty
if self.stats.block_speed_meter.inspected_items_len() < NEAR_EMPTY_VERIFICATION_QUEUE_THRESHOLD_BLOCKS {
// the very beginning of synchronization
// => peers have not yet responded to a single block request
60_f64
} else {
// some blocks have already been received
// => bad situation
0_f64
}
} else {
if verification_speed < 0.01_f64 {
// verification speed is too slow
60_f64
} else {
// blocks / (blocks / second) -> second
verifying_hashes_len as f64 / verification_speed
}
};
// estimate time when all synchronization requests will complete
let synchronization_queue_will_be_full_in = if synchronization_speed < 0.01_f64 {
// synchronization speed is too slow
60_f64
} else {
// blocks / (blocks / second) -> second
requested_hashes_len as f64 / synchronization_speed
};
// if verification queue will be empty before all synchronization requests will be completed
// + do not spam with duplicated blocks requests if blocks are too big && there are still blocks left for NEAR_EMPTY_VERIFICATION_QUEUE_THRESHOLD_S
// => duplicate blocks requests
if synchronization_queue_will_be_full_in > verification_queue_will_be_empty_in &&
verification_queue_will_be_empty_in < NEAR_EMPTY_VERIFICATION_QUEUE_THRESHOLD_S {
// blocks / second * second -> blocks
let hashes_requests_to_duplicate_len = synchronization_speed * (synchronization_queue_will_be_full_in - verification_queue_will_be_empty_in);
// do not ask for too many blocks
let hashes_requests_to_duplicate_len = min(MAX_BLOCKS_IN_REQUEST, hashes_requests_to_duplicate_len as u32);
// ask for at least 1 block
let hashes_requests_to_duplicate_len = max(1, min(requested_hashes_len, hashes_requests_to_duplicate_len));
blocks_requests = Some(self.blocks_queue.best_n_of_blocks_state(BlockState::Requested, hashes_requests_to_duplicate_len));
trace!(target: "sync", "Duplicating {} blocks requests. Sync speed: {} * {}, blocks speed: {} * {}.", hashes_requests_to_duplicate_len, synchronization_speed, requested_hashes_len, verification_speed, verifying_hashes_len);
}
}
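// Worked example (illustrative numbers, not from the original code): with
// verifying_hashes_len = 4 and verification_speed = 2 blocks/s, the
// verification queue empties in 4 / 2 = 2s; with requested_hashes_len = 128
// and synchronization_speed = 0.5 blocks/s, the pending requests complete in
// 128 / 0.5 = 256s. Since 256 > 2 and 2 < NEAR_EMPTY_VERIFICATION_QUEUE_THRESHOLD_S
// (20s), we duplicate 0.5 * (256 - 2) = 127 block requests, within the
// MAX_BLOCKS_IN_REQUEST (128) and requested_hashes_len caps.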
// check if we can move some blocks from scheduled to requested queue
{
let scheduled_hashes_len = self.blocks_queue.state_len(BlockState::Scheduled);
if requested_hashes_len + verifying_hashes_len < MAX_REQUESTED_BLOCKS + MAX_VERIFYING_BLOCKS && scheduled_hashes_len != 0 {
let chunk_size = min(MAX_BLOCKS_IN_REQUEST, max(scheduled_hashes_len / blocks_idle_peers_len, MIN_BLOCKS_IN_REQUEST));
let hashes_to_request_len = chunk_size * blocks_idle_peers_len;
let hashes_to_request = self.blocks_queue.request_blocks_hashes(hashes_to_request_len);
match blocks_requests {
Some(ref mut blocks_requests) => blocks_requests.extend(hashes_to_request),
None => blocks_requests = Some(hashes_to_request),
}
}
}
}
}
// append blocks requests tasks
if let Some(blocks_requests) = blocks_requests {
tasks.extend(self.prepare_blocks_requests_tasks(blocks_idle_peers, blocks_requests));
}
// execute synchronization tasks
for task in tasks {
self.executor.execute(task);
}
}
}
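// The methods above call `try_append_transaction`, which the original sketch
// never defines. A minimal assumed version is shown here; note that
// `TransactionsQueue::unknown_inputs` is hypothetical and only illustrates the
// orphan-detection step:
impl ClientCoreImpl {
fn try_append_transaction(&mut self, transaction: IndexedTransaction, _relay: bool) -> Result<Vec<IndexedTransaction>, AppendTransactionError> {
// transactions are ignored while block synchronization is in progress
if self.state.is_synchronizing() {
return Err(AppendTransactionError::Synchronizing);
}
// if some inputs are unknown, remember the transaction as an orphan
let unknown_parents: HashSet<H256> = self.tx_queue.unknown_inputs(&transaction);
if !unknown_parents.is_empty() {
return Err(AppendTransactionError::Orphan(transaction, unknown_parents));
}
// otherwise it is ready for verification
Ok(vec![transaction])
}
}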
impl ClientCoreImpl {
/// Print synchronization information
fn print_synchronization_information(&mut self) {
if let State::Synchronizing(timestamp, num_of_blocks) = self.state {
let new_timestamp = time::precise_time_s();
let timestamp_diff = new_timestamp - timestamp;
let new_num_of_blocks = self.blocks_queue.best_storage_block().number;
let blocks_diff = if new_num_of_blocks > num_of_blocks { new_num_of_blocks - num_of_blocks } else { 0 };
if timestamp_diff >= 60.0 || blocks_diff > 1000 {
self.state = State::Synchronizing(time::precise_time_s(), new_num_of_blocks);
info!(target: "sync", "{:?} @ Processed {} blocks in {} seconds. Queue information: {:?}"
, time::strftime("%H:%M:%S", &time::now()).unwrap()
, blocks_diff, timestamp_diff
, self.blocks_queue.information());
}
}
}
/// Switch to saturated state
fn switch_to_saturated_state(&mut self) {
if self.state.is_saturated() {
return;
}
self.state = State::Saturated;
self.peers.reset();
// remove sync orphans, but leave unknown orphans until they'll be removed by management thread
let removed_orphans = self.orphaned_blocks_pool.remove_known_blocks();
// leave currently verifying blocks
{
self.blocks_queue.forget_blocks(&removed_orphans);
self.blocks_queue.forget_all_blocks_with_state(BlockState::Requested);
self.blocks_queue.forget_all_blocks_with_state(BlockState::Scheduled);
info!(target: "sync", "{:?} @ Switched to saturated state. Chain information: {:?}",
time::strftime("%H:%M:%S", &time::now()).unwrap(),
self.blocks_queue.information());
}
// finally - ask all known peers for their best blocks inventory, in case
// some peer has led us to a fork
// + ask all peers for their memory pool
for peer in self.peers_tasks.all_peers() {
self.executor.execute(Task::RequestBlocksHeaders(peer));
self.executor.execute(Task::RequestMemoryPool(peer));
}
}
/// Prepare blocks requests for peers
fn prepare_blocks_requests_tasks(&mut self, peers: Vec<PeerIndex>, mut hashes: Vec<H256>) -> Vec<Task> {
// TODO: ask most fast peers for hashes at the beginning of `hashes`
let chunk_size = min(MAX_BLOCKS_IN_REQUEST, max(hashes.len() as u32 / peers.len() as u32, MIN_BLOCKS_IN_REQUEST));
let last_peer_index = peers.len() - 1;
let mut tasks: Vec<Task> = Vec::new();
for (peer_index, peer) in peers.into_iter().enumerate() {
// we have to request all blocks => we will request last peer for all remaining blocks
let peer_chunk_size = if peer_index == last_peer_index { hashes.len() } else { min(hashes.len(), chunk_size as usize) };
if peer_chunk_size == 0 {
break;
}
let mut chunk_hashes = hashes.split_off(peer_chunk_size);
swap(&mut chunk_hashes, &mut hashes);
self.peers_tasks.on_blocks_requested(peer, &chunk_hashes);
tasks.push(Task::RequestBlocks(peer, chunk_hashes));
}
assert_eq!(hashes.len(), 0);
tasks
}
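// Worked example (illustrative numbers): with 100 hashes and 3 peers,
// chunk_size = min(128, max(100 / 3, 32)) = 33; the first two peers receive
// 33 hashes each and the last peer receives the remaining 34, so every
// scheduled hash ends up requested from exactly one peer.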
}

View File

@@ -1 +0,0 @@
// TODO: ignore messages from peers, which have no outbound connections

View File

@@ -1,200 +0,0 @@
use std::sync::Arc;
use chain::{BlockHeader, Block, Transaction};
use message::common;
use message::types;
use primitives::hash::H256;
use types::{RequestId, PeersRef, BlocksQueueRef};
/// Synchronization task
pub enum Task {
/// Request given blocks.
RequestBlocks(usize, Vec<H256>),
/// Request blocks headers using full getheaders.block_locator_hashes.
RequestBlocksHeaders(usize),
/// Request given transactions
RequestTransactions(usize, Vec<H256>),
/// Request memory pool contents
RequestMemoryPool(usize),
/// Send block.
SendBlock(usize, Block),
/// Send merkleblock
SendMerkleBlock(usize, types::MerkleBlock),
/// Send transaction
SendTransaction(usize, Transaction),
/// Send block transactions
SendBlockTxn(usize, H256, Vec<Transaction>),
/// Send notfound
SendNotFound(usize, Vec<common::InventoryVector>),
/// Send inventory
SendInventory(usize, Vec<common::InventoryVector>),
/// Send headers
SendHeaders(usize, Vec<BlockHeader>, Option<RequestId>),
/// Send compact blocks
SendCompactBlocks(usize, Vec<common::BlockHeaderAndIDs>),
/// Notify io about ignored request
Ignore(usize, u32),
/// Close connection with this peer
Close(usize),
}
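// Example (illustrative): a typical task sequence while syncing from a single
// peer is `Task::RequestBlocksHeaders(peer)`, then, once the received headers
// have been scheduled, `Task::RequestBlocks(peer, hashes)` for chunks of hashes.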
/// Synchronization task executor trait
pub trait Executor {
/// Execute single synchronization task
fn execute(&self, task: Task);
}
/// Synchronization task executor implementation
pub struct ExecutorImpl {
/// Synchronization peers reference
peers: PeersRef,
}
impl Executor for ExecutorImpl {
fn execute(&self, task: Task) {
match task {
Task::RequestBlocks(peer_index, blocks_hashes) => {
let getdata = types::GetData {
inventory: blocks_hashes.into_iter()
.map(|hash| common::InventoryVector {
inv_type: common::InventoryType::MessageBlock,
hash: hash,
}).collect()
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Querying {} unknown blocks from peer#{}", getdata.inventory.len(), peer_index);
connection.send_getdata(&getdata);
}
},
Task::RequestBlocksHeaders(peer_index) => {
let block_locator_hashes = self.chain.read().block_locator_hashes();
let getheaders = types::GetHeaders {
version: 0, // this field is ignored by clients
block_locator_hashes: block_locator_hashes,
hash_stop: H256::default(),
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Request blocks hashes from peer#{} using getheaders", peer_index);
connection.send_getheaders(&getheaders);
}
},
Task::RequestMemoryPool(peer_index) => {
let mempool = types::MemPool;
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Querying memory pool contents from peer#{}", peer_index);
connection.send_mempool(&mempool);
}
},
Task::RequestTransactions(peer_index, transactions_hashes) => {
let getdata = types::GetData {
inventory: transactions_hashes.into_iter()
.map(|hash| common::InventoryVector {
inv_type: common::InventoryType::MessageTx,
hash: hash,
}).collect()
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Querying {} unknown transactions from peer#{}", getdata.inventory.len(), peer_index);
connection.send_getdata(&getdata);
}
},
Task::SendBlock(peer_index, block) => {
let block_message = types::Block {
block: block,
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending block {:?} to peer#{}", block_message.block.hash().to_reversed_str(), peer_index);
connection.send_block(&block_message);
}
},
Task::SendMerkleBlock(peer_index, merkleblock) => {
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending merkleblock {:?} to peer#{}", merkleblock.block_header.hash().to_reversed_str(), peer_index);
connection.send_merkleblock(&merkleblock);
}
},
Task::SendTransaction(peer_index, transaction) => {
let transaction_message = types::Tx {
transaction: transaction,
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending transaction {:?} to peer#{}", transaction_message.transaction.hash().to_reversed_str(), peer_index);
connection.send_transaction(&transaction_message);
}
},
Task::SendBlockTxn(peer_index, block_hash, transactions) => {
let transactions_message = types::BlockTxn {
request: common::BlockTransactions {
blockhash: block_hash,
transactions: transactions,
}
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending blocktxn with {} transactions to peer#{}", transactions_message.request.transactions.len(), peer_index);
connection.send_block_txn(&transactions_message);
}
},
Task::SendNotFound(peer_index, unknown_inventory) => {
let notfound = types::NotFound {
inventory: unknown_inventory,
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending notfound to peer#{} with {} items", peer_index, notfound.inventory.len());
connection.send_notfound(&notfound);
}
},
Task::SendInventory(peer_index, inventory) => {
let inventory = types::Inv {
inventory: inventory,
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending inventory to peer#{} with {} items", peer_index, inventory.inventory.len());
connection.send_inventory(&inventory);
}
},
Task::SendHeaders(peer_index, headers, id) => {
let headers = types::Headers {
headers: headers,
};
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Sending headers to peer#{} with {} items", peer_index, headers.headers.len());
match id {
Some(id) => connection.respond_headers(&headers, id),
None => connection.send_headers(&headers),
}
}
},
Task::SendCompactBlocks(peer_index, compact_blocks) => {
if let Some(connection) = self.peers.read().connection(peer_index) {
for compact_block in compact_blocks {
trace!(target: "sync", "Sending compact_block {:?} to peer#{}", compact_block.header.hash(), peer_index);
connection.send_compact_block(&types::CompactBlock {
header: compact_block,
});
}
}
},
Task::Ignore(peer_index, id) => {
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Ignoring request from peer#{} with id {}", peer_index, id);
connection.ignored(id);
}
},
Task::Close(peer_index) => {
if let Some(connection) = self.peers.read().connection(peer_index) {
trace!(target: "sync", "Closing request with peer#{}", peer_index);
connection.close();
}
},
}
}
}

View File

@@ -1,143 +0,0 @@
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use primitives::hash::H256;
use super::utils::{KnownHashType, KnownHashFilter, FeeRateFilter, BloomFilter};
/// Synchronization peer filter
pub trait Filter {
/// Check if block is known by the peer
fn filter_block(&self, block: &IndexedBlock) -> bool;
/// Check if transaction is known by the peer
fn filter_transaction(&self, transaction: &IndexedTransaction, fee_rate: Option<u64>) -> bool;
/// Remember that peer knows given block
fn remember_known_block(&mut self, hash: H256);
/// Remember that peer knows given compact block
fn remember_known_compact_block(&mut self, hash: H256);
/// Remember that peer knows given transaction
fn remember_known_transaction(&mut self, hash: H256);
/// Set up bloom filter
fn set_bloom_filter(&mut self, filter: types::FilterLoad);
/// Update bloom filter
fn update_bloom_filter(&mut self, filter: types::FilterAdd);
/// Remove bloom filter
fn remove_bloom_filter(&mut self);
/// Set up fee rate filter
fn set_min_fee_rate(&mut self, filter: types::FeeFilter);
}
/// Synchronization peer filter implementation
#[derive(Default)]
pub struct FilterImpl {
/// Known hashes filter
known_hash_filter: KnownHashFilter,
/// Feerate filter
fee_rate_filter: FeeRateFilter,
/// Bloom filter
bloom_filter: BloomFilter,
}
impl Filter for FilterImpl {
fn filter_block(&self, block: &IndexedBlock) -> bool {
self.known_hash_filter.filter_block(&block.header.hash)
}
fn filter_transaction(&self, transaction: &IndexedTransaction, fee_rate: Option<u64>) -> bool {
self.known_hash_filter.filter_transaction(&transaction.hash)
&& fee_rate.map(|fee_rate| self.fee_rate_filter.filter_transaction(fee_rate)).unwrap_or(true)
&& self.bloom_filter.filter_transaction(transaction)
}
fn remember_known_block(&mut self, hash: H256) {
self.known_hash_filter.insert(hash, KnownHashType::Block);
}
fn remember_known_compact_block(&mut self, hash: H256) {
self.known_hash_filter.insert(hash, KnownHashType::CompactBlock);
}
fn remember_known_transaction(&mut self, hash: H256) {
self.known_hash_filter.insert(hash, KnownHashType::Transaction);
}
fn set_bloom_filter(&mut self, filter: types::FilterLoad) {
self.bloom_filter.set_bloom_filter(filter);
}
fn update_bloom_filter(&mut self, filter: types::FilterAdd) {
self.bloom_filter.update_bloom_filter(filter);
}
fn remove_bloom_filter(&mut self) {
self.bloom_filter.remove_bloom_filter();
}
fn set_min_fee_rate(&mut self, filter: types::FeeFilter) {
self.fee_rate_filter.set_min_fee_rate(filter);
}
}
#[cfg(test)]
mod tests {
use std::iter::repeat;
use chain::IndexedTransaction;
use message::types;
use primitives::bytes::Bytes;
use test_data;
use super::{Filter, FilterImpl};
#[test]
fn filter_default_accepts_block() {
assert!(FilterImpl::default().filter_block(&test_data::genesis().into()));
}
#[test]
fn filter_default_accepts_transaction() {
assert!(FilterImpl::default().filter_transaction(&test_data::genesis().transactions[0].clone().into(), Some(0)));
}
#[test]
fn filter_rejects_block_known() {
let mut filter = FilterImpl::default();
filter.remember_known_block(test_data::block_h1().hash());
filter.remember_known_compact_block(test_data::block_h2().hash());
assert!(!filter.filter_block(&test_data::block_h1().into()));
assert!(!filter.filter_block(&test_data::block_h2().into()));
assert!(filter.filter_block(&test_data::genesis().into()));
}
#[test]
fn filter_rejects_transaction_known() {
let mut filter = FilterImpl::default();
filter.remember_known_transaction(test_data::block_h1().transactions[0].hash());
assert!(!filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), None));
assert!(!filter.filter_transaction(&test_data::block_h2().transactions[0].clone().into(), None));
}
#[test]
fn filter_rejects_transaction_feerate() {
let mut filter = FilterImpl::default();
filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000));
assert!(filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), None));
assert!(filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), Some(1500)));
assert!(!filter.filter_transaction(&test_data::block_h1().transactions[0].clone().into(), Some(500)));
}
#[test]
fn filter_rejects_transaction_bloomfilter() {
let mut filter = FilterImpl::default();
let tx: IndexedTransaction = test_data::block_h1().transactions[0].clone().into();
filter.set_bloom_filter(types::FilterLoad {
filter: Bytes::from(repeat(0u8).take(1024).collect::<Vec<_>>()),
hash_functions: 10,
tweak: 5,
flags: types::FilterFlags::None,
});
assert!(!filter.filter_transaction(&tx, None));
filter.update_bloom_filter(types::FilterAdd {
data: (&*tx.hash as &[u8]).into(),
});
assert!(filter.filter_transaction(&tx, None));
filter.remove_bloom_filter();
assert!(filter.filter_transaction(&tx, None));
}
}

View File

@@ -1,24 +0,0 @@
use primitives::hash::H256;
use synchronization_peers::Peers;
use utils::{OrphanBlocksPool, OrphanTransactionsPool};
/// Peers management config (fields elided in this sketch)
struct ManagePeersConfig {
}
/// Unknown blocks management config (fields elided in this sketch)
struct ManageUnknownBlocksConfig {
}
/// Orphan transactions management config (fields elided in this sketch)
struct ManageOrphanTransactionsConfig {
}
pub fn manage_synchronization_peers_blocks(config: &ManagePeersConfig, peers: &mut Peers) -> (Vec<H256>, Vec<H256>) {
unimplemented!();
}
pub fn manage_synchronization_peers_inventory(config: &ManagePeersConfig, peers: &mut Peers) {
unimplemented!();
}
pub fn manage_unknown_orphaned_blocks(config: &ManageUnknownBlocksConfig, orphaned_blocks_pool: &mut OrphanBlocksPool) -> Option<Vec<H256>> {
unimplemented!();
}
pub fn manage_orphaned_transactions(config: &ManageOrphanTransactionsConfig, orphaned_transactions_pool: &mut OrphanTransactionsPool) -> Option<Vec<H256>> {
unimplemented!();
}

View File

@@ -1,295 +0,0 @@
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use network::Magic;
use synchronization_client::Client;
use synchronization_executor::{Executor, Task};
use synchronization_peers::{Peers, BlockAnnouncementType, TransactionAnnouncementType};
use synchronization_server::{Server, ServerTask};
use types::{LOCAL_PEER_INDEX, PeerIndex, RequestId, PeersRef, ClientRef, ExecutorRef, MemoryPoolRef, StorageRef};
use utils::Promise;
// TODO: both Local + Network node impl
// TODO: call getblocktemplate directly from RPC + lock MemoryPool only for reading transactions && then release lock ASAP
/// Local synchronization client node
pub trait LocalClientNode {
/// When new block comes from local client
fn push_block(&self, block: IndexedBlock) -> Promise;
/// When new transaction comes from local client
fn push_transaction(&self, transaction: IndexedTransaction) -> Promise;
}
/// Network synchronization node
pub trait NetworkNode {
/// Called when connection is opened
fn on_connect(&self, peer_index: PeerIndex, version: types::Version);
/// Called when connection is closed
fn on_disconnect(&self, peer_index: PeerIndex);
}
/// Network synchronization client node
pub trait NetworkClientNode {
/// Called upon receiving inventory message
fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv);
/// Called upon receiving headers message
fn on_headers(&self, peer_index: PeerIndex, message: types::Headers);
/// Called upon receiving notfound message
fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound);
/// Called upon receiving block message
fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock);
/// Called upon receiving transaction message
fn on_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction);
/// Called upon receiving merkleblock message
fn on_merkleblock(&self, peer_index: PeerIndex, message: types::MerkleBlock);
/// Called upon receiving cmpctblock message
fn on_cmpctblock(&self, peer_index: PeerIndex, message: types::CompactBlock);
/// Called upon receiving blocktxn message
fn on_blocktxn(&self, peer_index: PeerIndex, message: types::BlockTxn);
}
/// Network synchronization server node
pub trait NetworkServerNode: NetworkFiltersNode + NetworkOptionsNode {
/// Called upon receiving getdata message
fn on_getdata(&self, peer_index: PeerIndex, message: types::GetData);
/// Called upon receiving getblocks message
fn on_getblocks(&self, peer_index: PeerIndex, message: types::GetBlocks);
/// Called upon receiving getheaders message
fn on_getheaders(&self, peer_index: PeerIndex, message: types::GetHeaders, request_id: RequestId);
/// Called upon receiving mempool message
fn on_mempool(&self, peer_index: PeerIndex, message: types::MemPool);
/// Called upon receiving getblocktxn message
fn on_getblocktxn(&self, peer_index: PeerIndex, message: types::GetBlockTxn);
}
/// Network synchronization node with filters support
pub trait NetworkFiltersNode {
/// Called upon receiving filterload message
fn on_filterload(&self, peer_index: PeerIndex, message: types::FilterLoad);
/// Called upon receiving filteradd message
fn on_filteradd(&self, peer_index: PeerIndex, message: types::FilterAdd);
/// Called upon receiving filterclear message
fn on_filterclear(&self, peer_index: PeerIndex, message: types::FilterClear);
/// Called upon receiving feefilter message
fn on_feefilter(&self, peer_index: PeerIndex, message: types::FeeFilter);
}
/// Network synchronization node with options support
pub trait NetworkOptionsNode {
/// Called upon receiving sendheaders message
fn on_sendheaders(&self, peer_index: PeerIndex, message: types::SendHeaders);
/// Called upon receiving sendcmpct message
fn on_sendcmpct(&self, peer_index: PeerIndex, message: types::SendCompact);
}
/// Network synchronization node implementation
struct NetworkNodeImpl<TPeers: Peers, TExecutor: Executor, TClient: Client, TServer: Server> {
/// Network we are currently operating on
network: Magic,
/// Peers reference
peers: PeersRef<TPeers>,
/// Executor reference
executor: ExecutorRef<TExecutor>,
/// Synchronization client reference
client: ClientRef<TClient>,
/// Synchronization server
server: TServer,
}
/// Local synchronization node implementation
struct LocalNodeImpl<TClient: Client> {
/// Storage reference
storage: StorageRef,
/// Memory pool reference
memory_pool: MemoryPoolRef,
/// Synchronization client reference
client: ClientRef<TClient>,
}
impl<TPeers, TExecutor, TClient, TServer> NetworkNode for NetworkNodeImpl<TPeers, TExecutor, TClient, TServer>
where TPeers: Peers, TExecutor: Executor, TClient: Client, TServer: Server {
fn on_connect(&self, peer_index: PeerIndex, version: types::Version) {
trace!(target: "sync", "Starting new sync session with peer#{}", peer_index);
// light clients may not want transactions broadcasting until filter for connection is set
if !version.relay_transactions() {
self.peers.set_transaction_announcement_type(peer_index, TransactionAnnouncementType::DoNotAnnounce);
}
// ask peer for its block headers to find our best common block
// and possibly start sync session
self.executor.execute(Task::RequestBlocksHeaders(peer_index));
}
fn on_disconnect(&self, peer_index: PeerIndex) {
trace!(target: "sync", "Stopping sync session with peer#{}", peer_index);
// forget this connection
self.peers.remove(peer_index);
		// tell client that this peer has disconnected to adjust synchronization process
		self.client.on_disconnect(peer_index);
		// tell server that this peer has disconnected to stop serving its requests
self.server.on_disconnect(peer_index);
}
}
impl<TPeers, TExecutor, TClient, TServer> NetworkClientNode for NetworkNodeImpl<TPeers, TExecutor, TClient, TServer>
	where TPeers: Peers, TExecutor: Executor, TClient: Client, TServer: Server {
fn on_inventory(&self, peer_index: PeerIndex, message: types::Inv) {
trace!(target: "sync", "'inventory' message from peer#{}: {}", peer_index, message.short_info());
self.client.on_inventory(peer_index, message);
}
fn on_headers(&self, peer_index: PeerIndex, message: types::Headers) {
trace!(target: "sync", "'headers' message from peer#{}: {}", peer_index, message.short_info());
self.client.on_headers(peer_index, message);
}
fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound) {
trace!(target: "sync", "'notfound' message from peer#{}: {}", peer_index, message.short_info());
self.client.on_notfound(peer_index, message);
}
	fn on_block(&self, peer_index: PeerIndex, block: IndexedBlock) {
trace!(target: "sync", "'block' message from peer#{}: {}", peer_index, block.short_info());
self.client.on_block(peer_index, block);
}
fn on_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction) {
trace!(target: "sync", "'tx' message from peer#{}: {}", peer_index, transaction.short_info());
self.client.on_transaction(peer_index, transaction);
}
fn on_merkleblock(&self, peer_index: PeerIndex, message: types::MerkleBlock) {
trace!(target: "sync", "'merkleblock' message from peer#{}: {}", peer_index, message.short_info());
// we never setup filter on connections => misbehaving
self.peers.misbehaving(peer_index, format!("Got unrequested 'merkleblock' message: {}", message.long_info()));
}
fn on_cmpctblock(&self, peer_index: PeerIndex, message: types::CompactBlock) {
trace!(target: "sync", "'cmpctblock' message from peer#{}: {}", peer_index, message.short_info());
// we never ask for compact blocks && transactions => misbehaving
self.peers.misbehaving(peer_index, format!("Got unrequested 'cmpctblock' message: {}", message.long_info()));
}
fn on_blocktxn(&self, peer_index: PeerIndex, message: types::BlockTxn) {
trace!(target: "sync", "'blocktxn' message from peer#{}: {}", peer_index, message.short_info());
// we never ask for compact blocks && transactions => misbehaving
self.peers.misbehaving(peer_index, format!("Got unrequested 'blocktxn' message: {}", message.long_info()));
}
}
impl<TPeers, TExecutor, TClient, TServer> NetworkServerNode for NetworkNodeImpl<TPeers, TExecutor, TClient, TServer>
	where TPeers: Peers, TExecutor: Executor, TClient: Client, TServer: Server {
fn on_getdata(&self, peer_index: PeerIndex, message: types::GetData) {
// we do not serve any server requests during synchronization
if self.client.is_synchronizing() {
trace!(target: "sync", "'getdata' message from peer#{} ignored as sync is in progress: {}", peer_index, message.short_info());
return;
}
trace!(target: "sync", "'getdata' message from peer#{}: {}", peer_index, message.short_info());
		self.server.execute(ServerTask::ServeGetData(peer_index, message));
}
fn on_getblocks(&self, peer_index: PeerIndex, message: types::GetBlocks) {
// we do not serve any server requests during synchronization
if self.client.is_synchronizing() {
trace!(target: "sync", "'getblocks' message from peer#{} ignored as sync is in progress: {}", peer_index, message.short_info());
return;
}
trace!(target: "sync", "'getblocks' message from peer#{}: {}", peer_index, message.short_info());
		self.server.execute(ServerTask::ServeGetBlocks(peer_index, message));
}
fn on_getheaders(&self, peer_index: PeerIndex, message: types::GetHeaders, request_id: RequestId) {
// we do not serve any server requests during synchronization
if self.client.is_synchronizing() {
trace!(target: "sync", "'getheaders' message from peer#{} ignored as sync is in progress: {}", peer_index, message.short_info());
			self.executor.execute(Task::Ignore(request_id));
return;
}
trace!(target: "sync", "'getheaders' message from peer#{}: {}", peer_index, message.short_info());
// during regtests we should simulate bitcoind and respond to incoming requests in-order and 'synchronously':
// -> block, -> getheaders, -> ping, [wait for verification completed], <- headers, <- pong
let promise = match self.network {
Magic::Regtest => self.peer_promises.get(peer_index),
_ => Promise::completed(),
};
// so we are responding with headers only when block verification promise is completed
// requests are ordered in p2p module, so that no other requests are served until we respond or ignore request_id
		promise.and_then(|| self.server.execute(ServerTask::ServeGetHeaders(peer_index, message, request_id)));
}
fn on_mempool(&self, peer_index: PeerIndex, _message: types::MemPool) {
// we do not serve any server requests during synchronization
if self.client.is_synchronizing() {
trace!(target: "sync", "'mempool' message from peer#{} ignored as sync is in progress", peer_index);
return;
}
trace!(target: "sync", "'mempool' message from peer#{}", peer_index);
		self.server.execute(ServerTask::ServeMempool(peer_index));
}
fn on_getblocktxn(&self, peer_index: PeerIndex, message: types::GetBlockTxn) {
// we do not serve any server requests during synchronization
if self.client.is_synchronizing() {
trace!(target: "sync", "'getblocktxn' message from peer#{} ignored as sync is in progress: {}", peer_index, message.short_info());
return;
}
trace!(target: "sync", "'getblocktxn' message from peer#{}: {}", peer_index, message.short_info());
		self.server.execute(ServerTask::ServeGetBlockTxn(peer_index, message));
}
}
impl<TPeers, TExecutor, TClient, TServer> NetworkFiltersNode for NetworkNodeImpl<TPeers, TExecutor, TClient, TServer>
	where TPeers: Peers, TExecutor: Executor, TClient: Client, TServer: Server {
fn on_filterload(&self, peer_index: PeerIndex, message: types::FilterLoad) {
trace!(target: "sync", "'filterload' message from peer#{}: {}", peer_index, message.short_info());
self.peers.set_bloom_filter(peer_index, message);
}
fn on_filteradd(&self, peer_index: PeerIndex, message: types::FilterAdd) {
trace!(target: "sync", "'filteradd' message from peer#{}: {}", peer_index, message.short_info());
self.peers.update_bloom_filter(peer_index, message);
}
	fn on_filterclear(&self, peer_index: PeerIndex, _message: types::FilterClear) {
		trace!(target: "sync", "'filterclear' message from peer#{}", peer_index);
		self.peers.clear_bloom_filter(peer_index);
}
fn on_feefilter(&self, peer_index: PeerIndex, message: types::FeeFilter) {
trace!(target: "sync", "'feefilter' message from peer#{}: {}", peer_index, message.short_info());
self.peers.set_fee_filter(peer_index, message);
}
}
impl<TPeers, TExecutor, TClient, TServer> NetworkOptionsNode for NetworkNodeImpl<TPeers, TExecutor, TClient, TServer>
	where TPeers: Peers, TExecutor: Executor, TClient: Client, TServer: Server {
fn on_sendheaders(&self, peer_index: PeerIndex, _message: types::SendHeaders) {
trace!(target: "sync", "'sendheaders' message from peer#{}", peer_index);
self.peers.set_block_announcement_type(peer_index, BlockAnnouncementType::SendHeaders);
}
	fn on_sendcmpct(&self, peer_index: PeerIndex, _message: types::SendCompact) {
trace!(target: "sync", "'sendcmpct' message from peer#{}", peer_index);
		self.peers.set_block_announcement_type(peer_index, BlockAnnouncementType::SendCompactBlock);
}
}
impl<TClient> LocalClientNode for LocalNodeImpl<TClient> where TClient: Client {
fn push_block(&self, block: IndexedBlock) -> Promise {
trace!(target: "sync", "'block' is received from local client: {}", block.short_info());
self.client.on_block(LOCAL_PEER_INDEX, block)
}
fn push_transaction(&self, transaction: IndexedTransaction) -> Promise {
trace!(target: "sync", "'transaction' is received from local client: {}", transaction.short_info());
self.client.on_transaction(LOCAL_PEER_INDEX, transaction)
}
}

View File

@ -1,191 +0,0 @@
use std::collections::HashMap;
use parking_lot::RwLock;
use chain::{IndexedBlock, IndexedTransaction};
use message::types;
use p2p::OutboundSyncConnectionRef;
use synchronization_filter::Filter;
use types::PeerIndex;
/// Block announcement type
#[derive(Debug, Clone, Copy)]
pub enum BlockAnnouncementType {
/// Send inventory message with block hash [default behavior]
SendInventory,
/// Send headers message with block header
SendHeaders,
/// Send cmpctblock message with this block
SendCompactBlock,
/// Do not announce blocks at all
DoNotAnnounce,
}
/// Transaction announcement type
#[derive(Debug, Clone, Copy)]
pub enum TransactionAnnouncementType {
/// Send inventory message with transaction hash [default behavior]
SendInventory,
/// Do not announce transactions at all
DoNotAnnounce,
}
/// Connected peers
pub trait Peers : PeersContainer + PeersFilters + PeersOptions {
/// Get peer connection
fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef>;
}
/// Connected peers container
pub trait PeersContainer {
/// Insert new peer connection
fn insert(&self, peer_index: PeerIndex, connection: OutboundSyncConnectionRef);
/// Remove peer connection
fn remove(&self, peer_index: PeerIndex);
	/// Close and remove peer connection due to misbehaving
	fn misbehaving(&self, peer_index: PeerIndex, reason: String);
	/// Close and remove peer connection due to detected DOS attempt
	fn dos(&self, peer_index: PeerIndex, reason: String);
}
/// Filters for peers connections
pub trait PeersFilters {
/// Set up bloom filter for the connection
fn set_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterLoad);
/// Update bloom filter for the connection
fn update_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterAdd);
/// Clear bloom filter for the connection
fn clear_bloom_filter(&self, peer_index: PeerIndex);
/// Set up fee filter for the connection
fn set_fee_filter(&self, peer_index: PeerIndex, filter: types::FeeFilter);
	/// Is block passing filters for the connection
	fn filter_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> bool;
	/// Is transaction passing filters for the connection
	fn filter_transaction(&self, peer_index: PeerIndex, transaction: &IndexedTransaction) -> bool;
}
/// Options for peers connections
pub trait PeersOptions {
/// Set up new block announcement type for the connection
fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType);
/// Set up new transaction announcement type for the connection
fn set_transaction_announcement_type(&self, peer_index: PeerIndex, announcement_type: TransactionAnnouncementType);
/// Get block announcement type for the connection
fn block_announcement_type(&self, peer_index: PeerIndex) -> BlockAnnouncementType;
/// Get transaction announcement type for the connection
fn transaction_announcement_type(&self, peer_index: PeerIndex) -> TransactionAnnouncementType;
}
/// Single connected peer data
struct Peer {
/// Connection to this peer
pub connection: OutboundSyncConnectionRef,
/// Connection filter
pub filter: Filter,
/// Block announcement type
pub block_announcement_type: BlockAnnouncementType,
/// Transaction announcement type
pub transaction_announcement_type: TransactionAnnouncementType,
}
/// Default implementation of connected peers container
struct PeersImpl {
	/// All connected peers. Most of the time this field is accessed in read mode,
	/// so this lock shouldn't be a performance problem.
peers: RwLock<HashMap<PeerIndex, Peer>>,
}
impl Peers for PeersImpl {
	fn connection(&self, peer_index: PeerIndex) -> Option<OutboundSyncConnectionRef> {
		self.peers.read().get(&peer_index).map(|peer| peer.connection.clone())
	}
}
impl PeersContainer for PeersImpl {
	fn insert(&self, peer_index: PeerIndex, connection: OutboundSyncConnectionRef) {
		trace!(target: "sync", "Connected to peer#{}", peer_index);
		// new peers start with no filter and default (inventory) announcements
		// (assuming Filter provides a permissive Default)
		let peer = Peer {
			connection: connection,
			filter: Filter::default(),
			block_announcement_type: BlockAnnouncementType::SendInventory,
			transaction_announcement_type: TransactionAnnouncementType::SendInventory,
		};
		assert!(self.peers.write().insert(peer_index, peer).is_none());
	}
	fn remove(&self, peer_index: PeerIndex) {
		if self.peers.write().remove(&peer_index).is_some() {
			trace!(target: "sync", "Disconnected from peer#{}", peer_index);
		}
	}
	fn misbehaving(&self, peer_index: PeerIndex, reason: String) {
		if let Some(peer) = self.peers.write().remove(&peer_index) {
			trace!(target: "sync", "Disconnecting from peer#{} due to misbehaving: {}", peer_index, reason);
			peer.connection.close();
		}
	}
	fn dos(&self, peer_index: PeerIndex, reason: String) {
		if let Some(peer) = self.peers.write().remove(&peer_index) {
			trace!(target: "sync", "Disconnecting from peer#{} due to DOS: {}", peer_index, reason);
			peer.connection.close();
		}
	}
}
impl PeersFilters for PeersImpl {
	fn set_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterLoad) {
		if let Some(peer) = self.peers.write().get_mut(&peer_index) {
			peer.filter.set_bloom_filter(filter);
		}
	}
	fn update_bloom_filter(&self, peer_index: PeerIndex, filter: types::FilterAdd) {
		if let Some(peer) = self.peers.write().get_mut(&peer_index) {
			peer.filter.add_bloom_filter(filter);
		}
	}
	fn clear_bloom_filter(&self, peer_index: PeerIndex) {
		if let Some(peer) = self.peers.write().get_mut(&peer_index) {
			peer.filter.clear_bloom_filter();
		}
	}
	fn set_fee_filter(&self, peer_index: PeerIndex, filter: types::FeeFilter) {
		if let Some(peer) = self.peers.write().get_mut(&peer_index) {
			peer.filter.set_fee_filter(filter.fee_rate);
		}
	}
	fn filter_block(&self, peer_index: PeerIndex, block: &IndexedBlock) -> bool {
		self.peers.read().get(&peer_index)
			.map(|peer| peer.filter.filter_block(block))
			.unwrap_or(false)
	}
	fn filter_transaction(&self, peer_index: PeerIndex, transaction: &IndexedTransaction) -> bool {
		self.peers.read().get(&peer_index)
			.map(|peer| peer.filter.filter_transaction(transaction))
			.unwrap_or(false)
	}
}
impl PeersOptions for PeersImpl {
	fn set_block_announcement_type(&self, peer_index: PeerIndex, announcement_type: BlockAnnouncementType) {
		if let Some(peer) = self.peers.write().get_mut(&peer_index) {
			peer.block_announcement_type = announcement_type;
		}
	}
	fn set_transaction_announcement_type(&self, peer_index: PeerIndex, announcement_type: TransactionAnnouncementType) {
		if let Some(peer) = self.peers.write().get_mut(&peer_index) {
			peer.transaction_announcement_type = announcement_type;
		}
	}
	fn block_announcement_type(&self, peer_index: PeerIndex) -> BlockAnnouncementType {
		self.peers.read()
			.get(&peer_index)
			.map(|peer| peer.block_announcement_type)
			.unwrap_or(BlockAnnouncementType::DoNotAnnounce)
	}
	fn transaction_announcement_type(&self, peer_index: PeerIndex) -> TransactionAnnouncementType {
		self.peers.read()
			.get(&peer_index)
			.map(|peer| peer.transaction_announcement_type)
			.unwrap_or(TransactionAnnouncementType::DoNotAnnounce)
	}
}

View File

@ -1,177 +0,0 @@
use std::collections::{HashSet, HashMap};
use linked_hash_map::LinkedHashMap;
use primitives::hash::H256;
use types::PeerIndex;
use utils::AverageSpeedMeter;
/// Synchronization peers tasks
pub trait PeersTasks {
/// Return all available peers
fn all(&self) -> &HashSet<PeerIndex>;
/// Return idle peers for headers requests
fn idle_for_headers(&self) -> &HashSet<PeerIndex>;
/// Return idle peers for blocks requests
fn idle_for_blocks(&self) -> &HashSet<PeerIndex>;
/// Mark useful peer
fn mark_useful(&mut self, peer_index: PeerIndex);
/// Mark unuseful peer
fn mark_unuseful(&mut self, peer_index: PeerIndex);
/// Reset peer blocks tasks
fn reset_blocks_tasks(&mut self, peer_index: PeerIndex) -> Vec<H256>;
/// When new peer is connected
fn on_peer_connected(&mut self, peer_index: PeerIndex);
/// When new peer is disconnected
fn on_peer_disconnected(&mut self, peer_index: PeerIndex);
/// When headers are received from peer
fn on_headers_received(&mut self, peer_index: PeerIndex);
/// When block received from peer
fn on_block_received(&mut self, peer_index: PeerIndex, hash: &H256);
	/// When response for headers request hasn't been received for too long.
	/// Returns true if peer has been removed from useful peers list.
	fn on_headers_failure(&mut self, peer_index: PeerIndex) -> bool;
	/// When response for blocks request hasn't been received for too long.
	/// Returns the new number of blocks request failures for this peer.
	fn on_block_failure(&mut self, peer_index: PeerIndex) -> usize;
}
/// Synchronization peers tasks implementation
pub struct PeersTasksImpl {
/// Synchronization peers stats
stats: HashMap<PeerIndex, PeerStats>,
/// All available peers indexes
all: HashSet<PeerIndex>,
/// Peers, which are available for headers requests
idle_for_headers: HashSet<PeerIndex>,
/// Peers, which are available for blocks requests
idle_for_blocks: HashSet<PeerIndex>,
/// Active headers requests
active_headers: LinkedHashMap<PeerIndex, HeadersRequest>,
/// Active blocks requests
active_blocks: LinkedHashMap<PeerIndex, BlocksRequest>,
}
/// Headers request
struct HeadersRequest {
/// Time of request
pub time: f64,
}
/// Blocks request
struct BlocksRequest {
	/// Time of request
	pub time: f64,
	/// Requested blocks
	pub blocks: LinkedHashMap<H256, ()>,
}
/// Synchronization peer tasks statistics
#[derive(Debug, Default)]
struct PeerStats {
/// Number of blocks requests failures
pub blocks_failures: usize,
/// Average block response time meter
pub blocks_response_meter: AverageSpeedMeter,
}
impl PeersTasks for PeersTasksImpl {
fn all(&self) -> &HashSet<PeerIndex> {
&self.all
}
fn idle_for_headers(&self) -> &HashSet<PeerIndex> {
&self.idle_for_headers
}
fn idle_for_blocks(&self) -> &HashSet<PeerIndex> {
&self.idle_for_blocks
}
	fn mark_useful(&mut self, peer_index: PeerIndex) {
		let has_active_blocks_requests = self.active_blocks.contains_key(&peer_index);
		if !has_active_blocks_requests {
			self.idle_for_blocks.insert(peer_index);
		}
		if !has_active_blocks_requests && !self.active_headers.contains_key(&peer_index) {
			self.idle_for_headers.insert(peer_index);
		}
	}
	fn mark_unuseful(&mut self, peer_index: PeerIndex) {
		self.idle_for_headers.remove(&peer_index);
		self.idle_for_blocks.remove(&peer_index);
		self.active_headers.remove(&peer_index);
		self.active_blocks.remove(&peer_index);
	}
	fn reset_blocks_tasks(&mut self, peer_index: PeerIndex) -> Vec<H256> {
		self.active_blocks.remove(&peer_index)
			.map(|request| request.blocks.into_iter().map(|(hash, _)| hash).collect())
			.unwrap_or_default()
	}
fn on_peer_connected(&mut self, peer_index: PeerIndex) {
self.stats.insert(peer_index, PeerStats::default());
self.all.insert(peer_index);
self.idle_for_headers.insert(peer_index);
self.idle_for_blocks.insert(peer_index);
}
	fn on_peer_disconnected(&mut self, peer_index: PeerIndex) {
		self.stats.remove(&peer_index);
		self.all.remove(&peer_index);
		self.idle_for_headers.remove(&peer_index);
		self.idle_for_blocks.remove(&peer_index);
		self.active_headers.remove(&peer_index);
		self.active_blocks.remove(&peer_index);
	}
	fn on_headers_received(&mut self, peer_index: PeerIndex) {
		self.active_headers.remove(&peer_index);
		// we only ask for new headers when peer is also not asked for blocks
		// => only insert to idle queue if no active blocks requests
		if !self.active_blocks.contains_key(&peer_index) {
			self.idle_for_headers.insert(peer_index);
		}
	}
	fn on_block_received(&mut self, peer_index: PeerIndex, hash: &H256) {
		let is_last_requested_block_received = if let Some(blocks_request) = self.active_blocks.get_mut(&peer_index) {
			// if block hasn't been requested => do nothing
			if blocks_request.blocks.remove(hash).is_none() {
				return;
			}
			blocks_request.blocks.is_empty()
		} else {
			// this peer hasn't been asked for blocks at all
			return;
		};
		// it was a requested block => update block response time
		if let Some(stats) = self.stats.get_mut(&peer_index) {
			stats.blocks_response_meter.checkpoint();
		}
		// if it wasn't the last requested block => just return
		if !is_last_requested_block_received {
			return;
		}
		// mark this peer as idle for blocks requests
		self.active_blocks.remove(&peer_index);
		self.idle_for_blocks.insert(peer_index);
		// also mark as available for headers requests, if not yet
		if !self.active_headers.contains_key(&peer_index) {
			self.idle_for_headers.insert(peer_index);
		}
	}
	fn on_headers_failure(&mut self, peer_index: PeerIndex) -> bool {
		// we never penalize peers for headers requests failures
		self.active_headers.remove(&peer_index);
		self.idle_for_headers.insert(peer_index);
		false
	}
	fn on_block_failure(&mut self, peer_index: PeerIndex) -> usize {
		if let Some(stats) = self.stats.get_mut(&peer_index) {
			stats.blocks_failures += 1;
			stats.blocks_failures
		} else {
			0
		}
	}
}

View File

@ -1,399 +0,0 @@
use std::collections::{HashSet, HashMap, VecDeque};
use std::collections::hash_map::Entry;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use parking_lot::{Condvar, Mutex};
use message::{types, common};
use primitives::hash::H256;
use synchronization_executor::{Task, Executor};
use types::{BlockHeight, PeerIndex, RequestId, ExecutorRef, MemoryPoolRef, StorageRef};
/// Synchronization server task
pub enum ServerTask {
/// Serve 'getdata' request
ServeGetData(PeerIndex, types::GetData),
/// Serve reversed 'getdata' request
ServeReversedGetData(PeerIndex, types::GetData, types::NotFound),
/// Serve 'getblocks' request
ServeGetBlocks(PeerIndex, types::GetBlocks),
/// Serve 'getheaders' request
ServeGetHeaders(PeerIndex, types::GetHeaders, RequestId),
/// Serve 'mempool' request
ServeMempool(PeerIndex),
/// Serve 'getblocktxn' request
ServeGetBlockTxn(PeerIndex, types::GetBlockTxn),
}
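// Helper assumed by ServerQueue::add_task below (the shown hunk elides lines);
// a minimal sketch reconstructed from the task.peer() call site.
impl ServerTask {
	/// Index of the peer this task is served for
	pub fn peer(&self) -> PeerIndex {
		match *self {
			ServerTask::ServeGetData(peer_index, _) => peer_index,
			ServerTask::ServeReversedGetData(peer_index, _, _) => peer_index,
			ServerTask::ServeGetBlocks(peer_index, _) => peer_index,
			ServerTask::ServeGetHeaders(peer_index, _, _) => peer_index,
			ServerTask::ServeMempool(peer_index) => peer_index,
			ServerTask::ServeGetBlockTxn(peer_index, _) => peer_index,
		}
	}
}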
/// Synchronization server
pub trait Server {
	/// Execute single synchronization task (peer index is embedded in the task)
	fn execute(&self, task: ServerTask);
/// Called when connection is closed
fn on_disconnect(&self, peer_index: PeerIndex);
}
/// Synchronization server implementation
pub struct ServerImpl {
/// New task is ready for execution in the queue
queue_ready: Arc<Condvar>,
/// Tasks queue
queue: Arc<Mutex<ServerQueue>>,
/// Worker thread join handle
worker_thread: Option<thread::JoinHandle<()>>,
}
/// Synchronization server tasks queue
struct ServerQueue {
/// True if server is stopping
is_stopping: AtomicBool,
/// New task is ready for execution in the queue
queue_ready: Arc<Condvar>,
/// Round-robin peers serving queue
peers_queue: VecDeque<usize>,
/// Scheduled tasks for each peer in execution order
tasks_queue: HashMap<usize, VecDeque<ServerTask>>,
}
/// Synchronization server task executor
struct ServerTaskExecutor<TExecutor: Executor> {
/// Storage reference
storage: StorageRef,
/// Memory pool reference
memory_pool: MemoryPoolRef,
/// Executor reference
executor: ExecutorRef<TExecutor>,
}
impl ServerImpl {
	pub fn new<TExecutor: Executor>(storage: StorageRef, memory_pool: MemoryPoolRef, executor: ExecutorRef<TExecutor>) -> Self {
		let server_executor = ServerTaskExecutor::new(storage, memory_pool, executor);
		let queue_ready = Arc::new(Condvar::new());
		let queue = Arc::new(Mutex::new(ServerQueue::new(queue_ready.clone())));
		let worker_queue_ready = queue_ready.clone();
		let worker_queue = queue.clone();
		ServerImpl {
			queue_ready: queue_ready,
			queue: queue,
			worker_thread: Some(thread::spawn(move || ServerImpl::start(worker_queue_ready, worker_queue, server_executor))),
		}
	}
	fn start<TExecutor: Executor>(queue_ready: Arc<Condvar>, queue: Arc<Mutex<ServerQueue>>, executor: ServerTaskExecutor<TExecutor>) {
		loop {
			let task = {
				let mut queue = queue.lock();
				if queue.is_stopping.load(Ordering::SeqCst) {
					break;
				}
				queue.next_task()
					.or_else(|| {
						queue_ready.wait(&mut queue);
						queue.next_task()
					})
			};
			let task = match task {
				Some(server_task) => server_task,
				// no tasks after wake-up => loop around to re-check the stop flag
				_ => continue,
			};
			// some tasks (e.g. reversed 'getdata') return a continuation => re-queue it
			// so that other peers are not starved
			if let Some(next_task) = executor.execute(task) {
				queue.lock().add_task(next_task);
			}
		}
	}
}
impl Server for ServerImpl {
fn execute(&self, task: ServerTask) {
self.queue.lock().add_task(task);
}
fn on_disconnect(&self, peer_index: PeerIndex) {
self.queue.lock().remove_peer_tasks(peer_index);
}
}
impl ServerQueue {
pub fn new(queue_ready: Arc<Condvar>) -> Self {
ServerQueue {
is_stopping: AtomicBool::new(false),
queue_ready: queue_ready,
peers_queue: VecDeque::new(),
tasks_queue: HashMap::new(),
}
}
pub fn next_task(&mut self) -> Option<ServerTask> {
self.peers_queue.pop_front()
.map(|peer_index| {
let (peer_task, is_last_peer_task) = {
let peer_tasks = self.tasks_queue.get_mut(&peer_index)
.expect("entry from tasks_queue is removed when empty; when empty, peer is removed from peers_queue; qed");
let peer_task = peer_tasks.pop_front()
.expect("entry from peer_tasks is removed when empty; when empty, peer is removed from peers_queue; qed");
(peer_task, peer_tasks.is_empty())
};
			// re-schedule peer to the back of the queue if it has more tasks; forget it otherwise
			if !is_last_peer_task {
				self.peers_queue.push_back(peer_index);
			} else {
				self.tasks_queue.remove(&peer_index);
			}
peer_task
})
}
pub fn add_task(&mut self, task: ServerTask) {
let peer = task.peer();
match self.tasks_queue.entry(peer) {
Entry::Occupied(mut entry) => {
let add_to_peers_queue = entry.get().is_empty();
entry.get_mut().push_back(task);
if add_to_peers_queue {
self.peers_queue.push_back(peer);
}
},
Entry::Vacant(entry) => {
let mut new_tasks = VecDeque::new();
new_tasks.push_back(task);
entry.insert(new_tasks);
self.peers_queue.push_back(peer);
}
}
self.queue_ready.notify_one();
}
pub fn remove_peer_tasks(&mut self, peer_index: PeerIndex) {
if self.tasks_queue.remove(&peer_index).is_some() {
			let position = self.peers_queue.iter().position(|idx| *idx == peer_index)
.expect("tasks for peer are in tasks_queue; peers_queue and tasks_queue have same set of peers; qed");
self.peers_queue.remove(position);
}
}
}
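// Scheduling example: with tasks A1, A2 queued for peer A and B1 for peer B
// (arrival order A1, A2, B1), next_task() yields A1, then B1, then A2 - after
// serving one task, a peer with remaining tasks is rotated to the back of peers_queue.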
impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: Executor {
	fn serve_get_data(&self, peer_index: PeerIndex, mut message: types::GetData) -> Option<ServerTask> {
		// getdata request is served one item at a time, by popping values from the back
		// of the inventory vector
// => to respond in given order, we have to reverse blocks inventory here
message.inventory.reverse();
// + while iterating by items, also accumulate unknown items to respond with notfound
let notfound = types::NotFound { inventory: Vec::new(), };
Some(ServerTask::ServeReversedGetData(peer_index, message, notfound))
}
fn serve_reversed_get_data(&self, peer_index: PeerIndex, mut message: types::GetData, mut notfound: types::NotFound) -> Option<ServerTask> {
let next_item = match message.inventory.pop() {
None => {
if !notfound.inventory.is_empty() {
trace!(target: "sync", "'getdata' from peer#{} container unknown items: {}", peer_index, notfound.short_info());
self.executor.execute(Task::NotFound(peer_index, notfound));
}
return None;
},
Some(next_item) => next_item,
};
match next_item.inv_type {
common::InventoryType::MessageTx => {
				// only transactions from the memory pool can be requested
if let Some(transaction) = self.memory_pool.read().read_by_hash(&next_item.hash) {
let message = types::Tx::with_transaction(transaction);
trace!(target: "sync", "'getblocks' tx response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendTransaction(peer_index, message));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageBlock => {
				if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
					let message = types::Block::with_block(block);
					trace!(target: "sync", "'getdata' block response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendBlock(peer_index, message));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageFilteredBlock => {
				if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
					let message = self.peers.build_filtered_block(peer_index, block.into());
					trace!(target: "sync", "'getdata' merkleblock response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendMerkleBlock(peer_index, message));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageCompactBlock => {
				if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
					let message = self.peers.build_compact_block(peer_index, block.into());
					trace!(target: "sync", "'getdata' compactblock response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendCompactBlock(peer_index, message));
} else {
notfound.inventory.push(next_item);
}
			},
			// unknown inventory types are reported back via 'notfound'
			// (assumes InventoryType has variants beyond the four handled above)
			_ => notfound.inventory.push(next_item),
		}
Some(ServerTask::ServeReversedGetData(peer_index, message, notfound))
}
fn serve_get_blocks(&self, peer_index: PeerIndex, message: types::GetBlocks) {
if let Some(block_height) = self.locate_best_common_block(&message.hash_stop, &message.block_locator_hashes) {
			let inventory: Vec<_> = (block_height + 1..block_height + 1 + types::GetHeaders::MAX_BLOCKS)
				.map(|block_height| self.storage.block_hash(block_height))
				.take_while(Option::is_some)
				.map(|block_hash| block_hash.expect("take_while above; qed"))
				.take_while(|block_hash| block_hash != &message.hash_stop)
				.map(common::InventoryVector::block)
				.collect();
// empty inventory messages are invalid according to regtests, while empty headers messages are valid
if !inventory.is_empty() {
let message = types::Inv::with_inventory(inventory);
trace!(target: "sync", "'getblocks' response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendInventory(peer_index, message));
} else {
trace!(target: "sync", "'getblocks' request from peer#{} is ignored as there are no new blocks for peer", peer_index);
}
} else {
self.peers.misbehaving(peer_index, format!("Got 'getblocks' message without known blocks: {}", message.long_info()));
return;
}
}
fn serve_get_headers(&self, peer_index: PeerIndex, message: types::GetHeaders) {
if let Some(block_height) = self.locate_best_common_block(&message.hash_stop, &message.block_locator_hashes) {
			let headers = (block_height + 1..block_height + 1 + types::GetHeaders::MAX_HEADERS)
				.map(|block_height| self.storage.block_hash(block_height))
				.take_while(Option::is_some)
				.map(|block_hash| block_hash.expect("take_while above; qed"))
				.take_while(|block_hash| block_hash != &message.hash_stop)
				.filter_map(|block_hash| self.storage.block_header(block_hash.into()))
				.collect();
// empty inventory messages are invalid according to regtests, while empty headers messages are valid
let message = types::Headers {
headers: headers,
};
trace!(target: "sync", "'getheaders' response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendHeaders(peer_index, message));
} else {
self.peers.misbehaving(peer_index, format!("Got 'headers' message without known blocks: {}", message.long_info()));
return;
}
}
fn serve_mempool(&self, peer_index: PeerIndex) {
		let inventory: Vec<_> = self.memory_pool.read()
			.get_transactions_ids()
			.into_iter()
			.map(common::InventoryVector::tx)
			.collect();
// empty inventory messages are invalid according to regtests, while empty headers messages are valid
if !inventory.is_empty() {
let message = types::Inv::with_inventory(inventory);
trace!(target: "sync", "'mempool' response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendInventory(peer_index, message));
} else {
trace!(target: "sync", "'mempool' request from peer#{} is ignored as pool is empty", peer_index);
}
}
fn serve_get_block_txn(&self, peer_index: PeerIndex, message: types::GetBlockTxn) {
		// according to protocol documentation, we should only respond
		// if the requested block has been recently sent in 'cmpctblock'
if !self.peers.is_sent_as(&message.request.blockhash, common::InventoryType::MessageCompactBlock) {
self.peers.misbehaving(peer_index, format!("Got 'getblocktxn' message for non-sent block: {}", message.long_info()));
return;
}
let block_transactions = match self.storage.block_transaction_hashes(message.request.blockhash.clone().into()) {
None => {
// we have checked that this block has been sent recently
// => this is either some db error, or db has been pruned
// => ignore
warn!(target: "sync", "'getblocktxn' request from peer#{} is ignored as there are no transactions in storage", peer_index);
return;
},
Some(block_transactions) => block_transactions,
};
let block_transactions_len = block_transactions.len();
let requested_len = message.request.indexes.len();
		if requested_len > block_transactions_len {
			// peer has requested more transactions than there are
			self.peers.misbehaving(peer_index, format!("Got 'getblocktxn' message with more transactions than there are: {}", message.long_info()));
return;
}
let mut requested_indexes = HashSet::new();
let mut transactions = Vec::with_capacity(message.request.indexes.len());
for transaction_index in message.request.indexes {
			if transaction_index >= block_transactions_len {
				// peer has requested an index larger than the index of the last transaction
				self.peers.misbehaving(peer_index, format!("Got 'getblocktxn' message with index larger than index of last transaction: {}", message.long_info()));
return;
}
			if !requested_indexes.insert(transaction_index) {
// peer has requested same index several times
self.peers.misbehaving(peer_index, format!("Got 'getblocktxn' message where same index has been requested several times: {}", message.long_info()));
return;
}
if let Some(transaction) = self.storage.transaction(block_transactions[transaction_index]) {
transactions.push(transaction);
} else {
// we have just got this hash using block_transactions_hashes
// => this is either some db error, or db has been pruned
// => we can not skip transactions, according to protocol description
// => ignore
warn!(target: "sync", "'getblocktxn' request from peer#{} is ignored as we have failed to find transaction {} in storage", peer_index, block_transactions[transaction_index].to_reversed_str());
return;
}
}
let message = types::BlockTxn {
request: common::BlockTransactions {
blockhash: message.request.blockhash,
transactions: transactions,
},
};
trace!(target: "sync", "'getblocktxn' response to peer#{} is ready: {}", peer_index, message.short_info());
self.executor.execute(Task::SendBlockTxn(peer_index, message));
}
	fn locate_best_common_block(&self, hash_stop: &H256, locator: &[H256]) -> Option<BlockHeight> {
		for block_hash in locator.iter().chain(::std::iter::once(hash_stop)) {
			if let Some(block_number) = self.storage.block_number(block_hash) {
				return Some(block_number);
			}
			// block with this hash is definitely not in the main chain (block_number has returned None)
			// but maybe it is in some fork? if so => we should find its intersection with the main chain
			// and this would be our best common block
			let mut block_hash = block_hash.clone();
			loop {
				let block_header = match self.storage.block_header(block_hash.clone().into()) {
					None => break,
					Some(block_header) => block_header,
				};
				if let Some(block_number) = self.storage.block_number(&block_header.previous_header_hash) {
					return Some(block_number);
				}
				block_hash = block_header.previous_header_hash;
			}
		}
		None
	}
}
impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: Executor {
	fn execute(&self, task: ServerTask) -> Option<ServerTask> {
		match task {
			ServerTask::ServeGetData(peer_index, message) => self.serve_get_data(peer_index, message),
			ServerTask::ServeReversedGetData(peer_index, message, notfound) => self.serve_reversed_get_data(peer_index, message, notfound),
			ServerTask::ServeGetBlocks(peer_index, message) => { self.serve_get_blocks(peer_index, message); None },
			ServerTask::ServeGetHeaders(peer_index, message, _request_id) => { self.serve_get_headers(peer_index, message); None },
			ServerTask::ServeMempool(peer_index) => { self.serve_mempool(peer_index); None },
			ServerTask::ServeGetBlockTxn(peer_index, message) => { self.serve_get_block_txn(peer_index, message); None },
		}
	}
}

View File

@ -1,163 +0,0 @@
use std::collections::{HashSet, VecDeque};
use linked_hash_map::LinkedHashMap;
use chain::IndexedTransaction;
use primitives::hash::H256;
use types::{MemoryPoolRef, StorageRef};
use utils::OrphanTransactionsPool;
/// Transactions synchronization state
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum TransactionState {
/// Transaction is unknown
Unknown,
/// Orphan
Orphan,
/// Currently verifying
Verifying,
/// In memory pool
InMemory,
/// In storage
Stored,
}
/// Synchronization chain information
pub struct Information {
/// Number of orphaned transactions
pub orphaned: usize,
/// Number of transactions currently verifying
pub verifying: usize,
/// Number of transactions currently resided in memory pool
pub in_memory: usize,
}
/// Blockchain transactions from synchronization point of view, consisting of:
/// 1) all transactions from the `storage` [oldest transactions]
/// 2) all transactions currently residing in memory pool
/// 3) all transactions currently verifying by `synchronization_verifier`
pub trait TransactionsQueue {
/// Returns queue information
fn information(&self) -> Information;
/// Returns state of transaction with given hash
fn transaction_state(&self, hash: &H256) -> TransactionState;
/// Insert orphan transaction
fn insert_orphan_transaction(&mut self, transaction: IndexedTransaction, unknown_inputs: HashSet<H256>);
/// Remove orphan transactions, which depends on given transaction.
fn remove_orphan_children(&mut self, hash: &H256) -> Vec<IndexedTransaction>;
/// Insert verifying transaction.
fn insert_verifying_transaction(&mut self, transaction: IndexedTransaction);
/// Forget verifying transaction.
fn forget_verifying_transaction(&mut self, hash: &H256) -> bool;
/// Forget verifying transaction and all verifying children.
fn forget_verifying_transaction_with_children(&mut self, hash: &H256);
/// Insert verified transaction to memory pool.
fn insert_verified_transaction(&mut self, transaction: IndexedTransaction);
}
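// Typical transaction lifecycle through these states:
// Unknown -> Orphan (some inputs are unknown) -> Verifying (all inputs arrived)
// -> InMemory (verification succeeded) -> Stored (transaction was mined into a block).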
/// Transactions queue implementation
pub struct TransactionsQueueImpl {
/// Orphaned transactions
orphan_pool: OrphanTransactionsPool,
/// Currently verifying transactions
verifying: LinkedHashMap<H256, IndexedTransaction>,
/// Transactions memory pool
memory_pool: MemoryPoolRef,
/// Storage reference
storage: StorageRef,
}
impl TransactionsQueueImpl {
pub fn new(memory_pool: MemoryPoolRef, storage: StorageRef) -> Self {
TransactionsQueueImpl {
orphan_pool: OrphanTransactionsPool::new(),
verifying: LinkedHashMap::new(),
memory_pool: memory_pool,
storage: storage,
}
}
}
impl TransactionsQueue for TransactionsQueueImpl {
fn information(&self) -> Information {
Information {
orphaned: self.orphan_pool.len(),
verifying: self.verifying.len(),
in_memory: self.memory_pool.read().information().transactions_count,
}
}
fn transaction_state(&self, hash: &H256) -> TransactionState {
if self.orphan_pool.contains(hash) {
return TransactionState::Orphan;
}
		if self.verifying.contains_key(hash) {
return TransactionState::Verifying;
}
if self.storage.contains_transaction(hash) {
return TransactionState::Stored;
}
if self.memory_pool.read().contains(hash) {
return TransactionState::InMemory;
}
TransactionState::Unknown
}
fn insert_orphan_transaction(&mut self, transaction: IndexedTransaction, unknown_inputs: HashSet<H256>) {
self.orphan_pool.insert(transaction, unknown_inputs);
}
fn remove_orphan_children(&mut self, hash: &H256) -> Vec<IndexedTransaction> {
self.orphan_pool.remove_transactions_for_parent(hash)
}
fn insert_verifying_transaction(&mut self, transaction: IndexedTransaction) {
self.verifying.insert(transaction.hash.clone(), transaction);
}
fn forget_verifying_transaction(&mut self, hash: &H256) -> bool {
self.verifying.remove(hash).is_some()
}
fn forget_verifying_transaction_with_children(&mut self, hash: &H256) {
self.forget_verifying_transaction(hash);
// TODO: suboptimal
let mut queue: VecDeque<H256> = VecDeque::new();
queue.push_back(hash.clone());
while let Some(hash) = queue.pop_front() {
			let all_keys: Vec<_> = self.verifying.keys().cloned().collect();
for h in all_keys {
let remove_verifying_transaction = {
if let Some(entry) = self.verifying.get(&h) {
						if entry.raw.inputs.iter().any(|i| i.previous_output.hash == hash) {
queue.push_back(h.clone());
true
} else {
false
}
} else {
// iterating by previously read keys
unreachable!()
}
};
if remove_verifying_transaction {
self.verifying.remove(&h);
}
}
}
}
fn insert_verified_transaction(&mut self, transaction: IndexedTransaction) {
// we have verified transaction, but possibly this transaction replaces
// existing transaction from memory pool
// => remove previous transactions before
		let mut memory_pool = self.memory_pool.write();
for input in &transaction.raw.inputs {
memory_pool.remove_by_prevout(&input.previous_output);
}
// now insert transaction itself
memory_pool.insert_verified(transaction);
}
}

View File

@ -1,144 +0,0 @@
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::mpsc::Sender;
use time;
use chain::{IndexedBlock, IndexedTransaction};
use db::{TransactionOutputObserver, PreviousTransactionOutputProvider};
use types::ChainRef;
use verification::{ChainVerifier, Chain as VerificationChain};
/// Verification task
pub enum VerificationTask {
	/// Verify single block
	VerifyBlock(IndexedBlock),
	/// Verify single (mempool) transaction at given block height
	VerifyTransaction(u32, IndexedTransaction),
	/// Stop verification thread (assumed variant: the catch-all arm in
	/// execute_verification_task implies something beyond the two above)
	Stop,
}
/// Ordered verification tasks
pub type VerificationTasks = VecDeque<VerificationTask>;
/// Block verifier
pub trait BlockVerifier {
/// Verify block
fn verify_block(&self, block: IndexedBlock);
}
/// Transaction verifier
pub trait TransactionVerifier {
	/// Verify transaction
	fn verify_transaction(&self, height: u32, transaction: IndexedTransaction);
}
/// Verifier
pub trait Verifier: BlockVerifier + TransactionVerifier {
}
/// Block verification events sink
pub trait BlockVerifierSink {
/// When block verification has completed successfully.
fn on_block_verification_success(&self, block: IndexedBlock) -> Option<Vec<VerificationTask>>;
/// When block verification has failed.
fn on_block_verification_error(&self, err: &str, block: IndexedBlock);
}
/// Transaction verification events sink
pub trait TransactionVerifierSink {
/// When transaction verification has completed successfully.
fn on_transaction_verification_success(&self, tx: IndexedTransaction);
/// When transaction verification has failed.
fn on_transaction_verification_error(&self, err: &str, tx: IndexedTransaction);
}
/// Verification events sink
pub trait VerifierSink: BlockVerifierSink + TransactionVerifierSink {
}
/// Asynchronous verifier implementation
pub struct AsyncVerifierImpl {
	/// Verification work sender (field reconstructed from its usage below)
	work_sender: Sender<VerificationTask>,
}
/// Synchronous verifier implementation
pub struct SyncVerifierImpl {
/// Verifier itself
verifier: ChainVerifier,
/// Synchronization chain reference
chain: ChainRef,
}
impl BlockVerifier for AsyncVerifierImpl {
	fn verify_block(&self, block: IndexedBlock) {
		self.work_sender
			.send(VerificationTask::VerifyBlock(block))
			.expect("Verification thread has the same lifetime as `AsyncVerifier`");
	}
}
impl TransactionVerifier for AsyncVerifierImpl {
	/// Verify transaction
	fn verify_transaction(&self, height: u32, transaction: IndexedTransaction) {
		self.work_sender
			.send(VerificationTask::VerifyTransaction(height, transaction))
			.expect("Verification thread has the same lifetime as `AsyncVerifier`");
	}
}
impl Verifier for AsyncVerifierImpl {
}
impl BlockVerifier for SyncVerifierImpl {
	fn verify_block(&self, block: IndexedBlock) {
		// synchronous sketch: the original body was copied from the asynchronous
		// verifier and referenced a non-existent work_sender => verify in-place instead
		if let Err(e) = self.verifier.verify(&block) {
			warn!(target: "sync", "Synchronous block verification failed: {:?}", e);
		}
	}
}
impl TransactionVerifier for SyncVerifierImpl {
	/// Verify transaction
	fn verify_transaction(&self, _height: u32, _transaction: IndexedTransaction) {
		// synchronous transaction verification needs a previous-transaction-output
		// provider, which this verifier does not hold (see TODO below)
		unimplemented!()
	}
}
impl Verifier for SyncVerifierImpl {
}
/// Execute single verification task.
/// TODO: use SyncVerifier from within AsyncVerifier => get rid of this function
fn execute_verification_task<T: VerifierSink, U: TransactionOutputObserver + PreviousTransactionOutputProvider>(
sink: &Arc<T>,
tx_output_provider: Option<&U>,
verifier: &ChainVerifier,
task: VerificationTask
) {
let mut tasks_queue: VecDeque<VerificationTask> = VecDeque::new();
tasks_queue.push_back(task);
while let Some(task) = tasks_queue.pop_front() {
match task {
VerificationTask::VerifyBlock(block) => {
// verify block
match verifier.verify(&block) {
Ok(VerificationChain::Main) | Ok(VerificationChain::Side) => {
if let Some(tasks) = sink.on_block_verification_success(block) {
tasks_queue.extend(tasks);
}
},
					Ok(VerificationChain::Orphan) => {
						// this can happen for B1 if B0 verification has failed && we have already scheduled verification of B1
						sink.on_block_verification_error("orphaned block because parent block verification has failed", block)
					},
					Err(e) => {
						sink.on_block_verification_error(&format!("{:?}", e), block)
					}
}
},
VerificationTask::VerifyTransaction(height, transaction) => {
let time: u32 = time::get_time().sec as u32;
let tx_output_provider = tx_output_provider.expect("must be provided for transaction checks");
match verifier.verify_mempool_transaction(tx_output_provider, height, time, &transaction) {
Ok(_) => sink.on_transaction_verification_success(transaction),
					Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), transaction),
}
},
_ => unreachable!("must be checked by caller"),
}
}
}

View File

@ -1,31 +0,0 @@
use std::sync::Arc;
use parking_lot::RwLock;
use db::SharedStore;
use miner::MemoryPool;
/// Local peer index
pub const LOCAL_PEER_INDEX: PeerIndex = 0;
/// Peers are indexed by this type
pub type PeerIndex = usize;
/// Requests IDs
pub type RequestId = u32;
/// Block height type
pub type BlockHeight = u32;
/// Synchronization peers reference
pub type PeersRef<T> = Arc<T>;
/// Synchronization client reference
pub type ClientRef<T> = Arc<T>;
/// Synchronization executor reference
pub type ExecutorRef<T> = Arc<T>;
/// Memory pool reference
pub type MemoryPoolRef = Arc<RwLock<MemoryPool>>;
/// Storage reference
pub type StorageRef = SharedStore;

View File

@ -1,52 +0,0 @@
use std::collections::VecDeque;
use time;
/// Average Speed meter
pub struct AverageSpeedMeter {
/// Number of items to inspect
inspect_items: usize,
/// Number of items currently inspected
inspected_items: VecDeque<f64>,
/// Current speed
speed: f64,
/// Last timestamp
last_timestamp: Option<f64>,
}
impl AverageSpeedMeter {
pub fn with_inspect_items(inspect_items: usize) -> Self {
assert!(inspect_items > 0);
AverageSpeedMeter {
inspect_items: inspect_items,
inspected_items: VecDeque::with_capacity(inspect_items),
speed: 0_f64,
last_timestamp: None,
}
}
pub fn speed(&self) -> f64 {
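		// `speed` holds the average inter-arrival interval in seconds => invert it to get items per second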
let items_per_second = 1_f64 / self.speed;
if items_per_second.is_normal() { items_per_second } else { 0_f64 }
}
pub fn inspected_items_len(&self) -> usize {
self.inspected_items.len()
}
pub fn checkpoint(&mut self) {
// if inspected_items is already full => remove oldest item from average
if self.inspected_items.len() == self.inspect_items {
let oldest = self.inspected_items.pop_front().expect("len() is not zero; qed");
self.speed = (self.inspect_items as f64 * self.speed - oldest) / (self.inspect_items as f64 - 1_f64);
}
// add new item
let now = time::precise_time_s();
if let Some(last_timestamp) = self.last_timestamp {
let newest = now - last_timestamp;
self.speed = (self.inspected_items.len() as f64 * self.speed + newest) / (self.inspected_items.len() as f64 + 1_f64);
self.inspected_items.push_back(newest);
}
self.last_timestamp = Some(now);
}
}
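// Illustrative usage (a sketch; one checkpoint per received block):
//
//     let mut meter = AverageSpeedMeter::with_inspect_items(16);
//     meter.checkpoint(); // block #1 received
//     meter.checkpoint(); // block #2 received
//     let blocks_per_second = meter.speed();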

View File

@ -1,227 +0,0 @@
use std::collections::HashMap;
use chain::IndexedBlockHeader;
use primitives::hash::H256;
use super::{HashQueue, HashPosition};
/// Blocks headers chain information
#[derive(Debug)]
pub struct Information {
/// Number of headers in best chain
pub best: u32,
/// Total number of headers
pub total: u32,
}
// TODO: currently it supports first chain only (so whatever comes first, it is named best)
/// Chain of blocks headers
#[derive(Debug)]
pub struct BlockHeaderChain {
/// Best hash in storage
storage_best_hash: H256,
/// Headers by hash
headers: HashMap<H256, IndexedBlockHeader>,
/// Best chain
best: HashQueue,
}
impl BlockHeaderChain {
/// Create new block header chain
pub fn new(storage_best_hash: H256) -> Self {
BlockHeaderChain {
storage_best_hash: storage_best_hash,
headers: HashMap::new(),
best: HashQueue::new(),
}
}
/// Returns information on block headers chain
pub fn information(&self) -> Information {
Information {
best: self.best.len(),
total: self.headers.len() as u32,
}
}
/// Returns block header at given height
pub fn at(&self, height: u32) -> Option<IndexedBlockHeader> {
self.best.at(height)
.and_then(|hash| self.headers.get(&hash).cloned())
}
/// Returns block header by hash
pub fn by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
self.headers.get(hash).cloned()
}
/// Returns height of block with given hash
pub fn height(&self, hash: &H256) -> Option<u32> {
self.best.position(hash)
}
	/// Returns direct children of given hash
	pub fn children(&self, hash: &H256) -> Vec<H256> {
		self.best.position(hash)
			.and_then(|pos| self.best.at(pos + 1))
			.map(|child| vec![child])
			.unwrap_or_default()
	}
/// Returns best known block hash
pub fn best_block_hash(&self) -> H256 {
		self.best.back().unwrap_or_else(|| self.storage_best_hash.clone())
}
/// Try to insert new block header
pub fn insert(&mut self, header: IndexedBlockHeader) {
// append to the best chain
if self.best_block_hash() == header.raw.previous_header_hash {
self.best.push_back(header.hash.clone());
self.headers.insert(header.hash.clone(), header);
return;
}
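		// non-extending header => silently ignored: only the first (best) chain is tracked, see TODO above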
}
/// Try to insert multiple blocks headers
pub fn insert_n(&mut self, headers: Vec<IndexedBlockHeader>) {
for header in headers {
self.insert(header);
}
}
/// Remove block header with given hash, deleting also all its children
pub fn remove(&mut self, hash: &H256) {
if self.headers.remove(hash).is_some() {
match self.best.remove(hash) {
HashPosition::Front => self.clear(),
HashPosition::Inside(position) => self.clear_after(position),
_ => (),
}
}
}
	/// Remove block headers with given hashes, deleting also all their children
pub fn remove_n<I: IntoIterator<Item=H256>> (&mut self, hashes: I) {
for hash in hashes {
self.remove(&hash);
}
}
/// Called when new block is inserted to the storage
pub fn block_inserted_to_storage(&mut self, hash: &H256, storage_best_hash: &H256) {
if self.best.front().map(|h| &h == hash).unwrap_or(false) {
self.best.pop_front();
self.headers.remove(hash);
}
self.storage_best_hash = storage_best_hash.clone();
}
/// Delete all known headers
pub fn clear(&mut self) {
self.headers.clear();
self.best.clear();
}
/// Delete all known headers after given position
fn clear_after(&mut self, position: u32) {
if position == 0 {
self.clear()
} else {
while self.best.len() > position {
self.headers.remove(&self.best.pop_back().expect("len() > position; qed"));
}
}
}
}
#[cfg(test)]
mod tests {
use primitives::hash::H256;
use test_data;
use super::BlockHeaderChain;
#[test]
fn best_chain_empty() {
let chain = BlockHeaderChain::new(H256::default());
assert_eq!(chain.at(0), None);
assert_eq!(chain.by_hash(&H256::from(0)), None);
assert_eq!(chain.height(&H256::default()), None);
assert_eq!(chain.children(&H256::default()), vec![]);
assert_eq!(chain.best_block_hash(), H256::default());
}
#[test]
fn best_chain_insert() {
let mut chain = BlockHeaderChain::new(test_data::genesis().hash());
let b1 = test_data::block_h1().block_header;
let b2 = test_data::block_h2().block_header;
let b181 = test_data::block_h181().block_header;
let b182 = test_data::block_h182().block_header;
chain.insert(b1.into());
chain.insert(b181.clone().into());
assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1);
chain.insert(b2.into());
assert_eq!(chain.information().best, 2);
assert_eq!(chain.information().total, 2);
chain.clear();
assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0);
chain.insert(b181.clone().into());
assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0);
chain.block_inserted_to_storage(&b181.hash(), &b181.hash());
assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0);
chain.insert(b182.into());
assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1);
}
#[test]
fn best_chain_remove() {
let b0 = test_data::block_builder().header().build().build();
let b1 = test_data::block_builder().header().parent(b0.hash()).build().build().block_header;
let b2 = test_data::block_builder().header().parent(b1.hash()).build().build().block_header;
let b3 = test_data::block_builder().header().parent(b2.hash()).build().build().block_header;
let b4 = test_data::block_builder().header().parent(b3.hash()).build().build().block_header;
let mut chain = BlockHeaderChain::new(b0.hash());
chain.insert_n(vec![b1.clone().into(), b2.clone().into(), b3.clone().into(), b4.clone().into()]);
assert_eq!(chain.information().best, 4);
assert_eq!(chain.information().total, 4);
chain.remove(&b2.hash());
assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1);
chain.insert_n(vec![b2.clone().into(), b3.clone().into(), b4.clone().into()]);
assert_eq!(chain.information().best, 4);
assert_eq!(chain.information().total, 4);
chain.remove(&H256::default());
assert_eq!(chain.information().best, 4);
assert_eq!(chain.information().total, 4);
chain.remove(&b1.hash());
assert_eq!(chain.information().best, 0);
assert_eq!(chain.information().total, 0);
}
#[test]
fn best_chain_insert_to_db_no_reorg() {
let mut chain = BlockHeaderChain::new(test_data::genesis().hash());
let b1 = test_data::block_h1().block_header;
chain.insert(b1.clone().into());
assert_eq!(chain.at(0), Some(b1.clone().into()));
let b2 = test_data::block_h2().block_header;
chain.insert(b2.clone().into());
assert_eq!(chain.at(0), Some(b1.clone().into()));
assert_eq!(chain.at(1), Some(b2.clone().into()));
chain.block_inserted_to_storage(&b1.hash(), &b1.hash());
assert_eq!(chain.at(0), Some(b2.into()));
assert_eq!(chain.at(1), None);
assert_eq!(chain.information().best, 1);
assert_eq!(chain.information().total, 1);
}
}

View File

@ -1,281 +0,0 @@
use parking_lot::Mutex;
use bit_vec::BitVec;
use murmur3::murmur3_32;
use chain::{IndexedTransaction, OutPoint};
use message::types;
use ser::serialize;
use script::Script;
/// Constant optimized to create large differences in the seed for different values of `hash_functions_num`.
const SEED_OFFSET: u32 = 0xFBA4C795;
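// Per BIP-37, the Murmur3 seed for the i-th hash function is i * 0xFBA4C795 + nTweak (mod 2^32);
// this is what BloomFilterData::contains/insert compute below.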
/// Connection bloom filter
#[derive(Debug)]
pub struct BloomFilter {
/// Bloom data. Filter can be updated when transaction is matched => we have to use some kind of lock here.
/// Mutex is an only choice, because:
/// 1) we do not know if transaction matches the filter in advance
/// 2) RwLock is non-upgradeable in Rust
bloom: Option<Mutex<BloomFilterData>>,
/// Filter update type.
filter_flags: types::FilterFlags,
}
/// Bloom filter data implemented as described in:
/// https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
#[derive(Debug, Default)]
struct BloomFilterData {
/// Filter storage
filter: BitVec,
/// Number of hash functions to use in bloom filter
hash_functions_num: u32,
/// Value to add to Murmur3 hash seed when calculating hash
tweak: u32,
}
impl Default for BloomFilter {
fn default() -> Self {
BloomFilter {
bloom: None,
filter_flags: types::FilterFlags::None,
}
}
}
impl BloomFilter {
/// Create with given parameters
#[cfg(test)]
pub fn with_filter_load(message: types::FilterLoad) -> Self {
BloomFilter {
filter_flags: message.flags,
bloom: Some(Mutex::new(BloomFilterData::with_filter_load(message))),
}
}
/// Sets bloom filter to given value
pub fn set_bloom_filter(&mut self, message: types::FilterLoad) {
self.bloom = Some(Mutex::new(BloomFilterData::with_filter_load(message)));
}
/// Adds given data to current filter, so that new transactions can be accepted
pub fn update_bloom_filter(&mut self, message: types::FilterAdd) {
if let Some(ref mut bloom) = self.bloom {
bloom.lock().insert(&message.data);
}
}
/// Removes bloom filter, so that all transactions are now accepted by this filter
pub fn remove_bloom_filter(&mut self) {
self.bloom = None;
}
/// Filters transaction using bloom filter data
pub fn filter_transaction(&self, tx: &IndexedTransaction) -> bool {
// check with bloom filter, if set
match self.bloom {
			// if no filter is set for the connection => match everything
			None => true,
			// filter using bloom filter, then update the filter itself
Some(ref bloom) => {
let mut bloom = bloom.lock();
let mut is_match = false;
// match if filter contains any arbitrary script data element in any scriptPubKey in tx
for (output_index, output) in tx.raw.outputs.iter().enumerate() {
let script = Script::new(output.script_pubkey.clone());
let is_update_needed = self.filter_flags == types::FilterFlags::All
|| (self.filter_flags == types::FilterFlags::PubKeyOnly && (script.is_pay_to_public_key() || script.is_multisig_script()));
for instruction in script.iter().filter_map(|i| i.ok()) {
if let Some(instruction_data) = instruction.data {
if bloom.contains(instruction_data) {
is_match = true;
if is_update_needed {
bloom.insert(&serialize(&OutPoint {
hash: tx.hash.clone(),
index: output_index as u32,
}));
}
}
}
}
}
				// the filter is only updated above => we can return early from now on
if is_match {
return is_match;
}
// match if filter contains transaction itself
if bloom.contains(&*tx.hash) {
return true;
}
// match if filter contains an outpoint this transaction spends
for input in &tx.raw.inputs {
// check if match previous output
let previous_output = serialize(&input.previous_output);
is_match = bloom.contains(&*previous_output);
if is_match {
return true;
}
// check if match any arbitrary script data element in any scriptSig in tx
let script = Script::new(input.script_sig.clone());
for instruction in script.iter().filter_map(|i| i.ok()) {
if let Some(instruction_data) = instruction.data {
is_match = bloom.contains(&*instruction_data);
if is_match {
return true;
}
}
}
}
// no matches
false
},
}
}
}
impl BloomFilterData {
/// Create with given parameters
pub fn with_filter_load(message: types::FilterLoad) -> Self {
BloomFilterData {
filter: BitVec::from_bytes(&message.filter),
hash_functions_num: message.hash_functions,
tweak: message.tweak,
}
}
/// True if filter contains given bytes
pub fn contains(&self, data: &[u8]) -> bool {
for hash_function_idx in 0..self.hash_functions_num {
let murmur_seed = hash_function_idx.overflowing_mul(SEED_OFFSET).0.overflowing_add(self.tweak).0;
let murmur_hash = murmur3_32(&mut data.as_ref(), murmur_seed) as usize % self.filter.len();
if !self.filter.get(murmur_hash).expect("murmur_hash is result of mod operation by filter len; qed") {
return false;
}
}
true
}
/// Add bytes to the filter
pub fn insert(&mut self, data: &[u8]) {
for hash_function_idx in 0..self.hash_functions_num {
let murmur_seed = hash_function_idx.overflowing_mul(SEED_OFFSET).0.overflowing_add(self.tweak).0;
let murmur_hash = murmur3_32(&mut data.as_ref(), murmur_seed) as usize % self.filter.len();
self.filter.set(murmur_hash, true);
}
}
}
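// The contains/insert pair above follows the BIP37 hashing scheme: the i-th hash
// function is Murmur3 seeded with `i * SEED_OFFSET + tweak` (wrapping arithmetic),
// reduced modulo the filter length in bits. A minimal standalone sketch of the index
// computation, assuming the same `murmur3_32` helper as above and a non-empty filter
// (illustration only, not part of the original module):
fn bip37_bucket_indexes(data: &[u8], hash_functions_num: u32, tweak: u32, filter_bits: usize) -> Vec<usize> {
(0..hash_functions_num)
.map(|i| i.wrapping_mul(SEED_OFFSET).wrapping_add(tweak))
.map(|seed| murmur3_32(&mut data.as_ref(), seed) as usize % filter_bits)
.collect()
}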
#[cfg(test)]
pub mod tests {
use std::iter::repeat;
use chain::IndexedTransaction;
use message::types;
use primitives::bytes::Bytes;
use primitives::hash::H256;
use ser::serialize;
use test_data;
use super::{BloomFilter, BloomFilterData};
fn default_filterload() -> types::FilterLoad {
types::FilterLoad {
filter: Bytes::from(repeat(0u8).take(1024).collect::<Vec<_>>()),
hash_functions: 10,
tweak: 5,
flags: types::FilterFlags::None,
}
}
fn make_filteradd(data: &[u8]) -> types::FilterAdd {
types::FilterAdd {
data: data.into(),
}
}
#[test]
fn bloom_insert_data() {
let mut bloom = BloomFilterData::with_filter_load(default_filterload());
assert!(!bloom.contains(&*H256::default()));
bloom.insert(&*H256::default());
assert!(bloom.contains(&*H256::default()));
}
#[test]
fn bloom_filter_matches_transaction_by_hash() {
let tx1: IndexedTransaction = test_data::TransactionBuilder::with_output(10).into();
let tx2: IndexedTransaction = test_data::TransactionBuilder::with_output(20).into();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&*tx1.hash));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
#[test]
fn bloom_filter_matches_transaction_by_output_script_data_element() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
// output script: OP_DUP OP_HASH160 380cb3c594de4e7e9b8e18db182987bebb5a4f70 OP_EQUALVERIFY OP_CHECKSIG
let tx1: IndexedTransaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_out_data: Bytes = "380cb3c594de4e7e9b8e18db182987bebb5a4f70".into();
let tx2 = IndexedTransaction::default();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&tx1_out_data));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
#[test]
fn bloom_filter_matches_transaction_by_previous_output_point() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
let tx1: IndexedTransaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_previous_output: Bytes = serialize(&tx1.raw.inputs[0].previous_output);
let tx2 = IndexedTransaction::default();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&tx1_previous_output));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
#[test]
fn connection_filter_matches_transaction_by_input_script_data_element() {
// https://webbtc.com/tx/eb3b82c0884e3efa6d8b0be55b4915eb20be124c9766245bcc7f34fdac32bccb
// input script: PUSH DATA 304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b01
let tx1: IndexedTransaction = "01000000024de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8000000006b48304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b0121035aa98d5f77cd9a2d88710e6fc66212aff820026f0dad8f32d1f7ce87457dde50ffffffff4de8b0c4c2582db95fa6b3567a989b664484c7ad6672c85a3da413773e63fdb8010000006f004730440220276d6dad3defa37b5f81add3992d510d2f44a317fd85e04f93a1e2daea64660202200f862a0da684249322ceb8ed842fb8c859c0cb94c81e1c5308b4868157a428ee01ab51210232abdc893e7f0631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a51aeffffffff02e0fd1c00000000001976a914380cb3c594de4e7e9b8e18db182987bebb5a4f7088acc0c62d000000000017142a9bc5447d664c1d0141392a842d23dba45c4f13b17500000000".into();
let tx1_input_data: Bytes = "304502205b282fbc9b064f3bc823a23edcc0048cbb174754e7aa742e3c9f483ebe02911c022100e4b0b3a117d36cab5a67404dddbf43db7bea3c1530e0fe128ebc15621bd69a3b01".into();
let tx2 = IndexedTransaction::default();
let mut filter = BloomFilter::with_filter_load(default_filterload());
assert!(!filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
filter.update_bloom_filter(make_filteradd(&tx1_input_data));
assert!(filter.filter_transaction(&tx1));
assert!(!filter.filter_transaction(&tx2));
}
}

View File

@ -1,121 +0,0 @@
use std::collections::HashSet;
use rand::{thread_rng, Rng};
use bitcrypto::{sha256, siphash24};
use byteorder::{LittleEndian, ByteOrder};
use chain::{BlockHeader, ShortTransactionID, IndexedBlock};
use message::common::{BlockHeaderAndIDs, PrefilledTransaction};
use primitives::hash::H256;
use ser::{Stream, Serializable};
/// Maximum size of prefilled transactions in compact block
const MAX_COMPACT_BLOCK_PREFILLED_SIZE: usize = 10 * 1024;
/// Build compact block from given block and selected transactions
pub fn build_compact_block(block: IndexedBlock, prefilled_transactions_indexes: HashSet<usize>) -> BlockHeaderAndIDs {
let nonce: u64 = thread_rng().gen();
let prefilled_transactions_len = prefilled_transactions_indexes.len();
let mut short_ids: Vec<ShortTransactionID> = Vec::with_capacity(block.transactions.len() - prefilled_transactions_len);
let mut prefilled_transactions: Vec<PrefilledTransaction> = Vec::with_capacity(prefilled_transactions_len);
let mut prefilled_transactions_size: usize = 0;
let (key0, key1) = short_transaction_id_keys(nonce, &block.header.raw);
for (transaction_index, transaction) in block.transactions.into_iter().enumerate() {
let transaction_size = transaction.raw.serialized_size();
if prefilled_transactions_size + transaction_size < MAX_COMPACT_BLOCK_PREFILLED_SIZE
&& prefilled_transactions_indexes.contains(&transaction_index) {
prefilled_transactions_size += transaction_size;
prefilled_transactions.push(PrefilledTransaction {
index: transaction_index,
transaction: transaction.raw,
})
} else {
short_ids.push(short_transaction_id(key0, key1, &transaction.hash));
}
}
BlockHeaderAndIDs {
header: block.header.raw,
nonce: nonce,
short_ids: short_ids,
prefilled_transactions: prefilled_transactions,
}
}
pub fn short_transaction_id_keys(nonce: u64, block_header: &BlockHeader) -> (u64, u64) {
// Short transaction IDs are used to represent a transaction without sending a full 256-bit hash. They are calculated by:
// 1) single-SHA256 hashing the block header with the nonce appended (in little-endian)
let mut stream = Stream::new();
stream.append(block_header);
stream.append(&nonce);
let block_header_with_nonce_hash = sha256(&stream.out());
// 2) Running SipHash-2-4 with the input being the transaction ID and the keys (k0/k1) set to the first two little-endian
// 64-bit integers from the above hash, respectively.
let key0 = LittleEndian::read_u64(&block_header_with_nonce_hash[0..8]);
let key1 = LittleEndian::read_u64(&block_header_with_nonce_hash[8..16]);
(key0, key1)
}
pub fn short_transaction_id(key0: u64, key1: u64, transaction_hash: &H256) -> ShortTransactionID {
// 2) Running SipHash-2-4 with the input being the transaction ID and the keys (k0/k1) set to the first two little-endian
// 64-bit integers from the above hash, respectively.
let siphash_transaction_hash = siphash24(key0, key1, &**transaction_hash);
// 3) Dropping the 2 most significant bytes from the SipHash output to make it 6 bytes.
let mut siphash_transaction_hash_bytes = [0u8; 8];
LittleEndian::write_u64(&mut siphash_transaction_hash_bytes, siphash_transaction_hash);
siphash_transaction_hash_bytes[0..6].into()
}
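// On the receiving side the same helpers can be used to map short IDs from an
// incoming compact block back to transactions already in the mempool. A hedged
// sketch (hypothetical helper, not part of the original module):
pub fn match_short_ids(compact: &BlockHeaderAndIDs, mempool_hashes: &[H256]) -> Vec<(ShortTransactionID, H256)> {
let (key0, key1) = short_transaction_id_keys(compact.nonce, &compact.header);
mempool_hashes.iter()
.map(|hash| (short_transaction_id(key0, key1, hash), hash.clone()))
.filter(|&(ref short_id, _)| compact.short_ids.contains(short_id))
.collect()
}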
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use chain::{BlockHeader, Transaction, ShortTransactionID};
use message::common::{BlockHeaderAndIDs, PrefilledTransaction};
use test_data;
use super::{build_compact_block, short_transaction_id_keys, short_transaction_id};
#[test]
fn short_transaction_id_is_correct() {
// https://webbtc.com/tx/fa755807ab9f3ca8a9b25982570700f3f94bb0627f373893c3cfe79b5cf16def
let transaction: Transaction = "01000000015fe01688dd8ae4428e21835c0e1b7af571c4223658d94da0c123e6fd7399862a010000006b483045022100f9e6d1bd3c9f54dcc72405994ec9ac2795878dd0b3cfbdc52bed28c2737fbecc02201fd68deab17bfaef1626e232cc4488dc273ba6fa5d807712b111d017cb96e0990121021fff64d1a21ede90d77cafa35fe7621db8aa433d947267980b395c35d23bd87fffffffff021ea56f72000000001976a9146fae1c8e7a648fff905dfdac9b019d3e887d7e8f88ac80f0fa02000000001976a9147f29b567c7dd9fc59cd3a7f716914966cc91ffa188ac00000000".into();
let transaction_hash = transaction.hash();
// https://webbtc.com/block/000000000000000001582cb2307ac43f3b4b268f2a75d3581d0babd48df1c300
let block_header: BlockHeader = "000000205a54771c6a1a2bcc8f3412184f319dc02f7258b56fd5060100000000000000001de7a03cefe565d11cdfa369f6ffe59b9368a257203726c9cc363d31b4e3c2ebca4f3c58d4e6031830ccfd80".into();
let nonce = 13450019974716797918_u64;
let (key0, key1) = short_transaction_id_keys(nonce, &block_header);
let actual_id = short_transaction_id(key0, key1, &transaction_hash);
let expected_id: ShortTransactionID = "036e8b8b8f00".into();
assert_eq!(expected_id, actual_id);
}
#[test]
fn compact_block_is_built_correctly() {
let block = test_data::block_builder().header().parent(test_data::genesis().hash()).build()
.transaction().output().value(10).build().build()
.transaction().output().value(20).build().build()
.transaction().output().value(30).build().build()
.build(); // genesis -> block
let prefilled: HashSet<_> = vec![1].into_iter().collect();
let compact_block = build_compact_block(block.clone().into(), prefilled);
let (key0, key1) = short_transaction_id_keys(compact_block.nonce, &block.block_header);
let short_ids = vec![
short_transaction_id(key0, key1, &block.transactions[0].hash()),
short_transaction_id(key0, key1, &block.transactions[2].hash()),
];
assert_eq!(compact_block, BlockHeaderAndIDs {
header: block.block_header.clone(),
nonce: compact_block.nonce,
short_ids: short_ids,
prefilled_transactions: vec![
PrefilledTransaction {
index: 1,
transaction: block.transactions[1].clone(),
}
],
});
}
}

View File

@ -1,46 +0,0 @@
use message::types;
/// Connection fee rate filter
#[derive(Debug, Default)]
pub struct FeeRateFilter {
/// Minimal fee in satoshis per 1000 bytes
fee_rate: u64,
}
impl FeeRateFilter {
/// Set the minimal fee rate accepted by this filter
pub fn set_min_fee_rate(&mut self, message: types::FeeFilter) {
self.fee_rate = message.fee_rate;
}
/// Filter transaction using its fee rate
pub fn filter_transaction(&self, tx_fee_rate: u64) -> bool {
tx_fee_rate >= self.fee_rate
}
}
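// The filter works with fee rates expressed in satoshis per 1000 bytes. A sketch of
// deriving such a rate from a transaction's absolute fee and serialized size before
// calling filter_transaction (hypothetical helper, illustration only):
pub fn fee_rate_per_kb(fee_in_satoshis: u64, serialized_size: usize) -> u64 {
// guard against division by zero for degenerate sizes
fee_in_satoshis * 1000 / ::std::cmp::max(serialized_size as u64, 1)
}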
#[cfg(test)]
mod tests {
use message::types;
use super::FeeRateFilter;
#[test]
fn fee_rate_filter_empty() {
assert!(FeeRateFilter::default().filter_transaction(0));
}
#[test]
fn fee_rate_filter_accepts() {
let mut filter = FeeRateFilter::default();
filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000));
assert!(filter.filter_transaction(1000));
assert!(filter.filter_transaction(2000));
}
#[test]
fn fee_rate_filter_rejects() {
let mut filter = FeeRateFilter::default();
filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000));
assert!(filter.filter_transaction(500));
}
}

View File

@ -1,399 +0,0 @@
use std::ops::Index;
use std::collections::{VecDeque, HashSet};
use std::iter::repeat;
use primitives::hash::H256;
/// Hash position
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum HashPosition {
/// Hash is not in the queue
Missing,
/// Hash is at the front of the queue
Front,
/// Hash is somewhere inside in the queue
Inside(u32),
}
/// Ordered queue with O(1) cost for contains() && random-access operations.
#[derive(Debug, Clone)]
pub struct HashQueue {
/// Index-ordered hashes
queue: VecDeque<H256>,
/// Set of hashes
set: HashSet<H256>,
}
/// Chain of linked queues. First queue has index zero.
#[derive(Debug)]
pub struct HashQueueChain {
/// Hash queues
chain: Vec<HashQueue>,
}
impl HashQueue {
pub fn new() -> Self {
HashQueue {
queue: VecDeque::new(),
set: HashSet::new(),
}
}
/// Clears the queue
pub fn clear(&mut self) {
self.set.clear();
self.queue.clear();
}
/// Returns the length of the queue.
pub fn len(&self) -> u32 {
self.queue.len() as u32
}
/// Returns the front element of the queue.
pub fn front(&self) -> Option<H256> {
self.queue.front().cloned()
}
/// Returns the back element of the queue.
pub fn back(&self) -> Option<H256> {
self.queue.back().cloned()
}
/// Returns position of the element in the queue
pub fn position(&self, hash: &H256) -> Option<u32> {
self.queue.iter().position(|h| h == hash).map(|pos| pos as u32)
}
/// Returns element at position
pub fn at(&self, position: u32) -> Option<H256> {
self.queue.get(position as usize).cloned()
}
/// Returns the element just before the back of the queue.
pub fn pre_back(&self) -> Option<H256> {
let queue_len = self.queue.len();
if queue_len <= 1 {
return None;
}
Some(self.queue[queue_len - 2].clone())
}
/// Returns true if queue contains element.
pub fn contains(&self, hash: &H256) -> bool {
self.set.contains(hash)
}
/// Returns n elements from the front of the queue
pub fn front_n(&self, n: u32) -> Vec<H256> {
self.queue.iter().cloned().take(n as usize).collect()
}
/// Removes element from the front of the queue.
pub fn pop_front(&mut self) -> Option<H256> {
match self.queue.pop_front() {
Some(hash) => {
self.set.remove(&hash);
Some(hash)
},
None => None,
}
}
/// Removes n elements from the front of the queue.
pub fn pop_front_n(&mut self, n: u32) -> Vec<H256> {
let mut result: Vec<H256> = Vec::new();
for _ in 0..n {
match self.pop_front() {
Some(hash) => result.push(hash),
None => return result,
}
}
result
}
/// Removes element from the back of the queue.
pub fn pop_back(&mut self) -> Option<H256> {
match self.queue.pop_back() {
Some(hash) => {
self.set.remove(&hash);
Some(hash)
},
None => None,
}
}
/// Adds element to the back of the queue.
pub fn push_back(&mut self, hash: H256) {
if !self.set.insert(hash.clone()) {
panic!("must be checked by caller");
}
self.queue.push_back(hash);
}
/// Adds elements to the back of the queue.
pub fn push_back_n(&mut self, hashes: Vec<H256>) {
for hash in hashes {
self.push_back(hash);
}
}
/// Removes element from the queue, returning its position.
pub fn remove(&mut self, hash: &H256) -> HashPosition {
if !self.set.remove(hash) {
return HashPosition::Missing;
}
if self.queue.front().expect("checked one line above") == hash {
self.queue.pop_front();
return HashPosition::Front;
}
for i in 0..self.queue.len() {
if self.queue[i] == *hash {
self.queue.remove(i);
return HashPosition::Inside(i as u32);
}
}
// unreachable because hash is not missing, not at the front and not inside
unreachable!()
}
/// Removes all elements from the queue.
pub fn remove_all(&mut self) -> VecDeque<H256> {
use std::mem::replace;
self.set.clear();
replace(&mut self.queue, VecDeque::new())
}
}
impl Index<u32> for HashQueue {
type Output = H256;
fn index(&self, index: u32) -> &Self::Output {
&self.queue[index as usize]
}
}
impl HashQueueChain {
/// Creates chain with given number of queues.
pub fn with_number_of_queues(number_of_queues: usize) -> Self {
assert!(number_of_queues != 0);
HashQueueChain {
chain: repeat(HashQueue::new()).take(number_of_queues).collect(),
}
}
/// Returns length of the whole chain.
pub fn len(&self) -> u32 {
self.chain.iter().fold(0, |total, chain| total + chain.len())
}
/// Returns length of the given queue.
pub fn len_of(&self, queue_index: usize) -> u32 {
self.chain[queue_index].len()
}
/// Returns element at the given position
pub fn at(&self, mut index: u32) -> Option<H256> {
for queue in &self.chain {
let queue_len = queue.len();
if index < queue_len {
return queue.at(index);
}
index -= queue_len;
}
None
}
/// Returns element at the front of the given queue.
pub fn front_at(&self, queue_index: usize) -> Option<H256> {
let queue = &self.chain[queue_index];
queue.front()
}
/// Returns element at the back of the given queue.
pub fn back_at(&self, queue_index: usize) -> Option<H256> {
let queue = &self.chain[queue_index];
queue.back()
}
/// Returns the element just before the back of the given queue.
pub fn pre_back_at(&self, chain_index: usize) -> Option<H256> {
let queue = &self.chain[chain_index];
queue.pre_back()
}
/// Returns the back of the whole chain.
pub fn back(&self) -> Option<H256> {
// iterate queues from the last to the first, returning the first non-empty back
// (the previous loop-based version never inspected the first queue)
self.chain.iter().rev()
.filter_map(|queue| queue.back())
.next()
}
/// Checks if hash is contained in given queue.
#[cfg(test)]
pub fn is_contained_in(&self, queue_index: usize, hash: &H256) -> bool {
self.chain[queue_index].contains(hash)
}
/// Returns the index of queue, hash is contained in.
pub fn contains_in(&self, hash: &H256) -> Option<usize> {
for i in 0..self.chain.len() {
if self.chain[i].contains(hash) {
return Some(i);
}
}
None
}
/// Returns n elements from the front of the given queue
pub fn front_n_at(&self, queue_index: usize, n: u32) -> Vec<H256> {
self.chain[queue_index].front_n(n)
}
/// Remove a number of hashes from the front of the given queue.
pub fn pop_front_n_at(&mut self, queue_index: usize, n: u32) -> Vec<H256> {
self.chain[queue_index].pop_front_n(n)
}
/// Push hash onto the back of the given queue.
pub fn push_back_at(&mut self, queue_index: usize, hash: H256) {
self.chain[queue_index].push_back(hash)
}
/// Push a number of hashes onto the back of the given queue.
pub fn push_back_n_at(&mut self, queue_index: usize, hashes: Vec<H256>) {
self.chain[queue_index].push_back_n(hashes)
}
/// Remove hash from given queue.
pub fn remove_at(&mut self, queue_index: usize, hash: &H256) -> HashPosition {
self.chain[queue_index].remove(hash)
}
/// Remove all items from given queue.
pub fn remove_all_at(&mut self, queue_index: usize) -> VecDeque<H256> {
self.chain[queue_index].remove_all()
}
}
impl Index<u32> for HashQueueChain {
type Output = H256;
fn index(&self, mut index: u32) -> &Self::Output {
for queue in &self.chain {
let queue_len = queue.len();
if index < queue_len {
return &queue[index];
}
index -= queue_len;
}
panic!("invalid index");
}
}
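// A typical use of HashQueueChain is tracking a hash through consecutive
// synchronization states, one queue per state. A hedged sketch (the queue indexes
// are illustrative, not the original constants):
const SCHEDULED_QUEUE: usize = 0;
const REQUESTED_QUEUE: usize = 1;
fn request_next_hash(chain: &mut HashQueueChain) -> Option<H256> {
// move the front scheduled hash into the requested queue
let hash = chain.pop_front_n_at(SCHEDULED_QUEUE, 1).into_iter().next();
if let Some(ref hash) = hash {
chain.push_back_at(REQUESTED_QUEUE, hash.clone());
}
hash
}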
#[cfg(test)]
mod tests {
use super::{HashQueue, HashQueueChain, HashPosition};
use primitives::hash::H256;
#[test]
fn hash_queue_empty() {
let mut queue = HashQueue::new();
assert_eq!(queue.len(), 0);
assert_eq!(queue.front(), None);
assert_eq!(queue.back(), None);
assert_eq!(queue.pre_back(), None);
assert_eq!(queue.contains(&"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), false);
assert_eq!(queue.pop_front(), None);
assert_eq!(queue.pop_front_n(100), vec![]);
assert_eq!(queue.remove(&"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), HashPosition::Missing);
}
#[test]
fn hash_queue_chain_empty() {
let mut chain = HashQueueChain::with_number_of_queues(3);
assert_eq!(chain.len(), 0);
assert_eq!(chain.len_of(0), 0);
assert_eq!(chain.front_at(0), None);
assert_eq!(chain.back_at(0), None);
assert_eq!(chain.pre_back_at(0), None);
assert_eq!(chain.back(), None);
assert_eq!(chain.is_contained_in(0, &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), false);
assert_eq!(chain.contains_in(&"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), None);
assert_eq!(chain.pop_front_n_at(0, 100), vec![]);
assert_eq!(chain.remove_at(0, &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f".into()), HashPosition::Missing);
}
#[test]
fn hash_queue_chain_not_empty() {
let mut chain = HashQueueChain::with_number_of_queues(4);
chain.push_back_n_at(0, vec![
H256::from(0),
H256::from(1),
H256::from(2),
]);
chain.push_back_n_at(1, vec![
H256::from(3),
H256::from(4),
]);
chain.push_back_n_at(2, vec![
H256::from(5),
]);
assert_eq!(chain.len(), 6);
assert_eq!(chain.len_of(0), 3);
assert_eq!(chain.len_of(1), 2);
assert_eq!(chain.len_of(2), 1);
assert_eq!(chain.len_of(3), 0);
assert_eq!(chain.front_at(0), Some(H256::from(0)));
assert_eq!(chain.front_at(1), Some(H256::from(3)));
assert_eq!(chain.front_at(2), Some(H256::from(5)));
assert_eq!(chain.front_at(3), None);
assert_eq!(chain.back_at(0), Some(H256::from(2)));
assert_eq!(chain.back_at(1), Some(H256::from(4)));
assert_eq!(chain.back_at(2), Some(H256::from(5)));
assert_eq!(chain.back_at(3), None);
assert_eq!(chain.pre_back_at(0), Some(H256::from(1)));
assert_eq!(chain.pre_back_at(1), Some(H256::from(3)));
assert_eq!(chain.pre_back_at(2), None);
assert_eq!(chain.pre_back_at(3), None);
assert_eq!(chain.back(), Some(H256::from(5)));
assert_eq!(chain.is_contained_in(0, &H256::from(2)), true);
assert_eq!(chain.is_contained_in(1, &H256::from(2)), false);
assert_eq!(chain.is_contained_in(2, &H256::from(2)), false);
assert_eq!(chain.is_contained_in(3, &H256::from(2)), false);
assert_eq!(chain.contains_in(&H256::from(2)), Some(0));
assert_eq!(chain.contains_in(&H256::from(5)), Some(2));
assert_eq!(chain.contains_in(&H256::from(9)), None);
}
#[test]
fn hash_queue_front_n() {
let mut queue = HashQueue::new();
queue.push_back_n(vec![H256::from(0), H256::from(1)]);
assert_eq!(queue.front_n(3), vec![H256::from(0), H256::from(1)]);
assert_eq!(queue.front_n(3), vec![H256::from(0), H256::from(1)]);
assert_eq!(queue.pop_front_n(3), vec![H256::from(0), H256::from(1)]);
assert_eq!(queue.pop_front_n(3), vec![]);
}
}

View File

@ -1,89 +0,0 @@
use std::collections::HashMap;
use chain::{BlockHeader, IndexedBlockHeader};
use db::{BlockRef, BlockHeaderProvider};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use ser::serialize;
use super::{BlockHeight, BlockHeaderChain};
/// Block headers provider from `headers` message
pub struct InMemoryBlockHeaderProvider<'a> {
/// Block headers chain
headers_chain: &'a BlockHeaderChain,
/// Previous header (from headers_chain)
previous_header_height: Option<BlockHeight>,
/// Headers by hash
by_hash: HashMap<H256, IndexedBlockHeader>,
/// Header hashes ordered by height
by_height: Vec<H256>,
}
impl<'a> InMemoryBlockHeaderProvider<'a> {
/// Creates new provider for given headers chain and previous header reference
pub fn new(headers_chain: &'a BlockHeaderChain, previous_header_height: Option<BlockHeight>) -> Self {
InMemoryBlockHeaderProvider {
headers_chain: headers_chain,
previous_header_height: previous_header_height,
by_hash: HashMap::new(),
by_height: Vec::new(),
}
}
/// Appends new header to the end of in-memory chain
pub fn append_header(&mut self, header: IndexedBlockHeader) {
self.by_height.push(header.hash.clone());
self.by_hash.insert(header.hash.clone(), header);
}
}
impl<'a> BlockHeaderProvider for InMemoryBlockHeaderProvider<'a> {
fn block_header_bytes(&self, block_ref: BlockRef) -> Option<Bytes> {
self.block_header(block_ref).map(|h| serialize(&h))
}
fn block_header(&self, block_ref: BlockRef) -> Option<BlockHeader> {
match block_ref {
BlockRef::Hash(h) => self.headers_chain.by_hash(&h)
.or_else(|| self.by_hash.get(&h).cloned()),
BlockRef::Number(n) => {
if let Some(previous_header_height) = self.previous_header_height {
if n > previous_header_height {
let n = (n - previous_header_height - 1) as usize;
if n < self.by_height.len() {
return Some(self.by_hash[&self.by_height[n]].raw.clone())
} else {
return None
}
}
}
self.headers_chain.at(n)
}
}
.map(|header| header.raw)
}
}
#[cfg(test)]
mod tests {
use db::BlockHeaderProvider;
use test_data;
use super::super::BlockHeaderChain;
use super::super::BlockHeight;
use super::InMemoryBlockHeaderProvider;
#[test]
fn in_memory_block_header_provider_updates() {
let chain = BlockHeaderChain::new(test_data::genesis().hash());
let mut provider = InMemoryBlockHeaderProvider::new(&chain, Some(0));
assert_eq!(provider.block_header(0.into()), Some(test_data::genesis().block_header.into()));
assert_eq!(provider.block_header(1.into()), None);
assert_eq!(provider.block_header(test_data::genesis().hash().into()), Some(test_data::genesis().block_header.into()));
assert_eq!(provider.block_header(test_data::block_h1().hash().into()), None);
provider.append_header(test_data::block_h1().block_header.into());
assert_eq!(provider.block_header(0.into()), Some(test_data::genesis().block_header.into()));
assert_eq!(provider.block_header(1.into()), Some(test_data::block_h1().block_header.into()));
assert_eq!(provider.block_header(test_data::genesis().hash().into()), Some(test_data::genesis().block_header.into()));
assert_eq!(provider.block_header(test_data::block_h1().hash().into()), Some(test_data::block_h1().block_header.into()));
}
}

View File

@ -1,148 +0,0 @@
use linked_hash_map::LinkedHashMap;
use primitives::hash::H256;
/// Maximal number of hashes to store in known-hashes filter
pub const MAX_KNOWN_HASHES_LEN: usize = 2048;
/// Hash-knowledge type
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum KnownHashType {
/// Peer knows transaction with this hash
Transaction,
/// Peer knows block with this hash
Block,
/// Peer knows compact block with this hash
CompactBlock,
}
/// Known-hashes filter
#[derive(Debug, Default)]
pub struct KnownHashFilter {
/// Insertion-time ordered known hashes
known_hashes: LinkedHashMap<H256, KnownHashType>,
}
impl KnownHashFilter {
/// Insert known hash
pub fn insert(&mut self, hash: H256, hash_type: KnownHashType) {
if !self.known_hashes.contains_key(&hash) {
self.known_hashes.insert(hash, hash_type);
// remove oldest-known hash when the filter is full
if self.known_hashes.len() > MAX_KNOWN_HASHES_LEN {
self.known_hashes.pop_front();
}
}
}
/// Returns number of known hashes
#[cfg(test)]
pub fn len(&self) -> usize {
self.known_hashes.len()
}
/// Returns true if peer knows about this hash with this type
pub fn contains(&self, hash: &H256, hash_type: KnownHashType) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type == hash_type)
.unwrap_or(false)
}
/// Filter block using its hash
pub fn filter_block(&self, hash: &H256) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type != KnownHashType::Block
&& *stored_hash_type != KnownHashType::CompactBlock)
.unwrap_or(false)
}
/// Filter transaction using its hash
pub fn filter_transaction(&self, hash: &H256) -> bool {
self.known_hashes.get(hash)
.map(|stored_hash_type| *stored_hash_type != KnownHashType::Transaction)
.unwrap_or(true)
}
}
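// A sketch of how this filter is typically consulted before relaying inventory to a
// peer: only hashes the peer does not already know pass through (hypothetical
// helper, not part of the original module):
fn filter_inventory_for_relay(filter: &KnownHashFilter, block_hashes: &[H256], transaction_hashes: &[H256]) -> Vec<H256> {
block_hashes.iter().filter(|&hash| filter.filter_block(hash))
.chain(transaction_hashes.iter().filter(|&hash| filter.filter_transaction(hash)))
.cloned()
.collect()
}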
#[cfg(test)]
mod tests {
use message::types;
use primitives::hash::H256;
use super::{KnownHashFilter, KnownHashType, MAX_KNOWN_HASHES_LEN};
#[test]
fn known_hash_filter_empty() {
assert!(KnownHashFilter::default().filter_transaction(&H256::from(0)));
assert!(KnownHashFilter::default().filter_block(&H256::from(0)));
}
#[test]
fn known_hash_filter_block() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(!filter.filter_block(&H256::from(0)));
assert!(!filter.filter_block(&H256::from(1)));
assert!(filter.filter_block(&H256::from(2)));
assert!(filter.filter_block(&H256::from(3)));
}
#[test]
fn known_hash_filter_transaction() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(filter.filter_transaction(&H256::from(0)));
assert!(filter.filter_transaction(&H256::from(1)));
assert!(!filter.filter_transaction(&H256::from(2)));
assert!(filter.filter_transaction(&H256::from(3)));
}
#[test]
fn known_hash_filter_contains() {
let mut filter = KnownHashFilter::default();
filter.insert(H256::from(0), KnownHashType::Block);
filter.insert(H256::from(1), KnownHashType::CompactBlock);
filter.insert(H256::from(2), KnownHashType::Transaction);
assert!(filter.contains(&H256::from(0), KnownHashType::Block));
assert!(!filter.contains(&H256::from(0), KnownHashType::CompactBlock));
assert!(filter.contains(&H256::from(1), KnownHashType::CompactBlock));
assert!(!filter.contains(&H256::from(1), KnownHashType::Block));
assert!(filter.contains(&H256::from(2), KnownHashType::Transaction));
assert!(!filter.contains(&H256::from(2), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::Block));
assert!(!filter.contains(&H256::from(3), KnownHashType::CompactBlock));
assert!(!filter.contains(&H256::from(3), KnownHashType::Transaction));
}
#[test]
fn known_hash_filter_insert() {
let mut hash_data = [0u8; 32];
let mut filter = KnownHashFilter::default();
assert_eq!(filter.len(), 0);
// insert new hash
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), 1);
// insert already known hash => nothing should change
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), 1);
// fill the filter up to MAX_KNOWN_HASHES_LEN hashes (& 0xff keeps the generated hashes unique)
for i in 1..MAX_KNOWN_HASHES_LEN {
hash_data[0] = (i & 0xff) as u8;
hash_data[1] = ((i >> 8) & 0xff) as u8;
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), i + 1);
}
// insert new unknown hash => oldest hash is evicted && filter length stays at its maximum
hash_data[0] = ((MAX_KNOWN_HASHES_LEN + 1) & 0xff) as u8;
hash_data[1] = (((MAX_KNOWN_HASHES_LEN + 1) >> 8) & 0xff) as u8;
filter.insert(H256::from(hash_data.clone()), KnownHashType::Block);
assert_eq!(filter.len(), MAX_KNOWN_HASHES_LEN);
// check that oldest known hash has been removed
hash_data[0] = 0; hash_data[1] = 0;
assert!(!filter.contains(&H256::from(hash_data.clone()), KnownHashType::Block));
hash_data[0] = 1; hash_data[1] = 0;
assert!(filter.contains(&H256::from(hash_data.clone()), KnownHashType::Block));
}
}

View File

@ -1,25 +0,0 @@
use message::types;
/// Short info for message
pub trait MessageShortInfo {
/// Return short info on message
fn short_info(&self) -> String;
}
/// Long info for message
pub trait MessageLongInfo {
/// Return long info on message
fn long_info(&self) -> String;
}
impl MessageShortInfo for types::Inv {
fn short_info(&self) -> String {
format!("LEN={}", self.inventory.len())
}
}
impl MessageLongInfo for types::Inv {
fn long_info(&self) -> String {
self.short_info()
}
}
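// A sketch of how another message type could implement the same traits, assuming
// `types::Headers` exposes its `headers` vector (hedged illustration):
impl MessageShortInfo for types::Headers {
fn short_info(&self) -> String {
format!("LEN={}", self.headers.len())
}
}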

View File

@ -1,31 +0,0 @@
mod average_speed_meter;
mod block_header_chain;
mod bloom_filter;
mod compact_block;
mod fee_rate_filter;
mod hash_queue;
mod in_memory_block_header_provider;
mod known_hash_filter;
mod message_info;
mod orphan_blocks_pool;
mod orphan_transactions_pool;
mod partial_merkle_tree;
mod promise;
mod types;
mod unknown_blocks_pool;
pub use self::average_speed_meter::AverageSpeedMeter;
pub use self::block_header_chain::BlockHeaderChain;
pub use self::bloom_filter::BloomFilter;
pub use self::compact_block::build_compact_block;
pub use self::fee_rate_filter::FeeRateFilter;
pub use self::hash_queue::{HashPosition, HashQueue, HashQueueChain};
pub use self::in_memory_block_header_provider::InMemoryBlockHeaderProvider;
pub use self::known_hash_filter::{KnownHashType, KnownHashFilter};
pub use self::message_info::{MessageShortInfo, MessageLongInfo};
pub use self::orphan_blocks_pool::OrphanBlocksPool;
pub use self::orphan_transactions_pool::{OrphanTransaction, OrphanTransactionsPool};
pub use self::partial_merkle_tree::build_partial_merkle_tree;
pub use self::promise::Promise;
pub use self::types::BlockHeight;
pub use self::unknown_blocks_pool::{UnknownBlock, UnknownBlocksPool};

View File

@ -1,174 +0,0 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::hash_map::Entry;
use linked_hash_map::LinkedHashMap;
use time;
use chain::IndexedBlock;
use primitives::hash::H256;
/// Storage for blocks whose parent block has been requested but not yet received.
#[derive(Debug)]
pub struct OrphanBlocksPool {
/// { Parent block hash: { Block hash : block } }.
by_parent_hash: HashMap<H256, HashMap<H256, IndexedBlock>>,
}
impl OrphanBlocksPool {
/// Create new pool
pub fn new() -> Self {
OrphanBlocksPool {
by_parent_hash: HashMap::new(),
}
}
/// Get total number of blocks in pool
pub fn len(&self) -> usize {
self.by_parent_hash.values().map(|blocks| blocks.len()).sum()
}
/// Insert orphaned block whose parent block has already been requested
pub fn insert_orphaned_block(&mut self, block: IndexedBlock) {
self.by_parent_hash
.entry(block.header.raw.previous_header_hash.clone())
.or_insert_with(HashMap::new)
.insert(block.header.hash.clone(), block);
}
/// Remove all blocks
pub fn clear(&mut self) -> Vec<H256> {
self.by_parent_hash.drain()
.flat_map(|(_, mut v)| v.drain().map(|(k, _)| k).collect::<Vec<_>>())
.collect()
}
/// Remove all blocks depending on this parent
pub fn remove_blocks_for_parent(&mut self, hash: &H256) -> Vec<IndexedBlock> {
let mut queue: VecDeque<H256> = VecDeque::new();
queue.push_back(hash.clone());
let mut removed: Vec<IndexedBlock> = Vec::new();
while let Some(parent_hash) = queue.pop_front() {
if let Entry::Occupied(entry) = self.by_parent_hash.entry(parent_hash) {
let (_, mut orphaned) = entry.remove_entry();
queue.extend(orphaned.keys().cloned());
removed.extend(orphaned.drain().map(|(_, v)| v));
}
}
removed
}
/// Remove blocks with given hashes + all dependent blocks
pub fn remove_blocks(&mut self, hashes: &HashSet<H256>) -> Vec<IndexedBlock> {
// TODO: excess clone
let mut removed: Vec<IndexedBlock> = Vec::new();
let parent_orphan_keys: Vec<_> = self.by_parent_hash.keys().cloned().collect();
for parent_orphan_key in parent_orphan_keys {
if let Entry::Occupied(mut orphan_entry) = self.by_parent_hash.entry(parent_orphan_key) {
let remove_entry = {
let mut orphans = orphan_entry.get_mut();
let orphans_keys: HashSet<H256> = orphans.keys().cloned().collect();
for orphan_to_remove in orphans_keys.intersection(hashes) {
removed.push(
orphans.remove(orphan_to_remove)
.expect("iterating by intersection of orphans keys with hashes; removing from orphans; qed")
);
}
orphans.is_empty()
};
if remove_entry {
orphan_entry.remove_entry();
}
}
}
// also delete all children
for hash in hashes.iter() {
removed.extend(self.remove_blocks_for_parent(hash));
}
removed
}
}
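// A sketch of the intended flow around this pool: an incoming block whose parent is
// not yet known is parked here; once a block is accepted, everything waiting for it
// is unparked and processed too (hypothetical `is_known_block` predicate and
// `process` callback, illustration only):
fn on_block_received<K, P>(pool: &mut OrphanBlocksPool, block: IndexedBlock, is_known_block: K, mut process: P)
where K: Fn(&H256) -> bool, P: FnMut(IndexedBlock) {
if !is_known_block(&block.header.raw.previous_header_hash) {
pool.insert_orphaned_block(block);
return;
}
let hash = block.header.hash.clone();
process(block);
// the parent is now known => all blocks waiting for it can be processed as well
for unorphaned_block in pool.remove_blocks_for_parent(&hash) {
process(unorphaned_block);
}
}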
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use test_data;
use primitives::hash::H256;
use super::OrphanBlocksPool;
#[test]
fn orphan_block_pool_empty_on_start() {
let pool = OrphanBlocksPool::new();
assert_eq!(pool.len(), 0);
}
#[test]
fn orphan_block_pool_insert_orphan_block() {
let mut pool = OrphanBlocksPool::new();
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
assert_eq!(pool.len(), 0);
pool.insert_orphaned_block(b1.into());
assert_eq!(pool.len(), 1);
pool.clear();
assert_eq!(pool.len(), 0);
}
#[test]
fn orphan_block_pool_remove_blocks_for_parent() {
let mut pool = OrphanBlocksPool::new();
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
let b2 = test_data::block_h169();
let b2_hash = b2.hash();
let b3 = test_data::block_h2();
let b3_hash = b3.hash();
pool.insert_orphaned_block(b1.into());
pool.insert_orphaned_block(b2.into());
pool.insert_orphaned_block(b3.into());
let removed = pool.remove_blocks_for_parent(&test_data::genesis().hash());
assert_eq!(removed.len(), 2);
assert_eq!(removed[0].header.hash, b1_hash);
assert_eq!(removed[1].header.hash, b3_hash);
assert_eq!(pool.len(), 1);
}
#[test]
fn orphan_block_pool_remove_blocks() {
let mut pool = OrphanBlocksPool::new();
let b1 = test_data::block_h1();
let b1_hash = b1.hash();
let b2 = test_data::block_h2();
let b2_hash = b2.hash();
let b3 = test_data::block_h169();
let b3_hash = b3.hash();
let b4 = test_data::block_h170();
let b4_hash = b4.hash();
let b5 = test_data::block_h181();
let b5_hash = b5.hash();
pool.insert_orphaned_block(b1.into());
pool.insert_orphaned_block(b2.into());
pool.insert_orphaned_block(b3.into());
pool.insert_orphaned_block(b4.into());
pool.insert_orphaned_block(b5.into());
let mut blocks_to_remove: HashSet<H256> = HashSet::new();
blocks_to_remove.insert(b1_hash.clone());
blocks_to_remove.insert(b3_hash.clone());
let removed = pool.remove_blocks(&blocks_to_remove);
assert_eq!(removed.len(), 4);
assert!(removed.iter().any(|ref b| &b.header.hash == &b1_hash));
assert!(removed.iter().any(|ref b| &b.header.hash == &b2_hash));
assert!(removed.iter().any(|ref b| &b.header.hash == &b3_hash));
assert!(removed.iter().any(|ref b| &b.header.hash == &b4_hash));
assert_eq!(pool.len(), 1);
}
}

View File

@ -1,209 +0,0 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::hash_map::Entry;
use linked_hash_map::LinkedHashMap;
use time;
use chain::IndexedTransaction;
use primitives::hash::H256;
/// Storage for transactions for which we have no parent transactions yet.
/// Transactions from this storage are either moved to the verification queue, or removed altogether.
#[derive(Debug)]
pub struct OrphanTransactionsPool {
/// Orphan transactions by hash.
by_hash: LinkedHashMap<H256, OrphanTransaction>,
/// Orphan transactions by parent transaction hash
by_parent: HashMap<H256, HashSet<H256>>,
}
/// Orphan transaction representation.
#[derive(Debug)]
pub struct OrphanTransaction {
/// Time when this transaction was inserted to the pool
pub insertion_time: f64,
/// Transaction itself
pub transaction: IndexedTransaction,
/// Parent transactions, which are still unknown to us
pub unknown_parents: HashSet<H256>,
}
impl OrphanTransactionsPool {
/// Create new pool
pub fn new() -> Self {
OrphanTransactionsPool {
by_hash: LinkedHashMap::new(),
by_parent: HashMap::new(),
}
}
#[cfg(test)]
/// Get total number of transactions in pool
pub fn len(&self) -> usize {
self.by_hash.len()
}
/// Get unknown transactions in the insertion order
pub fn transactions(&self) -> &LinkedHashMap<H256, OrphanTransaction> {
&self.by_hash
}
/// Check if pool contains this transaction
pub fn contains(&self, hash: &H256) -> bool {
self.by_hash.contains_key(hash)
}
/// Insert orphan transaction
pub fn insert(&mut self, transaction: IndexedTransaction, unknown_parents: HashSet<H256>) {
assert!(!self.by_hash.contains_key(&transaction.hash));
assert!(unknown_parents.iter().all(|h| transaction.raw.inputs.iter().any(|i| &i.previous_output.hash == h)));
for unknown_parent in &unknown_parents {
self.by_parent.entry(unknown_parent.clone())
.or_insert_with(HashSet::new)
.insert(transaction.hash.clone());
}
self.by_hash.insert(transaction.hash.clone(), OrphanTransaction::new(transaction, unknown_parents));
}
/// Remove all transactions depending on this parent
pub fn remove_transactions_for_parent(&mut self, hash: &H256) -> Vec<IndexedTransaction> {
assert!(!self.by_hash.contains_key(hash));
let mut removal_queue: VecDeque<H256> = VecDeque::new();
removal_queue.push_back(hash.clone());
let mut removed_orphans: Vec<IndexedTransaction> = Vec::new();
while let Some(hash) = removal_queue.pop_front() {
// remove direct children of hash
let mut removed_orphans_hashes: Vec<H256> = Vec::new();
if let Entry::Occupied(children_entry) = self.by_parent.entry(hash.clone()) {
for child in children_entry.get() {
let all_parents_are_known = {
let child_entry = self.by_hash.get_mut(child)
.expect("by_parent contains same entries as by_hash; child is from by_parent; qed");
child_entry.remove_known_parent(&hash)
};
if all_parents_are_known {
removed_orphans_hashes.push(child.clone());
removed_orphans.push(self.by_hash.remove(child)
.expect("by_parent contains same entries as by_hash; child is from by_parent; qed")
.transaction
);
}
}
children_entry.remove_entry();
}
// then also remove grandchildren of hash & so on
removal_queue.extend(removed_orphans_hashes);
}
removed_orphans
}
/// Remove transactions with given hashes + all dependent transactions
pub fn remove_transactions(&mut self, hashes: &[H256]) -> Vec<IndexedTransaction> {
let mut removed: Vec<IndexedTransaction> = Vec::new();
for hash in hashes {
if let Some(transaction) = self.by_hash.remove(hash) {
removed.push(transaction.transaction);
}
removed.extend(self.remove_transactions_for_parent(hash));
}
removed
}
}
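// The `unknown_parents` argument to `insert` is expected to be exactly the set of
// input previous-output hashes that cannot be resolved yet (see the assertions in
// `insert`). A sketch of computing it with a hypothetical `is_known_transaction`
// predicate (illustration only):
fn collect_unknown_parents<F>(transaction: &IndexedTransaction, is_known_transaction: F) -> HashSet<H256>
where F: Fn(&H256) -> bool {
transaction.raw.inputs.iter()
.map(|input| input.previous_output.hash.clone())
.filter(|hash| !is_known_transaction(hash))
.collect()
}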
impl OrphanTransaction {
/// Create new orphaned transaction
pub fn new(transaction: IndexedTransaction, unknown_parents: HashSet<H256>) -> Self {
OrphanTransaction {
insertion_time: time::precise_time_s(),
transaction: transaction,
unknown_parents: unknown_parents,
}
}
/// Remove a parent which is now known. Returns true if all parents are now known
pub fn remove_known_parent(&mut self, parent_hash: &H256) -> bool {
self.unknown_parents.remove(parent_hash);
self.unknown_parents.is_empty()
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use test_data::{TransactionBuilder, ChainBuilder};
use primitives::hash::H256;
use super::OrphanTransactionsPool;
#[test]
fn orphan_transaction_pool_empty_on_start() {
let pool = OrphanTransactionsPool::new();
assert_eq!(pool.len(), 0);
}
#[test]
fn orphan_transaction_pool_insert_dependent_transactions() {
let chain = &mut ChainBuilder::new();
TransactionBuilder::with_output(100).store(chain) // t1
.into_input(0).add_output(200).store(chain) // t1 -> t2
.into_input(0).add_output(300).store(chain) // t1 -> t2 -> t3
.set_default_input(0).set_output(400).store(chain) // t4
.into_input(0).set_output(500).store(chain); // t4 -> t5
let t2_unknown: HashSet<H256> = chain.at(1).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let t3_unknown: HashSet<H256> = chain.at(2).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let t5_unknown: HashSet<H256> = chain.at(4).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let mut pool = OrphanTransactionsPool::new();
pool.insert(chain.at(1).into(), t2_unknown); // t2
pool.insert(chain.at(2).into(), t3_unknown); // t3
pool.insert(chain.at(4).into(), t5_unknown); // t5
assert_eq!(pool.len(), 3);
let removed = pool.remove_transactions_for_parent(&chain.at(0).hash());
assert_eq!(pool.len(), 1);
let removed: Vec<H256> = removed.into_iter().map(|t| t.hash).collect();
assert_eq!(removed, vec![chain.at(1).hash(), chain.at(2).hash()]);
let removed = pool.remove_transactions_for_parent(&chain.at(3).hash());
assert_eq!(pool.len(), 0);
let removed: Vec<H256> = removed.into_iter().map(|t| t.hash).collect();
assert_eq!(removed, vec![chain.at(4).hash()]);
}
#[test]
fn orphan_transaction_pool_remove_transactions() {
let chain = &mut ChainBuilder::new();
TransactionBuilder::with_output(100).store(chain) // t1
.into_input(0).add_output(200).store(chain) // t1 -> t2
.into_input(0).add_output(300).store(chain) // t1 -> t2 -> t3
.set_default_input(0).set_output(400).store(chain) // t4
.into_input(0).set_output(500).store(chain) // t4 -> t5
.set_default_input(0).set_output(600).store(chain) // t6
.into_input(0).set_output(700).store(chain); // t6 -> t7
let t2_unknown: HashSet<H256> = chain.at(1).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let t3_unknown: HashSet<H256> = chain.at(2).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let t5_unknown: HashSet<H256> = chain.at(4).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let t7_unknown: HashSet<H256> = chain.at(6).inputs.iter().map(|i| i.previous_output.hash.clone()).collect();
let mut pool = OrphanTransactionsPool::new();
pool.insert(chain.at(1).into(), t2_unknown); // t2
pool.insert(chain.at(2).into(), t3_unknown); // t3
pool.insert(chain.at(4).into(), t5_unknown); // t5
pool.insert(chain.at(6).into(), t7_unknown); // t7
assert_eq!(pool.len(), 4);
let removed = pool.remove_transactions(&vec![chain.at(1).hash(), chain.at(3).hash()]);
assert_eq!(pool.len(), 1);
let removed: Vec<H256> = removed.into_iter().map(|t| t.hash).collect();
assert_eq!(removed, vec![chain.at(1).hash(), chain.at(2).hash(), chain.at(4).hash()]);
let removed = pool.remove_transactions(&vec![chain.at(6).hash()]);
assert_eq!(pool.len(), 0);
let removed: Vec<H256> = removed.into_iter().map(|t| t.hash).collect();
assert_eq!(removed, vec![chain.at(6).hash()]);
}
}

View File

@ -1,281 +0,0 @@
use std::cmp::min;
use bit_vec::BitVec;
use chain::merkle_node_hash;
use primitives::hash::H256;
/// Partial merkle tree
pub struct PartialMerkleTree {
/// Total number of transactions
pub tx_count: usize,
/// Nodes hashes
pub hashes: Vec<H256>,
/// Match flags
pub flags: BitVec,
}
/// Partial merkle tree parse result
#[cfg(test)]
pub struct ParsedPartialMerkleTree {
/// Merkle root
pub root: H256,
/// Matched hashes
pub hashes: Vec<H256>,
/// Match flags
pub flags: BitVec,
}
/// Build partial merkle tree
pub fn build_partial_merkle_tree(tx_hashes: Vec<H256>, tx_matches: BitVec) -> PartialMerkleTree {
PartialMerkleTreeBuilder::build(tx_hashes, tx_matches)
}
/// Parse partial merkle tree
#[cfg(test)]
pub fn parse_partial_merkle_tree(tree: PartialMerkleTree) -> Result<ParsedPartialMerkleTree, String> {
PartialMerkleTreeBuilder::parse(tree)
}
/// Service structure to construct `merkleblock` message.
struct PartialMerkleTreeBuilder {
/// All transactions length.
all_len: usize,
/// All transactions hashes.
all_hashes: Vec<H256>,
/// Match flags for all transactions.
all_matches: BitVec,
/// Partial hashes.
hashes: Vec<H256>,
/// Partial match flags.
matches: BitVec,
}
impl PartialMerkleTree {
/// Create new merkle tree with given data
pub fn new(tx_count:usize, hashes: Vec<H256>, flags: BitVec) -> Self {
PartialMerkleTree {
tx_count: tx_count,
hashes: hashes,
flags: flags,
}
}
}
#[cfg(test)]
impl ParsedPartialMerkleTree {
pub fn new(root: H256, hashes: Vec<H256>, flags: BitVec) -> Self {
ParsedPartialMerkleTree {
root: root,
hashes: hashes,
flags: flags,
}
}
}
impl PartialMerkleTreeBuilder {
/// Build partial merkle tree as described here:
/// https://bitcoin.org/en/developer-reference#creating-a-merkleblock-message
pub fn build(all_hashes: Vec<H256>, all_matches: BitVec) -> PartialMerkleTree {
let mut partial_merkle_tree = PartialMerkleTreeBuilder {
all_len: all_hashes.len(),
all_hashes: all_hashes,
all_matches: all_matches,
hashes: Vec::new(),
matches: BitVec::new(),
};
partial_merkle_tree.build_tree();
PartialMerkleTree::new(partial_merkle_tree.all_len, partial_merkle_tree.hashes, partial_merkle_tree.matches)
}
#[cfg(test)]
/// Parse partial merkle tree as described here:
/// https://bitcoin.org/en/developer-reference#parsing-a-merkleblock-message
pub fn parse(tree: PartialMerkleTree) -> Result<ParsedPartialMerkleTree, String> {
let mut partial_merkle_tree = PartialMerkleTreeBuilder {
all_len: tree.tx_count,
all_hashes: Vec::new(),
all_matches: BitVec::from_elem(tree.tx_count, false),
hashes: tree.hashes,
matches: tree.flags,
};
let merkle_root = try!(partial_merkle_tree.parse_tree());
Ok(ParsedPartialMerkleTree::new(merkle_root, partial_merkle_tree.all_hashes, partial_merkle_tree.all_matches))
}
fn build_tree(&mut self) {
let tree_height = self.tree_height();
self.build_branch(tree_height, 0)
}
#[cfg(test)]
fn parse_tree(&mut self) -> Result<H256, String> {
if self.all_len == 0 {
return Err("no transactions".into());
}
if self.hashes.len() > self.all_len {
return Err("too many hashes".into());
}
if self.matches.len() < self.hashes.len() {
return Err("too few matches".into());
}
// parse tree
let mut matches_used = 0usize;
let mut hashes_used = 0usize;
let tree_height = self.tree_height();
let merkle_root = try!(self.parse_branch(tree_height, 0, &mut matches_used, &mut hashes_used));
if matches_used != self.matches.len() {
return Err("not all matches used".into());
}
if hashes_used != self.hashes.len() {
return Err("not all hashes used".into());
}
Ok(merkle_root)
}
fn build_branch(&mut self, height: usize, pos: usize) {
// determine whether this node is the parent of at least one matched txid
let transactions_begin = pos << height;
let transactions_end = min(self.all_len, (pos + 1) << height);
let flag = (transactions_begin..transactions_end).any(|idx| self.all_matches[idx]);
// remember flag
self.matches.push(flag);
// proceed with descendants
if height == 0 || !flag {
// we're at the leaf level || there is no match
let hash = self.branch_hash(height, pos);
self.hashes.push(hash);
} else {
// proceed with left child
self.build_branch(height - 1, pos << 1);
// proceed with right child if any
if (pos << 1) + 1 < self.level_width(height - 1) {
self.build_branch(height - 1, (pos << 1) + 1);
}
}
}
#[cfg(test)]
fn parse_branch(&mut self, height: usize, pos: usize, matches_used: &mut usize, hashes_used: &mut usize) -> Result<H256, String> {
if *matches_used >= self.matches.len() {
return Err("all matches used".into());
}
let flag = self.matches[*matches_used];
*matches_used += 1;
if height == 0 || !flag {
// we're at the leaf level || there is no match
if *hashes_used >= self.hashes.len() {
return Err("all hashes used".into());
}
// get node hash
let ref hash = self.hashes[*hashes_used];
*hashes_used += 1;
// on leaf level && matched flag set => mark transaction as matched
if height == 0 && flag {
self.all_hashes.push(hash.clone());
self.all_matches.set(pos, true);
}
Ok(hash.clone())
} else {
// proceed with left child
let left = try!(self.parse_branch(height - 1, pos << 1, matches_used, hashes_used));
// proceed with right child if any
let has_right_child = (pos << 1) + 1 < self.level_width(height - 1);
let right = if has_right_child {
try!(self.parse_branch(height - 1, (pos << 1) + 1, matches_used, hashes_used))
} else {
left.clone()
};
if has_right_child && left == right {
Err("met same hash twice".into())
} else {
Ok(merkle_node_hash(&left, &right))
}
}
}
fn tree_height(&self) -> usize {
let mut height = 0usize;
while self.level_width(height) > 1 {
height += 1;
}
height
}
fn level_width(&self, height: usize) -> usize {
(self.all_len + (1 << height) - 1) >> height
}
fn branch_hash(&self, height: usize, pos: usize) -> H256 {
if height == 0 {
self.all_hashes[pos].clone()
} else {
let left = self.branch_hash(height - 1, pos << 1);
let right = if (pos << 1) + 1 < self.level_width(height - 1) {
self.branch_hash(height - 1, (pos << 1) + 1)
} else {
left.clone()
};
merkle_node_hash(&left, &right)
}
}
}
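// For intuition: level_width is a ceiling division, so for 5 transactions the widths
// per height are 5, 3, 2, 1 and tree_height() returns 3. A standalone sketch of the
// same arithmetic (illustration only):
fn level_widths(tx_count: usize) -> Vec<usize> {
let width = |height: usize| (tx_count + (1 << height) - 1) >> height;
(0..).map(width).take_while(|&w| w > 1).chain(Some(1)).collect()
}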
#[cfg(test)]
mod tests {
use chain::{Transaction, merkle_root, merkle_node_hash};
use primitives::hash::H256;
use test_data;
use super::{build_partial_merkle_tree, parse_partial_merkle_tree};
#[test]
// test from core implementation (slow)
// https://github.com/bitcoin/bitcoin/blob/master/src/test/pmt_tests.cpp
fn test_build_merkle_block() {
use bit_vec::BitVec;
use rand::{Rng, SeedableRng, StdRng};
let rng_seed: &[_] = &[0, 0, 0, 0];
let mut rng: StdRng = SeedableRng::from_seed(rng_seed);
// for some transactions counts
let tx_counts: Vec<usize> = vec![1, 4, 7, 17, 56, 100, 127, 256, 312, 513, 1000, 4095];
for tx_count in tx_counts {
// build block with given transactions number
let transactions: Vec<Transaction> = (0..tx_count).map(|n| test_data::TransactionBuilder::with_version(n as i32).into()).collect();
let hashes: Vec<_> = transactions.iter().map(|t| t.hash()).collect();
let merkle_root = merkle_root(&hashes);
// mark different transactions as matched
for seed_tweak in 1..15 {
let mut matches: BitVec = BitVec::with_capacity(tx_count);
let mut matched_hashes: Vec<H256> = Vec::with_capacity(tx_count);
for i in 0usize..tx_count {
let is_match = (rng.gen::<u32>() & ((1 << (seed_tweak / 2)) - 1)) == 0;
matches.push(is_match);
if is_match {
matched_hashes.push(hashes[i].clone());
}
}
// build partial merkle tree
let partial_tree = build_partial_merkle_tree(hashes.clone(), matches.clone());
// parse tree back
let parsed_tree = parse_partial_merkle_tree(partial_tree).expect("no error");
assert_eq!(matched_hashes, parsed_tree.hashes);
assert_eq!(matches, parsed_tree.flags);
assert_eq!(merkle_root, parsed_tree.root);
}
}
}
}

View File

@ -1,3 +0,0 @@
/// Promise of execution
pub struct Promise {
}

View File

@ -1,2 +0,0 @@
/// Block height type
pub type BlockHeight = u32;

View File

@ -1,82 +0,0 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::hash_map::Entry;
use linked_hash_map::LinkedHashMap;
use time;
use chain::IndexedBlock;
use orphan_blocks_pool::OrphanBlocksPool;
use primitives::hash::H256;
/// A block whose parent block is unknown.
#[derive(Debug)]
pub struct UnknownBlock {
/// Time when this block was inserted to the pool
pub insertion_time: f64,
/// Block itself
pub block: IndexedBlock,
}
/// Storage for blocks whose parent block is unknown.
#[derive(Debug)]
pub struct UnknownBlocksPool {
/// { Parent block hash: { Block hash : block } }.
by_parent_hash: HashMap<H256, HashMap<H256, UnknownBlock>>,
/// { Block hash: parent block hash } ordered by insertion time.
by_insertion_time: LinkedHashMap<H256, H256>,
}
impl UnknownBlocksPool {
/// Create new pool
pub fn new() -> Self {
UnknownBlocksPool {
by_parent_hash: HashMap::new(),
by_insertion_time: LinkedHashMap::new(),
}
}
/// Get total number of blocks in pool
pub fn len(&self) -> usize {
self.by_insertion_time.len()
}
/// Check if pool already contains this block
pub fn contains_block(&self, block: &IndexedBlock) -> bool {
self.by_insertion_time.contains_key(&block.header.hash)
}
/// Insert unknown block
pub fn insert_block(&mut self, block: IndexedBlock) {
// record insertion order as well - contains_block() relies on it
self.by_insertion_time.insert(block.header.hash.clone(), block.header.raw.previous_header_hash.clone());
self.by_parent_hash
.entry(block.header.raw.previous_header_hash.clone())
.or_insert_with(HashMap::new)
.insert(block.header.hash.clone(), block.into());
}
/// Remove all blocks depending on this parent
pub fn remove_blocks_for_parent(&mut self, hash: &H256) -> Vec<IndexedBlock> {
let mut queue: VecDeque<H256> = VecDeque::new();
queue.push_back(hash.clone());
let mut removed: Vec<IndexedBlock> = Vec::new();
while let Some(parent_hash) = queue.pop_front() {
if let Entry::Occupied(entry) = self.by_parent_hash.entry(parent_hash) {
let (_, mut orphaned) = entry.remove_entry();
// forget insertion order for all removed blocks
for block_hash in orphaned.keys() {
self.by_insertion_time.remove(block_hash);
}
queue.extend(orphaned.keys().cloned());
removed.extend(orphaned.drain().map(|(_, v)| v.block));
}
}
removed
}
}
impl From<IndexedBlock> for UnknownBlock {
fn from(block: IndexedBlock) -> Self {
UnknownBlock {
insertion_time: time::precise_time_s(),
block: block,
}
}
}
#[cfg(test)]
mod tests {
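// Hedged sketch of a unit test this module was missing: insertion is visible through
// len() and removal by parent drains the pool (uses the same test_data helpers as
// the other pools' tests).
use test_data;
use super::UnknownBlocksPool;
#[test]
fn unknown_blocks_pool_insert_and_remove() {
let mut pool = UnknownBlocksPool::new();
pool.insert_block(test_data::block_h1().into());
assert_eq!(pool.len(), 1);
let removed = pool.remove_blocks_for_parent(&test_data::genesis().hash());
assert_eq!(removed.len(), 1);
assert_eq!(pool.len(), 0);
}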
}