fixed clippy warnings after sync refactoring

Svyatoslav Nikolsky 2017-01-09 12:29:34 +03:00
parent afc9c53df0
commit 2aa0aa6721
9 changed files with 89 additions and 94 deletions
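The warnings fixed below are common clippy lints: a collapsible `else { if .. }`, `needless_return`, `useless_format` (a `format!()` call with no arguments), `or_fun_call` (an eagerly built default passed to `or_insert`), plus an `allow(too_many_arguments)` attribute on a constructor. A minimal sketch of the before/after patterns, using hypothetical names rather than code from this repository:

// Hypothetical, self-contained illustration of the lint patterns fixed in this commit;
// not code from the repository itself.
use std::collections::HashMap;

#[derive(Default)]
struct BlockStats {
    failures: u32,
}

// collapsible `else { if .. }` flattened into `else if ..`
fn block_state(stored: bool, dead_end: bool) -> &'static str {
    if stored {
        "stored"
    } else if dead_end {
        "dead-end"
    } else {
        "unknown"
    }
}

// `or_fun_call`: build the default lazily with `or_insert_with`;
// `needless_return`: the final expression is returned implicitly
fn register_failure(stats: &mut HashMap<String, BlockStats>, hash: &str) -> bool {
    let entry = stats.entry(hash.to_owned()).or_insert_with(BlockStats::default);
    entry.failures += 1;
    entry.failures > 3
}

fn main() {
    // `useless_format`: pass the &str literal instead of a no-argument format!()
    let reason = "Got unrequested 'merkleblock' message";
    println!("{} ({})", block_state(false, true), reason);

    let mut stats = HashMap::new();
    assert!(!register_failure(&mut stats, "deadbeef"));
}

In each case the behaviour is unchanged; the lints only flag the redundant form, which is why the diff below is almost entirely mechanical.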

View File

@ -49,6 +49,7 @@ struct TransactionAcceptSinkData {
impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
/// Create new synchronization node
#[cfg_attr(feature="cargo-clippy", allow(too_many_arguments))]
pub fn new(network: Magic, storage: StorageRef, memory_pool: MemoryPoolRef, peers: PeersRef,
state: SynchronizationStateRef, executor: ExecutorRef<T>, client: ClientRef<V>, server: ServerRef<U>) -> Self {
LocalNode {
@ -130,7 +131,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
}
trace!(target: "sync", "Got `getdata` message from peer#{}. Inventory len: {}", peer_index, message.inventory.len());
self.server.execute(ServerTask::ServeGetData(peer_index, message));
self.server.execute(ServerTask::GetData(peer_index, message));
}
/// When peer is requesting known block hashes
@ -141,7 +142,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
}
trace!(target: "sync", "Got `getblocks` message from peer#{}", peer_index);
self.server.execute(ServerTask::ServeGetBlocks(peer_index, message));
self.server.execute(ServerTask::GetBlocks(peer_index, message));
}
/// When peer is requesting known block headers
@ -158,7 +159,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
// and the peer, which has just provided new blocks to us, is asking for headers
// => do not serve getheaders until we have fully processed its blocks + wait until headers are served before returning
let server = Arc::downgrade(&self.server);
let server_task = ServerTask::ServeGetHeaders(peer_index, message, id);
let server_task = ServerTask::GetHeaders(peer_index, message, id);
let lazy_server_task = lazy(move || {
server.upgrade().map(|s| s.execute(server_task));
finished::<(), ()>(())
@ -174,7 +175,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
}
trace!(target: "sync", "Got `mempool` message from peer#{}", peer_index);
self.server.execute(ServerTask::ServeMempool(peer_index));
self.server.execute(ServerTask::Mempool(peer_index));
}
/// When peer asks us for specific transactions from a specific block
@ -185,7 +186,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
}
trace!(target: "sync", "Got `getblocktxn` message from peer#{}", peer_index);
self.server.execute(ServerTask::ServeGetBlockTxn(peer_index, message));
self.server.execute(ServerTask::GetBlockTxn(peer_index, message));
}
/// When peer sets bloom filter for connection
@ -243,21 +244,21 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
pub fn on_merkleblock(&self, peer_index: PeerIndex, _message: types::MerkleBlock) {
trace!(target: "sync", "Got `merkleblock` message from peer#{}", peer_index);
// we never set up a filter on connections => misbehaving
self.peers.misbehaving(peer_index, &format!("Got unrequested 'merkleblock' message"));
self.peers.misbehaving(peer_index, "Got unrequested 'merkleblock' message");
}
/// When peer sends us a compact block
pub fn on_compact_block(&self, peer_index: PeerIndex, _message: types::CompactBlock) {
trace!(target: "sync", "Got `cmpctblock` message from peer#{}", peer_index);
// we never ask for compact blocks from peers => misbehaving
self.peers.misbehaving(peer_index, &format!("Got unrequested 'cmpctblock' message"));
self.peers.misbehaving(peer_index, "Got unrequested 'cmpctblock' message");
}
/// When peer sends us specific transactions for a specific block
pub fn on_block_txn(&self, peer_index: PeerIndex, _message: types::BlockTxn) {
trace!(target: "sync", "Got `blocktxn` message from peer#{}", peer_index);
// we never ask for this => misbehaving
self.peers.misbehaving(peer_index, &format!("Got unrequested 'blocktxn' message"));
self.peers.misbehaving(peer_index, "Got unrequested 'blocktxn' message");
}
pub fn accept_transaction(&self, transaction: Transaction) -> Result<H256, String> {
@ -294,11 +295,11 @@ impl TransactionAcceptSinkData {
pub fn wait(&self) -> Result<H256, String> {
let mut lock = self.result.lock();
if lock.is_some() {
return lock.take().unwrap();
return lock.take().expect("checked line above");
}
self.waiter.wait(&mut lock);
return lock.take().unwrap();
lock.take().expect("waiter.wait returns only when result is set; lock.take() takes result from waiter.result; qed")
}
}
@ -398,7 +399,7 @@ pub mod tests {
});
// => `getdata` is served
let tasks = server.take_tasks();
assert_eq!(tasks, vec![ServerTask::ServeGetData(peer_index, types::GetData::with_inventory(inventory))]);
assert_eq!(tasks, vec![ServerTask::GetData(peer_index, types::GetData::with_inventory(inventory))]);
}
#[test]

View File

@ -258,12 +258,10 @@ impl Chain {
Some(queue_index) => BlockState::from_queue_index(queue_index),
None => if self.storage.contains_block(db::BlockRef::Hash(hash.clone())) {
BlockState::Stored
} else if self.dead_end_blocks.contains(hash) {
BlockState::DeadEnd
} else {
if self.dead_end_blocks.contains(hash) {
BlockState::DeadEnd
} else {
BlockState::Unknown
}
BlockState::Unknown
},
}
}

View File

@ -227,7 +227,7 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
BlockState::DeadEnd if !self.config.close_connection_on_bad_block => true,
BlockState::DeadEnd if self.config.close_connection_on_bad_block => {
self.peers.misbehaving(peer_index, &format!("Provided dead-end block {:?}", item.hash.to_reversed_str()));
return false;
false
},
_ => false,
},
@ -236,7 +236,7 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
// unknown inventory type
InventoryType::Error => {
self.peers.misbehaving(peer_index, &format!("Provided unknown inventory type {:?}", item.hash.to_reversed_str()));
return false;
false
}
}
})
@ -405,18 +405,18 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
let blocks_hashes_to_forget: Vec<_> = blocks_to_verify.iter().map(|b| b.hash().clone()).collect();
self.chain.forget_blocks_leave_header(&blocks_hashes_to_forget);
// remember that we are verifying these blocks
let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|ref b| b.header.clone()).collect();
let blocks_headers_to_verify: Vec<_> = blocks_to_verify.iter().map(|b| b.header.clone()).collect();
self.chain.verify_blocks(blocks_headers_to_verify);
// remember that we are verifying block from this peer
for verifying_block_hash in blocks_to_verify.iter().map(|ref b| b.hash().clone()) {
for verifying_block_hash in blocks_to_verify.iter().map(|b| b.hash().clone()) {
self.verifying_blocks_by_peer.insert(verifying_block_hash, peer_index);
}
match self.verifying_blocks_futures.entry(peer_index) {
Entry::Occupied(mut entry) => {
entry.get_mut().0.extend(blocks_to_verify.iter().map(|ref b| b.hash().clone()));
entry.get_mut().0.extend(blocks_to_verify.iter().map(|b| b.hash().clone()));
},
Entry::Vacant(entry) => {
let block_hashes: HashSet<_> = blocks_to_verify.iter().map(|ref b| b.hash().clone()).collect();
let block_hashes: HashSet<_> = blocks_to_verify.iter().map(|b| b.hash().clone()).collect();
entry.insert((block_hashes, Vec::new()));
}
}
@ -589,14 +589,12 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
// => bad situation
0_f64
}
} else if verification_speed < 0.01_f64 {
// verification speed is too slow
60_f64
} else {
if verification_speed < 0.01_f64 {
// verification speed is too slow
60_f64
} else {
// blocks / (blocks / second) -> second
verifying_hashes_len as f64 / verification_speed
}
// blocks / (blocks / second) -> second
verifying_hashes_len as f64 / verification_speed
};
// estimate time when all synchronization requests will complete
let synchronization_queue_will_be_full_in = if synchronization_speed < 0.01_f64 {
@ -879,7 +877,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
transactions.extend(self.orphaned_transactions_pool.remove_transactions_for_parent(&transaction.hash));
transactions.push_front(transaction);
// remember that we are verifying these transactions
for ref tx in &transactions {
for tx in &transactions {
if !relay {
self.do_not_relay.insert(tx.hash.clone());
}
@ -922,7 +920,7 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// request blocks
let getdata = types::GetData {
inventory: chunk_hashes.into_iter().map(|h| InventoryVector::block(h)).collect(),
inventory: chunk_hashes.into_iter().map(InventoryVector::block).collect(),
};
tasks.push(Task::GetData(peer, getdata));
}
@ -1018,12 +1016,10 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
self.execute_synchronization_tasks(None, None);
// relay block to our peers
if self.state.is_saturated() || self.state.is_nearly_saturated() {
if needs_relay {
for block_hash in insert_result.canonized_blocks_hashes {
if let Some(block) = self.chain.storage().block(block_hash.into()) {
self.executor.execute(Task::RelayNewBlock(block.into()));
}
if needs_relay && (self.state.is_saturated() || self.state.is_nearly_saturated()) {
for block_hash in insert_result.canonized_blocks_hashes {
if let Some(block) = self.chain.storage().block(block_hash.into()) {
self.executor.execute(Task::RelayNewBlock(block.into()));
}
}
}
@ -1104,8 +1100,8 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
}
// call verification future, if any
if let Some(mut future_sink) = self.verifying_transactions_sinks.remove(&transaction.hash) {
(&mut future_sink).on_transaction_verification_success(transaction);
if let Some(future_sink) = self.verifying_transactions_sinks.remove(&transaction.hash) {
future_sink.on_transaction_verification_success(transaction);
}
}
@ -1119,8 +1115,8 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
self.chain.forget_verifying_transaction_with_children(hash);
// call verification future, if any
if let Some(mut future_sink) = self.verifying_transactions_sinks.remove(hash) {
(&mut future_sink).on_transaction_verification_error(err, hash);
if let Some(future_sink) = self.verifying_transactions_sinks.remove(hash) {
future_sink.on_transaction_verification_error(err, hash);
}
}

View File

@ -113,8 +113,8 @@ impl PeersTasks {
/// Sort peers for blocks request
pub fn sort_peers_for_blocks(&self, peers: &mut Vec<PeerIndex>) {
peers.sort_by(|left, right| {
let left_speed = self.stats.get(&left).map(|s| s.speed.speed()).unwrap_or(0f64);
let right_speed = self.stats.get(&right).map(|s| s.speed.speed()).unwrap_or(0f64);
let left_speed = self.stats.get(left).map(|s| s.speed.speed()).unwrap_or(0f64);
let right_speed = self.stats.get(right).map(|s| s.speed.speed()).unwrap_or(0f64);
// larger speed => better
right_speed.partial_cmp(&left_speed).unwrap_or(Ordering::Equal)
})
@ -179,11 +179,11 @@ impl PeersTasks {
/// Block is received from peer.
pub fn on_block_received(&mut self, peer_index: PeerIndex, block_hash: &H256) {
// block received => reset failures
self.blocks_stats.remove(&block_hash);
self.blocks_stats.remove(block_hash);
let is_last_requested_block_received = if let Some(blocks_request) = self.blocks_requests.get_mut(&peer_index) {
// if block hasn't been requested => do nothing
if !blocks_request.blocks.remove(&block_hash) {
if !blocks_request.blocks.remove(block_hash) {
return;
}
@ -260,7 +260,7 @@ impl PeersTasks {
let mut normal_blocks: Vec<H256> = Vec::with_capacity(hashes.len());
for hash in hashes {
let is_failed_block = {
let block_stats = self.blocks_stats.entry(hash.clone()).or_insert(BlockStats::default());
let block_stats = self.blocks_stats.entry(hash.clone()).or_insert_with(BlockStats::default);
block_stats.failures += 1;
block_stats.failures > MAX_BLOCKS_FAILURES
};

View File

@ -15,17 +15,17 @@ use utils::KnownHashType;
#[derive(Debug, PartialEq)]
pub enum ServerTask {
/// Serve 'getdata' request
ServeGetData(PeerIndex, types::GetData),
GetData(PeerIndex, types::GetData),
/// Serve reversed 'getdata' request
ServeReversedGetData(PeerIndex, types::GetData, types::NotFound),
ReversedGetData(PeerIndex, types::GetData, types::NotFound),
/// Serve 'getblocks' request
ServeGetBlocks(PeerIndex, types::GetBlocks),
GetBlocks(PeerIndex, types::GetBlocks),
/// Serve 'getheaders' request
ServeGetHeaders(PeerIndex, types::GetHeaders, RequestId),
GetHeaders(PeerIndex, types::GetHeaders, RequestId),
/// Serve 'mempool' request
ServeMempool(PeerIndex),
Mempool(PeerIndex),
/// Serve 'getblocktxn' request
ServeGetBlockTxn(PeerIndex, types::GetBlockTxn),
GetBlockTxn(PeerIndex, types::GetBlockTxn),
}
/// Synchronization server
@ -76,12 +76,12 @@ impl Server for ServerImpl {
impl ServerTask {
pub fn peer_index(&self) -> PeerIndex {
match *self {
ServerTask::ServeGetData(peer_index, _) => peer_index,
ServerTask::ServeReversedGetData(peer_index, _, _) => peer_index,
ServerTask::ServeGetBlocks(peer_index, _) => peer_index,
ServerTask::ServeGetHeaders(peer_index, _, _) => peer_index,
ServerTask::ServeMempool(peer_index) => peer_index,
ServerTask::ServeGetBlockTxn(peer_index, _) => peer_index,
ServerTask::GetData(peer_index, _)
| ServerTask::ReversedGetData(peer_index, _, _)
| ServerTask::GetBlocks(peer_index, _)
| ServerTask::GetHeaders(peer_index, _, _)
| ServerTask::Mempool(peer_index)
| ServerTask::GetBlockTxn(peer_index, _) => peer_index,
}
}
}
@ -229,12 +229,12 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
pub fn execute(&self, task: ServerTask) -> Option<ServerTask> {
match task {
ServerTask::ServeGetData(peer_index, message) => return self.serve_get_data(peer_index, message),
ServerTask::ServeReversedGetData(peer_index, message, notfound) => return self.serve_reversed_get_data(peer_index, message, notfound),
ServerTask::ServeGetBlocks(peer_index, message) => self.serve_get_blocks(peer_index, message),
ServerTask::ServeGetHeaders(peer_index, message, request_id) => self.serve_get_headers(peer_index, message, request_id),
ServerTask::ServeMempool(peer_index) => self.serve_mempool(peer_index),
ServerTask::ServeGetBlockTxn(peer_index, message) => self.serve_get_block_txn(peer_index, message),
ServerTask::GetData(peer_index, message) => return self.serve_get_data(peer_index, message),
ServerTask::ReversedGetData(peer_index, message, notfound) => return self.serve_reversed_get_data(peer_index, message, notfound),
ServerTask::GetBlocks(peer_index, message) => self.serve_get_blocks(peer_index, message),
ServerTask::GetHeaders(peer_index, message, request_id) => self.serve_get_headers(peer_index, message, request_id),
ServerTask::Mempool(peer_index) => self.serve_mempool(peer_index),
ServerTask::GetBlockTxn(peer_index, message) => self.serve_get_block_txn(peer_index, message),
}
None
@ -247,7 +247,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
message.inventory.reverse();
// + while iterating over items, also accumulate unknown items to respond with notfound
let notfound = types::NotFound { inventory: Vec::new(), };
Some(ServerTask::ServeReversedGetData(peer_index, message, notfound))
Some(ServerTask::ReversedGetData(peer_index, message, notfound))
}
fn serve_reversed_get_data(&self, peer_index: PeerIndex, mut message: types::GetData, mut notfound: types::NotFound) -> Option<ServerTask> {
@ -317,7 +317,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
},
}
Some(ServerTask::ServeReversedGetData(peer_index, message, notfound))
Some(ServerTask::ReversedGetData(peer_index, message, notfound))
}
fn serve_get_blocks(&self, peer_index: PeerIndex, message: types::GetBlocks) {
@ -337,7 +337,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
trace!(target: "sync", "'getblocks' request from peer#{} is ignored as there are no new blocks for peer", peer_index);
}
} else {
self.peers.misbehaving(peer_index, &format!("Got 'getblocks' message without known blocks"));
self.peers.misbehaving(peer_index, "Got 'getblocks' message without known blocks");
return;
}
}
@ -357,7 +357,7 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
trace!(target: "sync", "'getheaders' response to peer#{} is ready with {} headers", peer_index, headers.len());
self.executor.execute(Task::Headers(peer_index, types::Headers::with_headers(headers), Some(request_id)));
} else {
self.peers.misbehaving(peer_index, &format!("Got 'headers' message without known blocks"));
self.peers.misbehaving(peer_index, "Got 'headers' message without known blocks");
return;
}
}
@ -522,7 +522,7 @@ pub mod tests {
hash: H256::default(),
}
];
server.execute(ServerTask::ServeGetData(0, types::GetData::with_inventory(inventory.clone())));
server.execute(ServerTask::GetData(0, types::GetData::with_inventory(inventory.clone())));
// => respond with notfound
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::NotFound(0, types::NotFound::with_inventory(inventory))]);
@ -538,7 +538,7 @@ pub mod tests {
hash: test_data::genesis().hash(),
}
];
server.execute(ServerTask::ServeGetData(0, types::GetData::with_inventory(inventory.clone())));
server.execute(ServerTask::GetData(0, types::GetData::with_inventory(inventory.clone())));
// => respond with block
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::Block(0, test_data::genesis().into())]);
@ -549,7 +549,7 @@ pub mod tests {
let (_, _, executor, _, server) = create_synchronization_server();
// when asking for blocks hashes
let genesis_block_hash = test_data::genesis().hash();
server.execute(ServerTask::ServeGetBlocks(0, types::GetBlocks {
server.execute(ServerTask::GetBlocks(0, types::GetBlocks {
version: 0,
block_locator_hashes: vec![genesis_block_hash.clone()],
hash_stop: H256::default(),
@ -564,7 +564,7 @@ pub mod tests {
let (storage, _, executor, _, server) = create_synchronization_server();
storage.insert_block(&test_data::block_h1()).expect("Db write error");
// when asking for blocks hashes
server.execute(ServerTask::ServeGetBlocks(0, types::GetBlocks {
server.execute(ServerTask::GetBlocks(0, types::GetBlocks {
version: 0,
block_locator_hashes: vec![test_data::genesis().hash()],
hash_stop: H256::default(),
@ -584,7 +584,7 @@ pub mod tests {
// when asking for blocks hashes
let genesis_block_hash = test_data::genesis().hash();
let dummy_id = 6;
server.execute(ServerTask::ServeGetHeaders(0, types::GetHeaders {
server.execute(ServerTask::GetHeaders(0, types::GetHeaders {
version: 0,
block_locator_hashes: vec![genesis_block_hash.clone()],
hash_stop: H256::default(),
@ -600,7 +600,7 @@ pub mod tests {
storage.insert_block(&test_data::block_h1()).expect("Db write error");
// when asking for blocks hashes
let dummy_id = 0;
server.execute(ServerTask::ServeGetHeaders(0, types::GetHeaders {
server.execute(ServerTask::GetHeaders(0, types::GetHeaders {
version: 0,
block_locator_hashes: vec![test_data::genesis().hash()],
hash_stop: H256::default(),
@ -617,7 +617,7 @@ pub mod tests {
fn server_mempool_do_not_responds_inventory_when_empty_memory_pool() {
let (_, _, executor, _, server) = create_synchronization_server();
// when asking for memory pool transactions ids
server.execute(ServerTask::ServeMempool(0));
server.execute(ServerTask::Mempool(0));
// => no response
let tasks = DummyTaskExecutor::wait_tasks_for(executor, 100); // TODO: get rid of explicit timeout
assert_eq!(tasks, vec![]);
@ -631,7 +631,7 @@ pub mod tests {
let transaction_hash = transaction.hash();
memory_pool.write().insert_verified(transaction.into());
// when asking for memory pool transactions ids
server.execute(ServerTask::ServeMempool(0));
server.execute(ServerTask::Mempool(0));
// => respond with inventory
let inventory = vec![InventoryVector {
inv_type: InventoryType::MessageTx,
@ -649,7 +649,7 @@ pub mod tests {
peers.hash_known_as(0, test_data::genesis().hash(), KnownHashType::CompactBlock);
// when asking for block_txns
server.execute(ServerTask::ServeGetBlockTxn(0, types::GetBlockTxn {
server.execute(ServerTask::GetBlockTxn(0, types::GetBlockTxn {
request: common::BlockTransactionsRequest {
blockhash: test_data::genesis().hash(),
indexes: vec![0],
@ -674,7 +674,7 @@ pub mod tests {
assert!(peers.enumerate().contains(&0));
// when asking for block_txns
server.execute(ServerTask::ServeGetBlockTxn(0, types::GetBlockTxn {
server.execute(ServerTask::GetBlockTxn(0, types::GetBlockTxn {
request: common::BlockTransactionsRequest {
blockhash: test_data::genesis().hash(),
indexes: vec![1],
@ -702,7 +702,7 @@ pub mod tests {
hash: test_data::genesis().transactions[0].hash(),
},
];
server.execute(ServerTask::ServeGetData(0, types::GetData::with_inventory(inventory.clone())));
server.execute(ServerTask::GetData(0, types::GetData::with_inventory(inventory.clone())));
// => respond with notfound
let tasks = DummyTaskExecutor::wait_tasks(executor);
assert_eq!(tasks, vec![Task::NotFound(0, types::NotFound::with_inventory(inventory))]);
@ -724,7 +724,7 @@ pub mod tests {
hash: tx_verified_hash,
},
];
server.execute(ServerTask::ServeGetData(0, types::GetData::with_inventory(inventory.clone())));
server.execute(ServerTask::GetData(0, types::GetData::with_inventory(inventory.clone())));
// => respond with transaction
let mut tasks = DummyTaskExecutor::wait_tasks(executor.clone());
// 2 tasks => there can be a situation where only a single task is ready
@ -743,7 +743,7 @@ pub mod tests {
storage.insert_block(&test_data::block_h1()).expect("no error");
}
// when asking with stop_hash
server.execute(ServerTask::ServeGetBlocks(0, types::GetBlocks {
server.execute(ServerTask::GetBlocks(0, types::GetBlocks {
version: 0,
block_locator_hashes: vec![],
hash_stop: test_data::genesis().hash(),
@ -765,7 +765,7 @@ pub mod tests {
}
// when asking with stop_hash
let dummy_id = 6;
server.execute(ServerTask::ServeGetHeaders(0, types::GetHeaders {
server.execute(ServerTask::GetHeaders(0, types::GetHeaders {
version: 0,
block_locator_hashes: vec![],
hash_stop: test_data::genesis().hash(),
@ -807,7 +807,7 @@ pub mod tests {
// This peer won't get any blocks, because it has not set filter for the connection
let peer_index2 = 1; peers.insert(peer_index2, DummyOutboundSyncConnection::new());
let mut loop_task = ServerTask::ServeGetData(peer_index2, types::GetData {inventory: vec![
let mut loop_task = ServerTask::GetData(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b1_hash.clone() },
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b2_hash.clone() },
]});
@ -840,7 +840,7 @@ pub mod tests {
}
// ask for data
let mut loop_task = ServerTask::ServeGetData(peer_index, types::GetData {inventory: vec![
let mut loop_task = ServerTask::GetData(peer_index, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b1_hash.clone() },
InventoryVector { inv_type: InventoryType::MessageFilteredBlock, hash: b2_hash.clone() },
]});
@ -901,7 +901,7 @@ pub mod tests {
let peer_index2 = 1; peers.insert(peer_index2, DummyOutboundSyncConnection::new());
// ask for data
let mut loop_task = ServerTask::ServeGetData(peer_index2, types::GetData {inventory: vec![
let mut loop_task = ServerTask::GetData(peer_index2, types::GetData {inventory: vec![
InventoryVector { inv_type: InventoryType::MessageCompactBlock, hash: b1_hash.clone() },
]});
while let Some(new_task) = executor.execute(loop_task) {

View File

@ -61,8 +61,8 @@ pub struct AsyncVerifier {
impl VerificationTask {
/// Returns transaction reference if it is transaction verification task
pub fn transaction(&self) -> Option<&IndexedTransaction> {
match self {
&VerificationTask::VerifyTransaction(_, ref transaction) => Some(&transaction),
match *self {
VerificationTask::VerifyTransaction(_, ref transaction) => Some(transaction),
_ => None,
}
}
@ -105,7 +105,7 @@ impl AsyncVerifier {
},
Ok(Chain::Orphan) => {
// this can happen for B1 if B0 verification has failed && we have already scheduled verification of B0
sink.on_block_verification_error(&format!("orphaned block because parent block verification has failed"), block.hash())
sink.on_block_verification_error("orphaned block because parent block verification has failed", block.hash())
},
Err(e) => {
sink.on_block_verification_error(&format!("{:?}", e), block.hash())
@ -194,7 +194,7 @@ impl<T> Verifier for SyncVerifier<T> where T: VerificationSink {
// => we could ignore decanonized transactions
self.sink.on_block_verification_success(block);
},
Ok(Chain::Orphan) => self.sink.on_block_verification_error(&format!("orphaned block because parent block verification has failed"), block.hash()),
Ok(Chain::Orphan) => self.sink.on_block_verification_error("orphaned block because parent block verification has failed", block.hash()),
Err(e) => self.sink.on_block_verification_error(&format!("{:?}", e), block.hash()),
}
}

View File

@ -67,7 +67,7 @@ impl ConnectionFilter {
.map(|(idx, _)| idx)
.collect();
types::CompactBlock {
header: build_compact_block(&block, unknown_transaction_indexes),
header: build_compact_block(block, unknown_transaction_indexes),
}
}
@ -93,7 +93,7 @@ impl ConnectionFilter {
// calculate hashes && match flags for all transactions
let (all_hashes, all_flags) = block.transactions.iter()
.fold((Vec::<H256>::with_capacity(all_len), BitVec::with_capacity(all_len)), |(mut all_hashes, mut all_flags), t| {
let flag = self.bloom_filter.filter_transaction(&t);
let flag = self.bloom_filter.filter_transaction(t);
all_flags.push(flag);
all_hashes.push(t.hash.clone());
if flag {

View File

@ -81,8 +81,8 @@ impl PreviousTransactionOutputProvider for MemoryPoolTransactionOutputProvider {
}
// check if this is output from memory pool transaction
if let Some(ref output) = self.mempool_inputs.get(&hashed_prevout) {
if let Some(ref output) = **output {
if let Some(output) = self.mempool_inputs.get(&hashed_prevout) {
if let Some(ref output) = *output {
return Some(output.clone());
}
}

View File

@ -43,7 +43,7 @@ impl<'a> BlockHeaderProvider for MessageBlockHeadersProvider<'a> {
.or_else(move || match block_ref {
BlockRef::Hash(h) => self.headers.get(&h).cloned(),
BlockRef::Number(n) => if n >= self.first_header_number && n - self.first_header_number < self.headers_order.len() as u32 {
let ref header_hash = self.headers_order[(n - self.first_header_number) as usize];
let header_hash = &self.headers_order[(n - self.first_header_number) as usize];
Some(self.headers[header_hash].clone())
} else {
None