Merge pull request #138 from ethcore/clippy_suggestions

applied clippy suggestions to make code more idiomatic
This commit is contained in:
Svyatoslav Nikolsky 2016-11-16 22:24:24 +03:00 committed by GitHub
commit cf85285112
11 changed files with 58 additions and 61 deletions

View File

@ -26,7 +26,7 @@ pub fn merkle_root(hashes: &[H256]) -> H256 {
// duplicate the last element if len is not even
if hashes.len() % 2 == 1 {
let last = &hashes[hashes.len() - 1];
row.push(dhash256(&*concat(&last, last)));
row.push(dhash256(&*concat(last, last)));
}
merkle_root(&row)

View File

@ -96,13 +96,11 @@ impl PeerContext {
} else {
queue.push_finished_response(id, self.to_message(payload).into());
}
} else if sync.is_permitted(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.context.spawn(send);
} else {
if sync.is_permitted(id) {
let send = Context::send_to_peer(self.context.clone(), self.info.id, payload);
self.context.spawn(send);
} else {
queue.push_unfinished_response(id, self.to_message(payload).into());
}
queue.push_unfinished_response(id, self.to_message(payload).into());
}
}

View File

@ -99,8 +99,7 @@ impl ThresholdSynchronizer {
self.to_grant_min <= id && id < self.to_grant_max
} else {
// otherwise it is in range [min, u32::max_value()] || [0, max)
(self.to_grant_min <= id && id <= u32::max_value()) ||
id < self.to_grant_max
self.to_grant_min <= id || id < self.to_grant_max
}
}
}
@ -158,12 +157,12 @@ impl ConfigurableSynchronizer {
/// from last_processed response will still be granted permissions.
pub fn change_sync_policy(&mut self, sync: bool) {
let new_inner = match self.inner {
InnerSynchronizer::Threshold(ref s) if sync == false => {
InnerSynchronizer::Threshold(ref s) if !sync => {
InnerSynchronizer::Noop(NoopSynchronizer {
declared_responses: s.inner.declared_responses,
})
},
InnerSynchronizer::Noop(ref s) if sync == true => {
InnerSynchronizer::Noop(ref s) if sync => {
let threshold = ThresholdSynchronizer::new(
s.declared_responses,
CONFIGURABLE_SYNCHRONIZER_THRESHOLD,

View File

@ -56,11 +56,11 @@ impl BestHeadersChain {
self.best.position(hash)
.and_then(|pos| self.best.at(pos + 1))
.and_then(|child| Some(vec![child]))
.unwrap_or(Vec::new())
.unwrap_or_default()
}
pub fn best_block_hash(&self) -> H256 {
self.best.back().or(Some(self.storage_best_hash.clone())).expect("storage_best_hash is always known")
self.best.back().or_else(|| Some(self.storage_best_hash.clone())).expect("storage_best_hash is always known")
}
pub fn insert(&mut self, header: BlockHeader) {
@ -80,7 +80,7 @@ impl BestHeadersChain {
}
pub fn remove(&mut self, hash: &H256) {
if let Some(_) = self.headers.remove(hash) {
if self.headers.remove(hash).is_some() {
match self.best.remove(hash) {
HashPosition::Front => self.clear(),
HashPosition::Inside(position) => self.clear_after(position),
@ -89,8 +89,8 @@ impl BestHeadersChain {
}
}
pub fn remove_n<'a, I: IntoIterator<Item=H256>> (&mut self, hashes: I) {
for hash in hashes.into_iter() {
pub fn remove_n<I: IntoIterator<Item=H256>> (&mut self, hashes: I) {
for hash in hashes {
self.remove(&hash);
}
}

View File

@ -191,7 +191,7 @@ impl HashQueueChain {
/// Returns element at the given position
pub fn at(&self, mut index: u32) -> Option<H256> {
for queue in self.chain.iter() {
for queue in &self.chain {
let queue_len = queue.len();
if index < queue_len {
return queue.at(index);

View File

@ -203,7 +203,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: SynchronizationTaskExecutor + PeersCon
self.client.lock().on_peer_blocks_notfound(peer_index, blocks_inventory);
}
fn blocks_inventory(&self, inventory: &Vec<InventoryVector>) -> Vec<H256> {
fn blocks_inventory(&self, inventory: &[InventoryVector]) -> Vec<H256> {
inventory.iter()
.filter(|item| item.inv_type == InventoryType::MessageBlock)
.map(|item| item.hash.clone())

View File

@ -296,9 +296,9 @@ impl Chain {
match self.hash_chain.remove_at(VERIFYING_QUEUE, hash) {
HashPosition::Missing => match self.hash_chain.remove_at(REQUESTED_QUEUE, hash) {
HashPosition::Missing => self.hash_chain.remove_at(SCHEDULED_QUEUE, hash),
position @ _ => position,
position => position,
},
position @ _ => position,
position => position,
}
}
@ -338,14 +338,14 @@ impl Chain {
}
/// Intersect chain with inventory
pub fn intersect_with_headers(&self, hashes: &Vec<H256>, headers: &Vec<BlockHeader>) -> HeadersIntersection {
pub fn intersect_with_headers(&self, hashes: &[H256], headers: &[BlockHeader]) -> HeadersIntersection {
let hashes_len = hashes.len();
assert!(hashes_len != 0 && hashes.len() == headers.len());
// given that headers are ordered
let (is_first_known, first_state) = match self.block_state(&hashes[0]) {
BlockState::Unknown => (false, self.block_state(&headers[0].previous_header_hash)),
state @ _ => (true, state),
state => (true, state),
};
match first_state {
// if first block of inventory is unknown && its parent is unknown => all other blocks are also unknown
@ -353,32 +353,32 @@ impl Chain {
HeadersIntersection::NoKnownBlocks(0)
},
// else if first block is known
first_block_state @ _ => match self.block_state(&hashes[hashes_len - 1]) {
first_block_state => match self.block_state(&hashes[hashes_len - 1]) {
// if last block is known to be in db => all inventory blocks are also in db
BlockState::Stored => {
HeadersIntersection::DbAllBlocksKnown
HeadersIntersection::DbAllBlocksKnown
},
// if first block is known && last block is unknown but we know block before first one => intersection with queue or with db
BlockState::Unknown if !is_first_known => {
// previous block is stored => fork from stored block
if first_state == BlockState::Stored {
return HeadersIntersection::DbForkNewBlocks(0);
HeadersIntersection::DbForkNewBlocks(0)
}
// previous block is best block => no fork
else if &self.best_block().hash == &headers[0].previous_header_hash {
return HeadersIntersection::InMemoryMainNewBlocks(0);
HeadersIntersection::InMemoryMainNewBlocks(0)
}
// previous block is not a best block => fork
else {
return HeadersIntersection::InMemoryForkNewBlocks(0);
HeadersIntersection::InMemoryForkNewBlocks(0)
}
},
// if first block is known && last block is unknown => intersection with queue or with db
BlockState::Unknown if is_first_known => {
// find last known block
let mut previous_state = first_block_state;
for index in 1..hashes_len {
let state = self.block_state(&hashes[index]);
for (index, hash) in hashes.iter().enumerate().take(hashes_len).skip(1) {
let state = self.block_state(hash);
if state == BlockState::Unknown {
// previous block is stored => fork from stored block
if previous_state == BlockState::Stored {

View File

@ -258,8 +258,8 @@ impl Config {
impl State {
pub fn is_saturated(&self) -> bool {
match self {
&State::Saturated => true,
match *self {
State::Saturated => true,
_ => false,
}
}
@ -272,8 +272,8 @@ impl State {
}
pub fn is_nearly_saturated(&self) -> bool {
match self {
&State::NearlySaturated => true,
match *self {
State::NearlySaturated => true,
_ => false,
}
}
@ -300,7 +300,7 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
/// Get synchronization state
fn state(&self) -> State {
self.state.clone()
self.state
}
/// Try to queue synchronization of unknown blocks when new inventory is received.
@ -318,7 +318,7 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
let unknown_blocks_hashes: Vec<_> = {
let chain = self.chain.read();
blocks_hashes.into_iter()
.filter(|h| chain.block_state(&h) == BlockState::Unknown)
.filter(|h| chain.block_state(h) == BlockState::Unknown)
.filter(|h| !self.unknown_blocks.contains_key(h))
.collect()
};
@ -331,10 +331,9 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
fn on_new_blocks_headers(&mut self, peer_index: usize, blocks_headers: Vec<BlockHeader>) {
let blocks_hashes = {
// we can't process headers message if it has no link to our headers
let ref header0 = blocks_headers[0];
if {
self.chain.read().block_state(&header0.previous_header_hash) == BlockState::Unknown
} {
let header0 = &blocks_headers[0];
let unknown_state = self.chain.read().block_state(&header0.previous_header_hash) == BlockState::Unknown;
if unknown_state {
warn!(
target: "sync",
"Previous header of the first header from peer#{} `headers` message is unknown. First: {:?}. Previous: {:?}",
@ -349,7 +348,7 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
// validate blocks headers before scheduling
let mut blocks_hashes: Vec<H256> = Vec::with_capacity(blocks_headers.len());
let mut prev_block_hash = header0.previous_header_hash.clone();
for block_header in blocks_headers.iter() {
for block_header in &blocks_headers {
let block_header_hash = block_header.hash();
if block_header.previous_header_hash != prev_block_hash {
warn!(target: "sync", "Neighbour headers in peer#{} `headers` message are unlinked: Prev: {:?}, PrevLink: {:?}, Curr: {:?}", peer_index, prev_block_hash, block_header.previous_header_hash, block_header_hash);
@ -476,11 +475,11 @@ impl<T> Client for SynchronizationClient<T> where T: TaskExecutor {
// forget for this block and all its children
// headers are also removed as they all are invalid
chain.forget_with_children(&hash);
chain.forget_with_children(hash);
}
// awake threads, waiting for this block insertion
self.awake_waiting_threads(&hash);
self.awake_waiting_threads(hash);
// start new tasks
self.execute_synchronization_tasks(None);
@ -572,7 +571,7 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
}
/// Get configuration parameters.
pub fn config<'a>(&'a self) -> &'a Config {
pub fn config(&self) -> &Config {
&self.config
}
@ -755,11 +754,11 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
if !inventory_idle_peers.is_empty() {
let scheduled_hashes_len = { self.chain.read().length_of_state(BlockState::Scheduled) };
if scheduled_hashes_len < MAX_SCHEDULED_HASHES {
for inventory_peer in inventory_idle_peers.iter() {
for inventory_peer in &inventory_idle_peers {
self.peers.on_inventory_requested(*inventory_peer);
}
let inventory_tasks = inventory_idle_peers.into_iter().map(|p| Task::RequestBlocksHeaders(p));
let inventory_tasks = inventory_idle_peers.into_iter().map(Task::RequestBlocksHeaders);
tasks.extend(inventory_tasks);
}
}
@ -865,7 +864,7 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
if let Entry::Occupied(entry) = orphaned_blocks.entry(parent_hash) {
let (_, orphaned) = entry.remove_entry();
for orphaned_hash in orphaned.keys() {
unknown_blocks.remove(&orphaned_hash);
unknown_blocks.remove(orphaned_hash);
}
queue.extend(orphaned.keys().cloned());
removed.extend(orphaned.into_iter());
@ -877,16 +876,18 @@ impl<T> SynchronizationClient<T> where T: TaskExecutor {
/// Remove given orphaned blocks
fn remove_orphaned_blocks(&mut self, orphans_to_remove: HashSet<H256>) {
let parent_orphan_keys: Vec<_> = self.orphaned_blocks.keys().cloned().collect();
for parent_orphan_key in parent_orphan_keys.into_iter() {
for parent_orphan_key in parent_orphan_keys {
if let Entry::Occupied(mut orphan_entry) = self.orphaned_blocks.entry(parent_orphan_key.clone()) {
if {
let is_empty = {
let mut orphans = orphan_entry.get_mut();
let orphans_keys: HashSet<H256> = orphans.keys().cloned().collect();
for orphan_to_remove in orphans_keys.intersection(&orphans_to_remove) {
orphans.remove(orphan_to_remove);
}
orphans.is_empty()
} {
};
if is_empty {
orphan_entry.remove_entry();
}
}

View File

@ -111,7 +111,7 @@ pub fn manage_unknown_orphaned_blocks(config: &ManageUnknownBlocksConfig, unknow
}
// remove unknown blocks
for unknown_block in unknown_to_remove.iter() {
for unknown_block in &unknown_to_remove {
unknown_blocks.remove(unknown_block);
}

View File

@ -58,8 +58,7 @@ pub enum ServerTaskIndex {
impl ServerTaskIndex {
pub fn raw(&self) -> u32 {
match *self {
ServerTaskIndex::Partial(id) => id,
ServerTaskIndex::Final(id) => id,
ServerTaskIndex::Partial(id) | ServerTaskIndex::Final(id) => id,
}
}
@ -282,9 +281,9 @@ impl SynchronizationServer {
// `max_hashes` hashes after best_block.number OR hash_stop OR blockchain end
(first_block_number..last_block_number).into_iter()
.map(|number| chain.block_hash(number))
.take_while(|ref hash| hash.is_some())
.take_while(|hash| hash.is_some())
.map(|hash| hash.unwrap())
.take_while(|ref hash| *hash != hash_stop)
.take_while(|hash| hash != hash_stop)
.collect()
}
@ -300,16 +299,16 @@ impl SynchronizationServer {
// `max_hashes` hashes after best_block.number OR hash_stop OR blockchain end
(first_block_number..last_block_number).into_iter()
.map(|number| chain.block_header_by_number(number))
.take_while(|ref header| header.is_some())
.take_while(|header| header.is_some())
.map(|header| header.unwrap())
.take_while(|ref header| &header.hash() != hash_stop)
.take_while(|header| &header.hash() != hash_stop)
.collect()
}
fn locate_best_known_block_hash(chain: &ChainRef, hash: &H256) -> Option<db::BestBlock> {
let chain = chain.read();
match chain.block_number(&hash) {
match chain.block_number(hash) {
Some(number) => Some(db::BestBlock {
number: number,
hash: hash.clone(),
@ -317,7 +316,7 @@ impl SynchronizationServer {
// block with hash is not in the main chain (block_number has returned None)
// but maybe it is in some fork? if so => we should find intersection with main chain
// and this would be our best common block
None => chain.block_header_by_hash(&hash)
None => chain.block_header_by_hash(hash)
.and_then(|block| {
let mut current_block_hash = block.previous_header_hash;
loop {

View File

@ -50,14 +50,14 @@ pub fn age(protocol_time: u32) -> i64 {
pub fn block_reward_satoshi(block_height: u32) -> u64 {
let mut res = 50 * 100 * 1000 * 1000;
for _ in 0..block_height / 210000 { res = res / 2 }
for _ in 0..block_height / 210000 { res /= 2 }
res
}
pub fn transaction_sigops(transaction: &chain::Transaction) -> Result<usize, script::Error> {
let mut result = 0usize;
for output in transaction.outputs.iter() {
for output in &transaction.outputs {
let output_script: Script = output.script_pubkey.to_vec().into();
// todo: not always allow malformed output?
result += output_script.sigop_count(false).unwrap_or(0);
@ -65,7 +65,7 @@ pub fn transaction_sigops(transaction: &chain::Transaction) -> Result<usize, scr
if transaction.is_coinbase() { return Ok(result); }
for input in transaction.inputs.iter() {
for input in &transaction.inputs {
let input_script: Script = input.script_sig().to_vec().into();
result += try!(input_script.sigop_count(false));
}