cleanup clippy tests (#10172)

automerge
Kristofer Peterson 2020-05-29 08:26:06 +01:00 committed by GitHub
parent a71833c17b
commit fb4d8e1f62
10 changed files with 46 additions and 46 deletions

View File

@@ -2425,8 +2425,7 @@ mod tests {
     fn test_crds_values(pubkey: Pubkey) -> Vec<CrdsValue> {
         let entrypoint = ContactInfo::new_localhost(&pubkey, timestamp());
-        let entrypoint_crdsvalue =
-            CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
+        let entrypoint_crdsvalue = CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint));
         vec![entrypoint_crdsvalue]
     }
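
The removal above is clippy's `redundant_clone` lint at work: `entrypoint` is not used again after this call, so the value can be moved rather than cloned. A minimal sketch of the pattern, using placeholder types rather than the crate's own:

fn consume(s: String) -> usize {
    s.len()
}

fn demo() -> usize {
    let s = String::from("hello");
    // clippy::redundant_clone fires on `consume(s.clone())` when `s` is
    // never read afterwards; passing ownership avoids the extra copy.
    consume(s)
}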

View File

@@ -957,7 +957,7 @@ mod tests {
             &exit,
             Arc::new(RwLock::new(bank_forks)),
             Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
-                blockstore.clone(),
+                blockstore,
             ))),
         ));
@@ -1072,7 +1072,7 @@ mod tests {
             &exit,
             Arc::new(RwLock::new(bank_forks)),
             Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
-                blockstore.clone(),
+                blockstore,
             ))),
         ));

View File

@@ -551,9 +551,9 @@ mod tests {
         cache2.increase_confirmation_stake(2, 5);
         let mut block_commitment = HashMap::new();
-        block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
-        block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
-        block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
+        block_commitment.entry(1).or_insert_with(|| cache0.clone()); // Slot 1, conf 2
+        block_commitment.entry(2).or_insert_with(|| cache1.clone()); // Slot 2, conf 1
+        block_commitment.entry(3).or_insert_with(|| cache2.clone()); // Slot 3, conf 0
         let block_commitment_cache = BlockCommitmentCache::new(
             block_commitment,
             0,
@@ -568,9 +568,9 @@ mod tests {
         // Build map with multiple slots at conf 1
         let mut block_commitment = HashMap::new();
-        block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
-        block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
-        block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
+        block_commitment.entry(1).or_insert_with(|| cache1.clone()); // Slot 1, conf 1
+        block_commitment.entry(2).or_insert_with(|| cache1.clone()); // Slot 2, conf 1
+        block_commitment.entry(3).or_insert_with(|| cache2.clone()); // Slot 3, conf 0
         let block_commitment_cache = BlockCommitmentCache::new(
             block_commitment,
             0,
@@ -585,9 +585,9 @@ mod tests {
         // Build map with slot gaps
         let mut block_commitment = HashMap::new();
-        block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
-        block_commitment.entry(3).or_insert(cache1.clone()); // Slot 3, conf 1
-        block_commitment.entry(5).or_insert(cache2.clone()); // Slot 5, conf 0
+        block_commitment.entry(1).or_insert_with(|| cache1.clone()); // Slot 1, conf 1
+        block_commitment.entry(3).or_insert(cache1); // Slot 3, conf 1
+        block_commitment.entry(5).or_insert_with(|| cache2.clone()); // Slot 5, conf 0
         let block_commitment_cache = BlockCommitmentCache::new(
             block_commitment,
             0,
@@ -602,9 +602,9 @@ mod tests {
         // Build map with no conf 1 slots, but one higher
         let mut block_commitment = HashMap::new();
-        block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
-        block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
-        block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
+        block_commitment.entry(1).or_insert(cache0); // Slot 1, conf 2
+        block_commitment.entry(2).or_insert_with(|| cache2.clone()); // Slot 2, conf 0
+        block_commitment.entry(3).or_insert_with(|| cache2.clone()); // Slot 3, conf 0
         let block_commitment_cache = BlockCommitmentCache::new(
             block_commitment,
             0,
@@ -619,15 +619,15 @@ mod tests {
         // Build map with no conf 1 or higher slots
         let mut block_commitment = HashMap::new();
-        block_commitment.entry(1).or_insert(cache2.clone()); // Slot 1, conf 0
-        block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
-        block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
+        block_commitment.entry(1).or_insert_with(|| cache2.clone()); // Slot 1, conf 0
+        block_commitment.entry(2).or_insert_with(|| cache2.clone()); // Slot 2, conf 0
+        block_commitment.entry(3).or_insert(cache2); // Slot 3, conf 0
         let block_commitment_cache = BlockCommitmentCache::new(
             block_commitment,
             0,
             total_stake,
-            bank_slot_5.clone(),
-            blockstore.clone(),
+            bank_slot_5,
+            blockstore,
             0,
             0,
         );
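
These hunks address clippy's `or_insert` lint: `entry(k).or_insert(v)` evaluates `v` (here, a clone) even when the key is already present, while `or_insert_with` defers the work to a closure that only runs on a vacant entry. Note the pattern in the diff: the final use of each cache keeps a plain `or_insert(value)`, since the owned value can simply be moved there. A small sketch with placeholder types:

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u32, Vec<u8>> = HashMap::new();
    // `or_insert(vec![0; 1024])` would allocate even if key 1 existed;
    // the closure form only allocates when the entry is vacant.
    map.entry(1).or_insert_with(|| vec![0; 1024]);
    // For the last use of an owned value, a plain move is fine:
    let v = vec![1, 2, 3];
    map.entry(2).or_insert(v);
    assert_eq!(map[&2], [1, 2, 3]);
}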

View File

@@ -2618,7 +2618,7 @@ pub(crate) mod tests {
         let exit = Arc::new(AtomicBool::new(false));
         let block_commitment_cache = Arc::new(RwLock::new(
-            BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
+            BlockCommitmentCache::default_with_blockstore(blockstore),
         ));
         let subscriptions = Arc::new(RpcSubscriptions::new(
             &exit,

View File

@@ -2321,7 +2321,7 @@ pub mod tests {
             r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#,
             tx_serialized_encoded,
         );
-        let res = io.handle_request_sync(&req, meta.clone());
+        let res = io.handle_request_sync(&req, meta);
         let expected = json!({
             "jsonrpc": "2.0",
             "result": {
@@ -2361,7 +2361,7 @@ pub mod tests {
         );
         // should panic because `bank` is not frozen
-        let _ = io.handle_request_sync(&req, meta.clone());
+        let _ = io.handle_request_sync(&req, meta);
     }

     #[test]

View File

@@ -745,11 +745,11 @@ mod tests {
         );
         let exit = Arc::new(AtomicBool::new(false));
         let block_commitment_cache = Arc::new(RwLock::new(
-            BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
+            BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
         ));
         let subscriptions =
-            RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
+            RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache);
         rpc.subscriptions = Arc::new(subscriptions);
         let session = create_session();
         let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
@@ -895,8 +895,7 @@ mod tests {
         let (subscriber, _id_receiver, receiver) = Subscriber::new_test("voteNotification");
         // Setup Subscriptions
-        let subscriptions =
-            RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
+        let subscriptions = RpcSubscriptions::new(&exit, bank_forks, block_commitment_cache);
         rpc.subscriptions = Arc::new(subscriptions);
         rpc.vote_subscribe(session, subscriber);
@@ -922,6 +921,8 @@ mod tests {
         });
         // Process votes and check they were notified.
+        // FIX-ME-BETTER-LATER - clone below is required for testcase to pass
+        #[allow(clippy::redundant_clone)]
         ClusterInfoVoteListener::get_and_process_votes_for_tests(
             &votes_receiver,
             &vote_tracker,
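
Where a clone cannot actually be removed, as in the FIX-ME case above, the commit opts for a narrowly scoped `#[allow(...)]` on the offending statement plus an explanatory comment, rather than silencing the lint crate-wide. A hypothetical sketch of the same idiom:

fn snapshot_for_test(state: Vec<u8>) -> Vec<u8> {
    // Suppose clippy flags this clone as redundant but removing it breaks
    // the test (a false positive, like the FIX-ME case above): scope the
    // allow to the one statement instead of disabling the lint globally.
    #[allow(clippy::redundant_clone)]
    let snapshot = state.clone();
    snapshot
}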

View File

@@ -4922,7 +4922,7 @@ pub mod tests {
         // Trying to insert value into slot <= than last root should fail
         {
             let mut coding_shred =
-                Shred::new_empty_from_header(shred.clone(), DataShredHeader::default(), coding);
+                Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);
             let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
             coding_shred.set_slot(*last_root.read().unwrap());
             assert!(!Blockstore::should_insert_coding_shred(

View File

@@ -15,6 +15,8 @@ use std::{
     sync::Arc,
 };

+type IndexShredsMap = BTreeMap<u32, Vec<Shred>>;
+
 #[test]
 fn test_multi_fec_block_coding() {
     let keypair = Arc::new(Keypair::new());
@@ -123,9 +125,9 @@ fn test_multi_fec_block_different_size_coding() {
     let num_data = fec_data_shreds.len();
     let num_coding = fec_coding_shreds.len();
     let all_shreds: Vec<Shred> = fec_data_shreds
-        .into_iter()
+        .iter()
         .step_by(2)
-        .chain(fec_coding_shreds.into_iter().step_by(2))
+        .chain(fec_coding_shreds.iter().step_by(2))
+        .cloned()
         .collect();
@@ -162,8 +164,8 @@ fn test_multi_fec_block_different_size_coding() {
 fn sort_data_coding_into_fec_sets(
     data_shreds: Vec<Shred>,
     coding_shreds: Vec<Shred>,
-    fec_data: &mut BTreeMap<u32, Vec<Shred>>,
-    fec_coding: &mut BTreeMap<u32, Vec<Shred>>,
+    fec_data: &mut IndexShredsMap,
+    fec_coding: &mut IndexShredsMap,
     data_slot_and_index: &mut HashSet<(Slot, u32)>,
     coding_slot_and_index: &mut HashSet<(Slot, u32)>,
 ) {
@@ -175,7 +177,7 @@ fn sort_data_coding_into_fec_sets(
         data_slot_and_index.insert(key);
         let fec_entry = fec_data
             .entry(shred.common_header.fec_set_index)
-            .or_insert(vec![]);
+            .or_insert_with(|| vec![]);
         fec_entry.push(shred);
     }
     for shred in coding_shreds {
@@ -186,16 +188,17 @@ fn sort_data_coding_into_fec_sets(
         coding_slot_and_index.insert(key);
         let fec_entry = fec_coding
             .entry(shred.common_header.fec_set_index)
-            .or_insert(vec![]);
+            .or_insert_with(|| vec![]);
         fec_entry.push(shred);
     }
 }

+#[allow(clippy::assertions_on_constants)]
 fn setup_different_sized_fec_blocks(
     slot: Slot,
     parent_slot: Slot,
     keypair: Arc<Keypair>,
-) -> (BTreeMap<u32, Vec<Shred>>, BTreeMap<u32, Vec<Shred>>, usize) {
+) -> (IndexShredsMap, IndexShredsMap, usize) {
     let shredder =
         Shredder::new(slot, parent_slot, 1.0, keypair, 0, 0).expect("Failed in creating shredder");
     let keypair0 = Keypair::new();
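
The `IndexShredsMap` alias introduced at the top of this file replaces the repeated `BTreeMap<u32, Vec<Shred>>` spelling in parameter lists and return tuples, the usual remedy when clippy's `type_complexity` lint fires. A generic sketch of the same move, with placeholder element types:

use std::collections::BTreeMap;

// One alias, reused across every signature that needs the map:
type IndexMap = BTreeMap<u32, Vec<u64>>;

fn group_by_index(items: Vec<(u32, u64)>) -> (IndexMap, usize) {
    let count = items.len();
    let mut grouped: IndexMap = BTreeMap::new();
    for (index, item) in items {
        grouped.entry(index).or_insert_with(Vec::new).push(item);
    }
    (grouped, count)
}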

View File

@@ -561,7 +561,7 @@ mod tests {
     }

     fn pubkeys(signers: &[&dyn Signer]) -> Vec<Pubkey> {
-        signers.into_iter().map(|x| x.pubkey()).collect()
+        signers.iter().map(|x| x.pubkey()).collect()
     }

     #[test]
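
`signers` is a slice, and `into_iter()` on a reference yields references anyway, which is what clippy's `into_iter_on_ref` lint flags: `iter()` says the same thing explicitly. A small sketch:

fn lengths(names: &[&str]) -> Vec<usize> {
    // On `&[T]`, `into_iter()` and `iter()` are the same iterator over
    // `&T` (clippy::into_iter_on_ref); prefer the explicit `iter()`.
    names.iter().map(|name| name.len()).collect()
}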

View File

@@ -99,9 +99,9 @@ mod tests {
         slot_history.add(MAX_ENTRIES);
         assert_eq!(slot_history.check(0), Check::TooOld);
         assert_eq!(slot_history.check(1), Check::NotFound);
-        assert_eq!(slot_history.check(2), Check::Found);
-        assert_eq!(slot_history.check(20), Check::Found);
-        assert_eq!(slot_history.check(MAX_ENTRIES), Check::Found);
+        for i in &[2, 20, MAX_ENTRIES] {
+            assert_eq!(slot_history.check(*i), Check::Found);
+        }
         for i in 3..20 {
             assert_eq!(slot_history.check(i), Check::NotFound, "i: {}", i);
         }
@@ -113,12 +113,9 @@ mod tests {
         info!("add max_entries + 3");
         let slot = 3 * MAX_ENTRIES + 3;
         slot_history.add(slot);
-        assert_eq!(slot_history.check(0), Check::TooOld);
-        assert_eq!(slot_history.check(1), Check::TooOld);
-        assert_eq!(slot_history.check(2), Check::TooOld);
-        assert_eq!(slot_history.check(20), Check::TooOld);
-        assert_eq!(slot_history.check(21), Check::TooOld);
-        assert_eq!(slot_history.check(MAX_ENTRIES), Check::TooOld);
+        for i in &[0, 1, 2, 20, 21, MAX_ENTRIES] {
+            assert_eq!(slot_history.check(*i), Check::TooOld);
+        }
         let start = slot - MAX_ENTRIES + 1;
         let end = slot;
         for i in start..end {
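
The last two hunks fold runs of identical assertions into loops over the interesting inputs. One caveat, which the surrounding test already handles with its `"i: {}"` format argument: a bare loop hides which case failed, so the panic message should carry the input. A minimal sketch:

#[test]
fn found_cases() {
    let evens = [0u64, 2, 20, 42];
    // One loop replaces four copy-pasted assertions; keeping the input
    // in the message preserves a useful failure report.
    for i in &evens {
        assert_eq!(i % 2, 0, "i: {}", i);
    }
}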