fixes errors from clippy::useless_conversion (#29534)

https://rust-lang.github.io/rust-clippy/master/index.html#useless_conversion
behzad nouri 2023-01-05 18:05:32 +00:00 committed by GitHub
parent 1e8a8e07b6
commit 5c9beef498
33 changed files with 46 additions and 135 deletions
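
For context, `clippy::useless_conversion` fires when a value is converted into the type it already has; in this commit that is almost always `.into_iter()` called on something that is already an iterator, such as a `Range` or the value returned by `drain()`. A minimal, self-contained sketch of the before/after pattern (the function and values are illustrative, not taken from the diff):

```rust
fn doubled(n: usize) -> Vec<usize> {
    // Before: `0..n` is already an Iterator, so `.into_iter()` is an identity
    // conversion and clippy::useless_conversion flags it.
    // let v: Vec<_> = (0..n).into_iter().map(|i| i * 2).collect();

    // After: iterate the range directly.
    (0..n).map(|i| i * 2).collect()
}

fn main() {
    assert_eq!(doubled(3), vec![0usize, 2, 4]);
}
```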

View File

@ -155,7 +155,6 @@ fn make_create_message(
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.flat_map(|_| {
let program_id = if mint.is_some() {
inline_spl_token::id()
@ -203,7 +202,6 @@ fn make_close_message(
spl_token: bool,
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.filter_map(|_| {
let program_id = if spl_token {
inline_spl_token::id()

View File

@ -120,7 +120,6 @@ fn main() -> Result<()> {
}
let producer_threads: Vec<_> = (0..num_producers)
.into_iter()
.map(|_| producer(&addr, exit.clone()))
.collect();

View File

@ -39,7 +39,7 @@ DEFINE_NxM_BENCH!(dim_32x64, 32, 64);
/// Benchmark insert with Hashmap as baseline for N threads inserting M keys each
fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
let index = RwLock::new(HashMap::new());
(0..n).into_iter().into_par_iter().for_each(|i| {
(0..n).into_par_iter().for_each(|i| {
let key = Pubkey::new_unique();
index
.write()
@ -47,7 +47,7 @@ fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
.insert(key, vec![(i, IndexValue::default())]);
});
bencher.iter(|| {
(0..n).into_iter().into_par_iter().for_each(|_| {
(0..n).into_par_iter().for_each(|_| {
for j in 0..m {
let key = Pubkey::new_unique();
index
@ -62,12 +62,12 @@ fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
/// Benchmark insert with BucketMap with N buckets for N threads inserting M keys each
fn do_bench_insert_bucket_map(bencher: &mut Bencher, n: usize, m: usize) {
let index = BucketMap::new(BucketMapConfig::new(n));
(0..n).into_iter().into_par_iter().for_each(|i| {
(0..n).into_par_iter().for_each(|i| {
let key = Pubkey::new_unique();
index.update(&key, |_| Some((vec![(i, IndexValue::default())], 0)));
});
bencher.iter(|| {
(0..n).into_iter().into_par_iter().for_each(|_| {
(0..n).into_par_iter().for_each(|_| {
for j in 0..m {
let key = Pubkey::new_unique();
index.update(&key, |_| Some((vec![(j, IndexValue::default())], 0)));
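
In the rayon benches above, the lint points at `(0..n).into_iter().into_par_iter()`: calling `.into_iter()` on a range returns the range unchanged, and rayon implements `IntoParallelIterator` for integer ranges directly. A rough standalone sketch of the simplification, assuming the `rayon` crate is available (values are illustrative):

```rust
use rayon::prelude::*;

fn main() {
    let n = 8usize;
    // Before: the leading `.into_iter()` is a no-op conversion that clippy flags.
    // (0..n).into_iter().into_par_iter().for_each(|i| { let _ = i; });

    // After: Range<usize> implements IntoParallelIterator, so convert it directly.
    (0..n).into_par_iter().for_each(|i| {
        let _ = i * i; // placeholder for per-item work
    });
}
```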

View File

@ -81,7 +81,6 @@ impl<T: Clone + Copy + Debug> BucketMap<T> {
let stats = Arc::default();
let buckets = (0..config.max_buckets)
.into_iter()
.map(|_| {
Arc::new(BucketApi::new(
Arc::clone(&drives),
@ -320,7 +319,7 @@ mod tests {
fn bucket_map_test_grow_read() {
let config = BucketMapConfig::new(1 << 2);
let index = BucketMap::new(config);
let keys: Vec<Pubkey> = (0..100).into_iter().map(|_| Pubkey::new_unique()).collect();
let keys: Vec<Pubkey> = (0..100).map(|_| Pubkey::new_unique()).collect();
for k in 0..keys.len() {
let key = &keys[k];
let i = read_be_u64(key.as_ref());
@ -339,7 +338,7 @@ mod tests {
fn bucket_map_test_n_delete() {
let config = BucketMapConfig::new(1 << 2);
let index = BucketMap::new(config);
let keys: Vec<Pubkey> = (0..20).into_iter().map(|_| Pubkey::new_unique()).collect();
let keys: Vec<Pubkey> = (0..20).map(|_| Pubkey::new_unique()).collect();
for key in keys.iter() {
let i = read_be_u64(key.as_ref());
index.update(key, |_| Some((vec![i], 0)));
@ -366,7 +365,6 @@ mod tests {
use std::sync::Mutex;
solana_logger::setup();
let maps = (0..2)
.into_iter()
.map(|max_buckets_pow2| {
let config = BucketMapConfig::new(1 << max_buckets_pow2);
BucketMap::new(config)
@ -379,7 +377,6 @@ mod tests {
let gen_rand_value = || {
let count = thread_rng().gen_range(0, max_slot_list_len);
let v = (0..count)
.into_iter()
.map(|x| (x as usize, x as usize /*thread_rng().gen::<usize>()*/))
.collect::<Vec<_>>();
let rc = thread_rng().gen::<RefCount>();

View File

@ -324,7 +324,7 @@ impl BucketStorage {
let increment = self.capacity_pow2 - old_bucket.capacity_pow2;
let index_grow = 1 << increment;
(0..old_cap as usize).into_iter().for_each(|i| {
(0..old_cap as usize).for_each(|i| {
let old_ix = i * old_bucket.cell_size as usize;
let new_ix = old_ix * index_grow;
let dst_slice: &[u8] = &self.mmap[new_ix..new_ix + old_bucket.cell_size as usize];

View File

@ -23,12 +23,12 @@ fn bucket_map_test_mt() {
drives: Some(paths.clone()),
..BucketMapConfig::default()
});
(0..threads).into_iter().into_par_iter().for_each(|_| {
(0..threads).into_par_iter().for_each(|_| {
let key = Pubkey::new_unique();
index.update(&key, |_| Some((vec![0u64], 0)));
});
let mut timer = Measure::start("bucket_map_test_mt");
(0..threads).into_iter().into_par_iter().for_each(|_| {
(0..threads).into_par_iter().for_each(|_| {
for _ in 0..items {
let key = Pubkey::new_unique();
let ix: u64 = index.bucket_ix(&key) as u64;

View File

@ -531,7 +531,6 @@ mod tests {
0
};
let addrs = (0..MAX_CONNECTIONS)
.into_iter()
.map(|_| {
let addr = get_addr(&mut rng);
connection_cache.get_connection(&addr);

View File

@ -40,7 +40,6 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
let mut total = 0;
let ips: Vec<_> = (0..num_ips)
.into_iter()
.map(|_| {
let mut addr = [0u16; 8];
thread_rng().fill(&mut addr);

View File

@ -2185,7 +2185,6 @@ mod tests {
let request_slot = MAX_ANCESTOR_RESPONSES as Slot;
let repair = AncestorHashesRepairType(request_slot);
let mut response: Vec<SlotHash> = (0..request_slot)
.into_iter()
.map(|slot| (slot, Hash::new_unique()))
.collect();
assert!(repair.verify_response(&AncestorHashesResponse::Hashes(response.clone())));

View File

@ -1082,12 +1082,7 @@ mod tests {
let expected_ports: Vec<_> = (0..256).collect();
let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts
.iter_batches()
.flat_map(|batch| {
batch
.get_forwardable_packets()
.into_iter()
.map(|p| p.meta().port)
})
.flat_map(|batch| batch.get_forwardable_packets().map(|p| p.meta().port))
.collect();
forwarded_ports.sort_unstable();
assert_eq!(expected_ports, forwarded_ports);
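
The hunk above shows a second shape of the same lint: when the value returned inside a `flat_map` closure is already an iterator, the inner `.into_iter()` is redundant and the closure collapses to a single expression. A self-contained sketch with stand-in types (not the crate's actual `get_forwardable_packets` API):

```rust
struct Batch {
    ports: Vec<u16>,
}

impl Batch {
    // Returns an iterator, so callers can feed it to `flat_map` without `.into_iter()`.
    fn forwardable_ports(&self) -> impl Iterator<Item = u16> + '_ {
        self.ports.iter().copied()
    }
}

fn main() {
    let batches = vec![Batch { ports: vec![1, 2] }, Batch { ports: vec![3] }];
    let ports: Vec<u16> = batches
        .iter()
        .flat_map(|batch| batch.forwardable_ports())
        .collect();
    assert_eq!(ports, vec![1u16, 2, 3]);
}
```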

View File

@ -717,7 +717,6 @@ mod tests {
slot,
confirmation_count,
})
.into_iter()
.collect::<VecDeque<Lockout>>();
let vote = VoteTransaction::from(VoteStateUpdate::new(
slots,

View File

@ -9141,7 +9141,6 @@ pub mod tests {
fn make_large_tx_entry(num_txs: usize) -> Entry {
let txs: Vec<_> = (0..num_txs)
.into_iter()
.map(|_| {
let keypair0 = Keypair::new();
let to = solana_sdk::pubkey::new_rand();

View File

@ -17,7 +17,6 @@ use {
#[bench]
fn bench_write_points(bencher: &mut Bencher) {
let points = (0..10)
.into_iter()
.map(|_| {
DataPoint::new("measurement")
.add_field_i64("i", 0)

View File

@ -3704,11 +3704,7 @@ impl AccountsDb {
});
// sort by pubkey to keep account index lookups close
let mut stored_accounts = stored_accounts
.drain()
.into_iter()
.map(|(_k, v)| v)
.collect::<Vec<_>>();
let mut stored_accounts = stored_accounts.drain().map(|(_k, v)| v).collect::<Vec<_>>();
stored_accounts.sort_unstable_by(|a, b| a.pubkey().cmp(b.pubkey()));
GetUniqueAccountsResult {
@ -6594,7 +6590,7 @@ impl AccountsDb {
) -> Vec<AccountInfo> {
let mut calc_stored_meta_time = Measure::start("calc_stored_meta");
let slot = accounts.target_slot();
(0..accounts.len()).into_iter().for_each(|index| {
(0..accounts.len()).for_each(|index| {
let pubkey = accounts.pubkey(index);
self.read_only_accounts_cache.remove(*pubkey, slot);
});
@ -7731,7 +7727,7 @@ impl AccountsDb {
let update = |start, end| {
let mut reclaims = Vec::with_capacity((end - start) / 2);
(start..end).into_iter().for_each(|i| {
(start..end).for_each(|i| {
let info = infos[i];
let pubkey_account = (accounts.pubkey(i), accounts.account(i));
let pubkey = pubkey_account.0;
@ -14627,7 +14623,7 @@ pub mod tests {
accounts_db.write_cache_limit_bytes = write_cache_limit_bytes;
let accounts_db = Arc::new(accounts_db);
let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect();
let slots: Vec<_> = (0..num_slots as Slot).collect();
let stall_slot = num_slots as Slot;
let scan_stall_key = Pubkey::new_unique();
let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
@ -14911,9 +14907,7 @@ pub mod tests {
} else {
// Slots less than `requested_flush_root` and `scan_root` were cleaned in the cache before being flushed
// to storage, should only contain one account
std::iter::once(keys[*slot as usize])
.into_iter()
.collect::<HashSet<Pubkey>>()
std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
};
assert_eq!(slot_accounts, expected_accounts);
@ -15010,9 +15004,7 @@ pub mod tests {
} else {
// If clean was specified, only the latest slot should have all the updates.
// All these other slots have been cleaned before flush
std::iter::once(keys[*slot as usize])
.into_iter()
.collect::<HashSet<Pubkey>>()
std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
};
assert_eq!(slot_accounts, expected_accounts);
}
@ -17333,7 +17325,6 @@ pub mod tests {
fn get_all_accounts(db: &AccountsDb, slots: Range<Slot>) -> Vec<(Pubkey, AccountSharedData)> {
slots
.clone()
.into_iter()
.filter_map(|slot| {
let storages = db.get_storages_for_slot(slot);
storages.map(|storages| {
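
Several hunks in this file drop `.into_iter()` from values that are already iterators rather than collections: `HashMap::drain` and `std::iter::once` both return iterator types, so the extra conversion is an identity. A minimal sketch with toy data in place of the accounts-db types:

```rust
use std::collections::{HashMap, HashSet};

fn main() {
    let mut map: HashMap<u8, &str> = HashMap::from([(1, "a"), (2, "b")]);
    // Drain<'_, K, V> is itself an Iterator; `.map` can follow it directly.
    let mut values: Vec<&str> = map.drain().map(|(_k, v)| v).collect();
    values.sort_unstable();
    assert_eq!(values, vec!["a", "b"]);

    // iter::once already yields an iterator, so it collects without `.into_iter()`.
    let only: HashSet<u8> = std::iter::once(7u8).collect();
    assert!(only.contains(&7));
}
```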

View File

@ -1341,7 +1341,7 @@ pub mod tests {
let key_b = Pubkey::new(&[2u8; 32]);
let key_c = Pubkey::new(&[3u8; 32]);
const COUNT: usize = 6;
let hashes = (0..COUNT).into_iter().map(|i| Hash::new(&[i as u8; 32]));
let hashes = (0..COUNT).map(|i| Hash::new(&[i as u8; 32]));
// create this vector
// abbbcc
let keys = [key_a, key_b, key_b, key_b, key_c, key_c];
@ -1670,13 +1670,7 @@ pub mod tests {
let input: Vec<Vec<Vec<u64>>> = vec![vec![vec![0, 1], vec![], vec![2, 3, 4], vec![]]];
let cumulative = CumulativeOffsets::from_raw_2d(&input);
let src: Vec<_> = input
.clone()
.into_iter()
.flatten()
.into_iter()
.flatten()
.collect();
let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
let len = src.len();
assert_eq!(cumulative.total_count, len);
assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@ -1701,13 +1695,7 @@ pub mod tests {
let input = vec![vec![vec![], vec![0, 1], vec![], vec![2, 3, 4], vec![]]];
let cumulative = CumulativeOffsets::from_raw_2d(&input);
let src: Vec<_> = input
.clone()
.into_iter()
.flatten()
.into_iter()
.flatten()
.collect();
let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
let len = src.len();
assert_eq!(cumulative.total_count, len);
assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@ -1741,13 +1729,7 @@ pub mod tests {
];
let cumulative = CumulativeOffsets::from_raw_2d(&input);
let src: Vec<_> = input
.clone()
.into_iter()
.flatten()
.into_iter()
.flatten()
.collect();
let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
let len = src.len();
assert_eq!(cumulative.total_count, len);
assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@ -1841,10 +1823,7 @@ pub mod tests {
hash_counts.extend(threshold - 1..=threshold + target);
for hash_count in hash_counts {
let hashes: Vec<_> = (0..hash_count)
.into_iter()
.map(|_| Hash::new_unique())
.collect();
let hashes: Vec<_> = (0..hash_count).map(|_| Hash::new_unique()).collect();
test_hashing(hashes, FANOUT);
}

View File

@ -754,7 +754,6 @@ impl<T: IndexValue> AccountsIndex<T> {
let bin_calculator = PubkeyBinCalculator24::new(bins);
let storage = AccountsIndexStorage::new(bins, &config, exit);
let account_maps = (0..bins)
.into_iter()
.map(|bin| Arc::clone(&storage.in_mem[bin]))
.collect::<Vec<_>>();
(account_maps, bin_calculator, storage)
@ -1582,7 +1581,6 @@ impl<T: IndexValue> AccountsIndex<T> {
let random_offset = thread_rng().gen_range(0, bins);
let use_disk = self.storage.storage.disk.is_some();
let mut binned = (0..bins)
.into_iter()
.map(|mut pubkey_bin| {
// opposite of (pubkey_bin + random_offset) % bins
pubkey_bin = if pubkey_bin < random_offset {
@ -1637,7 +1635,6 @@ impl<T: IndexValue> AccountsIndex<T> {
/// return Vec<Vec<>> because the internal vecs are already allocated per bin
pub fn retrieve_duplicate_keys_from_startup(&self) -> Vec<Vec<(Slot, Pubkey)>> {
(0..self.bins())
.into_iter()
.map(|pubkey_bin| {
let r_account_maps = &self.account_maps[pubkey_bin];
r_account_maps.retrieve_duplicate_keys_from_startup()
@ -3961,14 +3958,7 @@ pub mod tests {
);
assert_eq!((bins - 1, usize::MAX), iter.bin_start_and_range());
assert_eq!(
(0..2)
.into_iter()
.skip(1)
.take(usize::MAX)
.collect::<Vec<_>>(),
vec![1]
);
assert_eq!((0..2).skip(1).take(usize::MAX).collect::<Vec<_>>(), vec![1]);
}
#[test]

View File

@ -64,7 +64,6 @@ impl BgThreads {
let local_exit = Arc::new(AtomicBool::default());
let handles = Some(
(0..threads)
.into_iter()
.map(|idx| {
// the first thread we start is special
let can_advance_age = can_advance_age && idx == 0;
@ -164,7 +163,6 @@ impl<T: IndexValue> AccountsIndexStorage<T> {
let storage = Arc::new(BucketMapHolder::new(bins, config, threads));
let in_mem = (0..bins)
.into_iter()
.map(|bin| Arc::new(InMemAccountsIndex::new(&storage, bin)))
.collect::<Vec<_>>();

View File

@ -6272,7 +6272,7 @@ impl Bank {
) {
assert!(!self.freeze_started());
let mut m = Measure::start("stakes_cache.check_and_store");
(0..accounts.len()).into_iter().for_each(|i| {
(0..accounts.len()).for_each(|i| {
self.stakes_cache
.check_and_store(accounts.pubkey(i), accounts.account(i))
});

View File

@ -404,7 +404,6 @@ pub mod tests {
let bins = 4;
let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
let visited = (0..bins)
.into_iter()
.map(|_| AtomicUsize::default())
.collect::<Vec<_>>();
let iterations = bins * 30;
@ -412,7 +411,7 @@ pub mod tests {
let expected = threads * iterations / bins;
(0..threads).into_par_iter().for_each(|_| {
(0..iterations).into_iter().for_each(|_| {
(0..iterations).for_each(|_| {
let bin = test.next_bucket_to_flush();
visited[bin].fetch_add(1, Ordering::Relaxed);
});

View File

@ -59,10 +59,7 @@ impl BucketMapHolderStats {
pub fn new(bins: usize) -> BucketMapHolderStats {
BucketMapHolderStats {
bins: bins as u64,
per_bucket_count: (0..bins)
.into_iter()
.map(|_| AtomicUsize::default())
.collect(),
per_bucket_count: (0..bins).map(|_| AtomicUsize::default()).collect(),
..BucketMapHolderStats::default()
}
}
@ -195,7 +192,6 @@ impl BucketMapHolderStats {
let disk_per_bucket_counts = disk
.map(|disk| {
(0..self.bins)
.into_iter()
.map(|i| disk.get_bucket_from_index(i as usize).bucket_len() as usize)
.collect::<Vec<_>>()
})

View File

@ -403,7 +403,7 @@ pub mod tests {
.collect::<Vec<_>>(),
vec![&file_name],
);
let mut accum = (0..bins_per_pass).into_iter().map(|_| vec![]).collect();
let mut accum = (0..bins_per_pass).map(|_| vec![]).collect();
cache
.load(&file_name, &mut accum, start_bin_this_pass, &bin_calculator)
.unwrap();
@ -431,9 +431,9 @@ pub mod tests {
bins: usize,
start_bin: usize,
) {
let mut accum: SavedType = (0..bins).into_iter().map(|_| vec![]).collect();
data.drain(..).into_iter().for_each(|mut x| {
x.drain(..).into_iter().for_each(|item| {
let mut accum: SavedType = (0..bins).map(|_| vec![]).collect();
data.drain(..).for_each(|mut x| {
x.drain(..).for_each(|item| {
let bin = bin_calculator.bin_from_pubkey(&item.pubkey);
accum[bin - start_bin].push(item);
})
@ -450,12 +450,10 @@ pub mod tests {
let mut ct = 0;
(
(0..bins)
.into_iter()
.map(|bin| {
let rnd = rng.gen::<u64>() % (bins as u64);
if rnd < count as u64 {
(0..std::cmp::max(1, count / bins))
.into_iter()
.map(|_| {
ct += 1;
let mut pk;

View File

@ -585,7 +585,6 @@ impl<T: IndexValue> InMemAccountsIndex<T> {
let mut found_slot = false;
let mut found_other_slot = false;
(0..slot_list.len())
.into_iter()
.rev() // rev since we delete from the list in some cases
.for_each(|slot_list_index| {
let (cur_slot, cur_account_info) = &slot_list[slot_list_index];
@ -1758,7 +1757,6 @@ mod tests {
{
// up to 3 ignored slot account_info (ignored means not 'new_slot', not 'other_slot', but different slot #s which could exist in the slot_list initially)
possible_initial_slot_list_contents = (0..3)
.into_iter()
.map(|i| (ignored_slot + i, ignored_value + i))
.collect::<Vec<_>>();
// account_info that already exists in the slot_list AT 'new_slot'

View File

@ -32,7 +32,6 @@ impl RentPayingAccountsByPartition {
Self {
partition_count,
accounts: (0..=partition_count)
.into_iter()
.map(|_| HashSet::<Pubkey>::default())
.collect(),
}

View File

@ -755,7 +755,6 @@ pub mod tests {
let reader2 = SharedBufferReader::new(&shared_buffer);
let sent = (0..size)
.into_iter()
.map(|i| ((i + size) % 256) as u8)
.collect::<Vec<_>>();
@ -835,7 +834,6 @@ pub mod tests {
None
};
let sent = (0..data_size)
.into_iter()
.map(|i| ((i + data_size) % 256) as u8)
.collect::<Vec<_>>();
@ -846,7 +844,6 @@ pub mod tests {
let threads = std::cmp::min(8, rayon::current_num_threads());
Some({
let parallel = (0..threads)
.into_iter()
.map(|_| {
// create before any reading starts
let reader_ = SharedBufferReader::new(&shared_buffer);

View File

@ -1747,7 +1747,6 @@ fn unpack_snapshot_local(
// allocate all readers before any readers start reading
let readers = (0..parallel_divisions)
.into_iter()
.map(|_| SharedBufferReader::new(&shared_buffer))
.collect::<Vec<_>>();

View File

@ -245,19 +245,19 @@ pub mod tests {
slot
};
assert_eq!(
(0..5).into_iter().collect::<Vec<_>>(),
(0..5).collect::<Vec<_>>(),
storages.iter_range(&(..5)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(1..5).into_iter().collect::<Vec<_>>(),
(1..5).collect::<Vec<_>>(),
storages.iter_range(&(1..5)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(0..0).into_iter().collect::<Vec<_>>(),
(0..0).collect::<Vec<_>>(),
storages.iter_range(&(..)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(0..0).into_iter().collect::<Vec<_>>(),
(0..0).collect::<Vec<_>>(),
storages.iter_range(&(1..)).map(check).collect::<Vec<_>>()
);
@ -274,7 +274,7 @@ pub mod tests {
for start in 0..5 {
for end in 0..5 {
assert_eq!(
(start..end).into_iter().collect::<Vec<_>>(),
(start..end).collect::<Vec<_>>(),
storages
.iter_range(&(start..end))
.map(check)
@ -283,15 +283,15 @@ pub mod tests {
}
}
assert_eq!(
(3..5).into_iter().collect::<Vec<_>>(),
(3..5).collect::<Vec<_>>(),
storages.iter_range(&(..5)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(1..=3).into_iter().collect::<Vec<_>>(),
(1..=3).collect::<Vec<_>>(),
storages.iter_range(&(1..)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(3..=3).into_iter().collect::<Vec<_>>(),
(3..=3).collect::<Vec<_>>(),
storages.iter_range(&(..)).map(check).collect::<Vec<_>>()
);
@ -312,7 +312,7 @@ pub mod tests {
for start in 0..5 {
for end in 0..5 {
assert_eq!(
(start..end).into_iter().collect::<Vec<_>>(),
(start..end).collect::<Vec<_>>(),
storages
.iter_range(&(start..end))
.map(check)
@ -321,15 +321,15 @@ pub mod tests {
}
}
assert_eq!(
(2..5).into_iter().collect::<Vec<_>>(),
(2..5).collect::<Vec<_>>(),
storages.iter_range(&(..5)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(1..=4).into_iter().collect::<Vec<_>>(),
(1..=4).collect::<Vec<_>>(),
storages.iter_range(&(1..)).map(check).collect::<Vec<_>>()
);
assert_eq!(
(2..=4).into_iter().collect::<Vec<_>>(),
(2..=4).collect::<Vec<_>>(),
storages.iter_range(&(..)).map(check).collect::<Vec<_>>()
);
}

View File

@ -266,7 +266,7 @@ pub mod tests {
assert_eq!(a.target_slot(), b.target_slot());
assert_eq!(a.len(), b.len());
assert_eq!(a.is_empty(), b.is_empty());
(0..a.len()).into_iter().for_each(|i| {
(0..a.len()).for_each(|i| {
assert_eq!(a.pubkey(i), b.pubkey(i));
assert!(accounts_equal(a.account(i), b.account(i)));
})

View File

@ -112,7 +112,7 @@ fn test_bad_bank_hash() {
last_print = Instant::now();
}
let num_accounts = thread_rng().gen_range(0, 100);
(0..num_accounts).into_iter().for_each(|_| {
(0..num_accounts).for_each(|_| {
let mut idx;
loop {
idx = thread_rng().gen_range(0, max_accounts);

View File

@ -170,10 +170,7 @@ mod tests {
vec![keys[5]],
];
assert!(account_keys
.key_segment_iter()
.into_iter()
.eq(expected_segments.iter()));
assert!(account_keys.key_segment_iter().eq(expected_segments.iter()));
}
#[test]

View File

@ -1398,7 +1398,6 @@ pub mod test {
let mut num_entries = 5;
let max_connections_per_peer = 10;
let sockets: Vec<_> = (0..num_entries)
.into_iter()
.map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
.collect();
for (i, socket) in sockets.iter().enumerate() {
@ -1451,10 +1450,7 @@ pub mod test {
let num_entries = 15;
let max_connections_per_peer = 10;
let pubkeys: Vec<_> = (0..num_entries)
.into_iter()
.map(|_| Pubkey::new_unique())
.collect();
let pubkeys: Vec<_> = (0..num_entries).map(|_| Pubkey::new_unique()).collect();
for (i, pubkey) in pubkeys.iter().enumerate() {
table
.try_add_connection(
@ -1546,7 +1542,6 @@ pub mod test {
let num_entries = 5;
let max_connections_per_peer = 10;
let sockets: Vec<_> = (0..num_entries)
.into_iter()
.map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
.collect();
for (i, socket) in sockets.iter().enumerate() {
@ -1581,7 +1576,6 @@ pub mod test {
let num_ips = 5;
let max_connections_per_peer = 10;
let mut sockets: Vec<_> = (0..num_ips)
.into_iter()
.map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
.collect();
for (i, socket) in sockets.iter().enumerate() {

View File

@ -530,7 +530,6 @@ mod tests {
TpuConnectionCache::<MockUdpPool>::new(DEFAULT_TPU_CONNECTION_POOL_SIZE).unwrap();
let port_offset = MOCK_PORT_OFFSET;
let addrs = (0..MAX_CONNECTIONS)
.into_iter()
.map(|_| {
let addr = get_addr(&mut rng);
connection_cache.get_connection(&addr);

View File

@ -125,7 +125,6 @@ fn make_dos_message(
account_metas: &[AccountMeta],
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
let data = [num_program_iterations, thread_rng().gen_range(0, 255)];
Instruction::new_with_bytes(program_id, &data, account_metas.to_vec())
@ -654,7 +653,6 @@ pub mod test {
let num_accounts = 17;
let account_metas: Vec<_> = (0..num_accounts)
.into_iter()
.map(|_| AccountMeta::new(Pubkey::new_unique(), false))
.collect();
let num_program_iterations = 10;
@ -705,10 +703,7 @@ pub mod test {
let num_instructions = 70;
let num_program_iterations = 10;
let num_accounts = 7;
let account_keypairs: Vec<_> = (0..num_accounts)
.into_iter()
.map(|_| Keypair::new())
.collect();
let account_keypairs: Vec<_> = (0..num_accounts).map(|_| Keypair::new()).collect();
let account_keypair_refs: Vec<_> = account_keypairs.iter().collect();
let mut start = Measure::start("total accounts run");
run_transactions_dos(

View File

@ -131,7 +131,6 @@ impl DiscreteLog {
pub fn decode_u32(self) -> Option<u64> {
let mut starting_point = self.target;
let handles = (0..self.num_threads)
.into_iter()
.map(|i| {
let ristretto_iterator = RistrettoIterator::new(
(starting_point, i as u64),