From 5c9beef49835fb9f093fee3ba47d7976c6cf3371 Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Thu, 5 Jan 2023 18:05:32 +0000
Subject: [PATCH] fixes errors from clippy::useless_conversion (#29534)

https://rust-lang.github.io/rust-clippy/master/index.html#useless_conversion
---
 accounts-cluster-bench/src/main.rs            |  2 --
 bench-streamer/src/main.rs                    |  1 -
 bucket_map/benches/bucket_map.rs              |  8 ++---
 bucket_map/src/bucket_map.rs                  |  7 ++---
 bucket_map/src/bucket_storage.rs              |  2 +-
 bucket_map/tests/bucket_map.rs                |  4 +--
 client/src/connection_cache.rs                |  1 -
 core/benches/sigverify_stage.rs               |  1 -
 core/src/serve_repair.rs                      |  1 -
 core/src/unprocessed_transaction_storage.rs   |  7 +----
 core/src/verified_vote_packets.rs             |  1 -
 ledger/src/blockstore.rs                      |  1 -
 metrics/benches/metrics.rs                    |  1 -
 runtime/src/accounts_db.rs                    | 21 ++++---------
 runtime/src/accounts_hash.rs                  | 31 +++----------------
 runtime/src/accounts_index.rs                 | 12 +------
 runtime/src/accounts_index_storage.rs         |  2 --
 runtime/src/bank.rs                           |  2 +-
 runtime/src/bucket_map_holder.rs              |  3 +-
 runtime/src/bucket_map_holder_stats.rs        |  6 +---
 runtime/src/cache_hash_data.rs                | 10 +++---
 runtime/src/in_mem_accounts_index.rs          |  2 --
 .../src/rent_paying_accounts_by_partition.rs  |  1 -
 runtime/src/shared_buffer_reader.rs           |  3 --
 runtime/src/snapshot_utils.rs                 |  1 -
 runtime/src/sorted_storages.rs                | 24 +++++++-------
 runtime/src/storable_accounts.rs              |  2 +-
 runtime/tests/accounts.rs                     |  2 +-
 sdk/program/src/message/account_keys.rs       |  5 +--
 streamer/src/nonblocking/quic.rs              |  8 +----
 tpu-client/src/tpu_connection_cache.rs        |  1 -
 transaction-dos/src/main.rs                   |  7 +----
 zk-token-sdk/src/encryption/discrete_log.rs   |  1 -
 33 files changed, 46 insertions(+), 135 deletions(-)

diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs
index 74bad5a431..0829d43da9 100644
--- a/accounts-cluster-bench/src/main.rs
+++ b/accounts-cluster-bench/src/main.rs
@@ -155,7 +155,6 @@ fn make_create_message(
     let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
 
     let instructions: Vec<_> = (0..num_instructions)
-        .into_iter()
         .flat_map(|_| {
             let program_id = if mint.is_some() {
                 inline_spl_token::id()
@@ -203,7 +202,6 @@ fn make_close_message(
     spl_token: bool,
 ) -> Message {
     let instructions: Vec<_> = (0..num_instructions)
-        .into_iter()
         .filter_map(|_| {
             let program_id = if spl_token {
                 inline_spl_token::id()
diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs
index 32be598c84..8d70a2209a 100644
--- a/bench-streamer/src/main.rs
+++ b/bench-streamer/src/main.rs
@@ -120,7 +120,6 @@ fn main() -> Result<()> {
     }
 
     let producer_threads: Vec<_> = (0..num_producers)
-        .into_iter()
         .map(|_| producer(&addr, exit.clone()))
         .collect();
 
diff --git a/bucket_map/benches/bucket_map.rs b/bucket_map/benches/bucket_map.rs
index 053b5a7d05..aec83fe502 100644
--- a/bucket_map/benches/bucket_map.rs
+++ b/bucket_map/benches/bucket_map.rs
@@ -39,7 +39,7 @@ DEFINE_NxM_BENCH!(dim_32x64, 32, 64);
 /// Benchmark insert with Hashmap as baseline for N threads inserting M keys each
 fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
     let index = RwLock::new(HashMap::new());
-    (0..n).into_iter().into_par_iter().for_each(|i| {
+    (0..n).into_par_iter().for_each(|i| {
         let key = Pubkey::new_unique();
         index
             .write()
             .unwrap()
             .insert(key, vec![(i, IndexValue::default())]);
     });
     bencher.iter(|| {
-        (0..n).into_iter().into_par_iter().for_each(|_| {
+        (0..n).into_par_iter().for_each(|_| {
             for j in 0..m {
                 let key = Pubkey::new_unique();
                 index
@@ -62,12 +62,12 @@ fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize)
 /// Benchmark insert with BucketMap with N buckets for N threads inserting M keys each
 fn do_bench_insert_bucket_map(bencher: &mut Bencher, n: usize, m: usize) {
     let index = BucketMap::new(BucketMapConfig::new(n));
-    (0..n).into_iter().into_par_iter().for_each(|i| {
+    (0..n).into_par_iter().for_each(|i| {
         let key = Pubkey::new_unique();
         index.update(&key, |_| Some((vec![(i, IndexValue::default())], 0)));
     });
     bencher.iter(|| {
-        (0..n).into_iter().into_par_iter().for_each(|_| {
+        (0..n).into_par_iter().for_each(|_| {
             for j in 0..m {
                 let key = Pubkey::new_unique();
                 index.update(&key, |_| Some((vec![(j, IndexValue::default())], 0)));
diff --git a/bucket_map/src/bucket_map.rs b/bucket_map/src/bucket_map.rs
index 55f669c1b0..bad5cec5a2 100644
--- a/bucket_map/src/bucket_map.rs
+++ b/bucket_map/src/bucket_map.rs
@@ -81,7 +81,6 @@ impl BucketMap {
         let stats = Arc::default();
         let buckets = (0..config.max_buckets)
-            .into_iter()
             .map(|_| {
                 Arc::new(BucketApi::new(
                     Arc::clone(&drives),
@@ -320,7 +319,7 @@ mod tests {
     fn bucket_map_test_grow_read() {
         let config = BucketMapConfig::new(1 << 2);
         let index = BucketMap::new(config);
-        let keys: Vec<Pubkey> = (0..100).into_iter().map(|_| Pubkey::new_unique()).collect();
+        let keys: Vec<Pubkey> = (0..100).map(|_| Pubkey::new_unique()).collect();
         for k in 0..keys.len() {
             let key = &keys[k];
             let i = read_be_u64(key.as_ref());
@@ -339,7 +338,7 @@ mod tests {
     fn bucket_map_test_n_delete() {
         let config = BucketMapConfig::new(1 << 2);
         let index = BucketMap::new(config);
-        let keys: Vec<Pubkey> = (0..20).into_iter().map(|_| Pubkey::new_unique()).collect();
+        let keys: Vec<Pubkey> = (0..20).map(|_| Pubkey::new_unique()).collect();
         for key in keys.iter() {
             let i = read_be_u64(key.as_ref());
             index.update(key, |_| Some((vec![i], 0)));
@@ -366,7 +365,6 @@ mod tests {
         use std::sync::Mutex;
         solana_logger::setup();
         let maps = (0..2)
-            .into_iter()
             .map(|max_buckets_pow2| {
                 let config = BucketMapConfig::new(1 << max_buckets_pow2);
                 BucketMap::new(config)
             })
@@ -379,7 +377,6 @@ mod tests {
         let gen_rand_value = || {
             let count = thread_rng().gen_range(0, max_slot_list_len);
             let v = (0..count)
-                .into_iter()
                 .map(|x| (x as usize, x as usize /*thread_rng().gen::<usize>()*/))
                 .collect::<Vec<_>>();
             let rc = thread_rng().gen::<u64>();
diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs
index 49bc2963ca..373610115b 100644
--- a/bucket_map/src/bucket_storage.rs
+++ b/bucket_map/src/bucket_storage.rs
@@ -324,7 +324,7 @@ impl BucketStorage {
         let increment = self.capacity_pow2 - old_bucket.capacity_pow2;
         let index_grow = 1 << increment;
 
-        (0..old_cap as usize).into_iter().for_each(|i| {
+        (0..old_cap as usize).for_each(|i| {
             let old_ix = i * old_bucket.cell_size as usize;
             let new_ix = old_ix * index_grow;
             let dst_slice: &[u8] = &self.mmap[new_ix..new_ix + old_bucket.cell_size as usize];
diff --git a/bucket_map/tests/bucket_map.rs b/bucket_map/tests/bucket_map.rs
index 31adcdb04c..dfc67f138d 100644
--- a/bucket_map/tests/bucket_map.rs
+++ b/bucket_map/tests/bucket_map.rs
@@ -23,12 +23,12 @@ fn bucket_map_test_mt() {
         drives: Some(paths.clone()),
         ..BucketMapConfig::default()
     });
-    (0..threads).into_iter().into_par_iter().for_each(|_| {
+    (0..threads).into_par_iter().for_each(|_| {
         let key = Pubkey::new_unique();
         index.update(&key, |_| Some((vec![0u64], 0)));
     });
     let mut timer = Measure::start("bucket_map_test_mt");
-    (0..threads).into_iter().into_par_iter().for_each(|_| {
+    (0..threads).into_par_iter().for_each(|_| {
         for _ in 0..items {
             let key = Pubkey::new_unique();
             let ix: u64 = index.bucket_ix(&key) as u64;
diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs
index 2a7cc85a61..e6387ce008 100644
--- a/client/src/connection_cache.rs
+++ b/client/src/connection_cache.rs
@@ -531,7 +531,6 @@ mod tests {
             0
         };
         let addrs = (0..MAX_CONNECTIONS)
-            .into_iter()
             .map(|_| {
                 let addr = get_addr(&mut rng);
                 connection_cache.get_connection(&addr);
diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs
index 689bf1d011..9090388948 100644
--- a/core/benches/sigverify_stage.rs
+++ b/core/benches/sigverify_stage.rs
@@ -40,7 +40,6 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
     let mut total = 0;
 
     let ips: Vec<_> = (0..num_ips)
-        .into_iter()
         .map(|_| {
             let mut addr = [0u16; 8];
             thread_rng().fill(&mut addr);
diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs
index 6b303c94d1..84d415b50e 100644
--- a/core/src/serve_repair.rs
+++ b/core/src/serve_repair.rs
@@ -2185,7 +2185,6 @@ mod tests {
         let request_slot = MAX_ANCESTOR_RESPONSES as Slot;
         let repair = AncestorHashesRepairType(request_slot);
         let mut response: Vec<(Slot, Hash)> = (0..request_slot)
-            .into_iter()
             .map(|slot| (slot, Hash::new_unique()))
             .collect();
         assert!(repair.verify_response(&AncestorHashesResponse::Hashes(response.clone())));
diff --git a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs
index b92ff90b38..0e4f3d7d93 100644
--- a/core/src/unprocessed_transaction_storage.rs
+++ b/core/src/unprocessed_transaction_storage.rs
@@ -1082,12 +1082,7 @@ mod tests {
         let expected_ports: Vec<_> = (0..256).collect();
         let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts
             .iter_batches()
-            .flat_map(|batch| {
-                batch
-                    .get_forwardable_packets()
-                    .into_iter()
-                    .map(|p| p.meta().port)
-            })
+            .flat_map(|batch| batch.get_forwardable_packets().map(|p| p.meta().port))
             .collect();
         forwarded_ports.sort_unstable();
         assert_eq!(expected_ports, forwarded_ports);
diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs
index 709db89d9b..95f7feba8a 100644
--- a/core/src/verified_vote_packets.rs
+++ b/core/src/verified_vote_packets.rs
@@ -717,7 +717,6 @@ mod tests {
                 slot,
                 confirmation_count,
             })
-            .into_iter()
             .collect::<VecDeque<_>>();
         let vote = VoteTransaction::from(VoteStateUpdate::new(
             slots,
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 00fbf87c42..f361d27fd6 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -9141,7 +9141,6 @@ pub mod tests {
 
     fn make_large_tx_entry(num_txs: usize) -> Entry {
         let txs: Vec<_> = (0..num_txs)
-            .into_iter()
             .map(|_| {
                 let keypair0 = Keypair::new();
                 let to = solana_sdk::pubkey::new_rand();
diff --git a/metrics/benches/metrics.rs b/metrics/benches/metrics.rs
index 112d0c2035..f8038ef512 100644
--- a/metrics/benches/metrics.rs
+++ b/metrics/benches/metrics.rs
@@ -17,7 +17,6 @@ use {
 #[bench]
 fn bench_write_points(bencher: &mut Bencher) {
     let points = (0..10)
-        .into_iter()
         .map(|_| {
             DataPoint::new("measurement")
                 .add_field_i64("i", 0)
diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 55b9c69e11..b6f365ab5d 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -3704,11 +3704,7 @@ impl AccountsDb {
         });
 
         // sort by pubkey to keep account index lookups close
-        let mut stored_accounts = stored_accounts
-            .drain()
-            .into_iter()
-            .map(|(_k, v)| v)
-            .collect::<Vec<_>>();
+        let mut stored_accounts = stored_accounts.drain().map(|(_k, v)| v).collect::<Vec<_>>();
         stored_accounts.sort_unstable_by(|a, b| a.pubkey().cmp(b.pubkey()));
 
         GetUniqueAccountsResult {
@@ -6594,7 +6590,7 @@ impl AccountsDb {
     ) -> Vec {
         let mut calc_stored_meta_time = Measure::start("calc_stored_meta");
         let slot = accounts.target_slot();
-        (0..accounts.len()).into_iter().for_each(|index| {
+        (0..accounts.len()).for_each(|index| {
             let pubkey = accounts.pubkey(index);
             self.read_only_accounts_cache.remove(*pubkey, slot);
         });
@@ -7731,7 +7727,7 @@ impl AccountsDb {
 
         let update = |start, end| {
             let mut reclaims = Vec::with_capacity((end - start) / 2);
-            (start..end).into_iter().for_each(|i| {
+            (start..end).for_each(|i| {
                 let info = infos[i];
                 let pubkey_account = (accounts.pubkey(i), accounts.account(i));
                 let pubkey = pubkey_account.0;
@@ -14627,7 +14623,7 @@ pub mod tests {
         accounts_db.write_cache_limit_bytes = write_cache_limit_bytes;
         let accounts_db = Arc::new(accounts_db);
 
-        let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect();
+        let slots: Vec<_> = (0..num_slots as Slot).collect();
         let stall_slot = num_slots as Slot;
         let scan_stall_key = Pubkey::new_unique();
         let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
@@ -14911,9 +14907,7 @@ pub mod tests {
             } else {
                 // Slots less than `requested_flush_root` and `scan_root` were cleaned in the cache before being flushed
                 // to storage, should only contain one account
-                std::iter::once(keys[*slot as usize])
-                    .into_iter()
-                    .collect::<HashSet<Pubkey>>()
+                std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
             };
 
             assert_eq!(slot_accounts, expected_accounts);
@@ -15010,9 +15004,7 @@ pub mod tests {
             } else {
                 // If clean was specified, only the latest slot should have all the updates.
                 // All these other slots have been cleaned before flush
-                std::iter::once(keys[*slot as usize])
-                    .into_iter()
-                    .collect::<HashSet<Pubkey>>()
+                std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
             };
             assert_eq!(slot_accounts, expected_accounts);
         }
@@ -17333,7 +17325,6 @@ pub mod tests {
     fn get_all_accounts(db: &AccountsDb, slots: Range<Slot>) -> Vec<(Pubkey, AccountSharedData)> {
         slots
             .clone()
-            .into_iter()
             .filter_map(|slot| {
                 let storages = db.get_storages_for_slot(slot);
                 storages.map(|storages| {
diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs
index 77af53ed42..c56fda81f8 100644
--- a/runtime/src/accounts_hash.rs
+++ b/runtime/src/accounts_hash.rs
@@ -1341,7 +1341,7 @@ pub mod tests {
         let key_b = Pubkey::new(&[2u8; 32]);
         let key_c = Pubkey::new(&[3u8; 32]);
         const COUNT: usize = 6;
-        let hashes = (0..COUNT).into_iter().map(|i| Hash::new(&[i as u8; 32]));
+        let hashes = (0..COUNT).map(|i| Hash::new(&[i as u8; 32]));
         // create this vector
         // abbbcc
         let keys = [key_a, key_b, key_b, key_b, key_c, key_c];
@@ -1670,13 +1670,7 @@ pub mod tests {
         let input: Vec<Vec<Vec<u64>>> = vec![vec![vec![0, 1], vec![], vec![2, 3, 4], vec![]]];
         let cumulative = CumulativeOffsets::from_raw_2d(&input);
 
-        let src: Vec<_> = input
-            .clone()
-            .into_iter()
-            .flatten()
-            .into_iter()
-            .flatten()
-            .collect();
+        let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
         let len = src.len();
         assert_eq!(cumulative.total_count, len);
         assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@@ -1701,13 +1695,7 @@ pub mod tests {
         let input = vec![vec![vec![], vec![0, 1], vec![], vec![2, 3, 4], vec![]]];
         let cumulative = CumulativeOffsets::from_raw_2d(&input);
 
-        let src: Vec<_> = input
-            .clone()
-            .into_iter()
-            .flatten()
-            .into_iter()
-            .flatten()
-            .collect();
+        let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
         let len = src.len();
         assert_eq!(cumulative.total_count, len);
         assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@@ -1741,13 +1729,7 @@ pub mod tests {
         ];
         let cumulative = CumulativeOffsets::from_raw_2d(&input);
 
-        let src: Vec<_> = input
-            .clone()
-            .into_iter()
-            .flatten()
-            .into_iter()
-            .flatten()
-            .collect();
+        let src: Vec<_> = input.clone().into_iter().flatten().flatten().collect();
         let len = src.len();
         assert_eq!(cumulative.total_count, len);
         assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors
@@ -1841,10 +1823,7 @@ pub mod tests {
         hash_counts.extend(threshold - 1..=threshold + target);
 
         for hash_count in hash_counts {
-            let hashes: Vec<_> = (0..hash_count)
-                .into_iter()
-                .map(|_| Hash::new_unique())
-                .collect();
+            let hashes: Vec<_> = (0..hash_count).map(|_| Hash::new_unique()).collect();
 
             test_hashing(hashes, FANOUT);
         }
diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs
index 4dff00de25..80ae44da27 100644
--- a/runtime/src/accounts_index.rs
+++ b/runtime/src/accounts_index.rs
@@ -754,7 +754,6 @@ impl AccountsIndex {
         let bin_calculator = PubkeyBinCalculator24::new(bins);
         let storage = AccountsIndexStorage::new(bins, &config, exit);
         let account_maps = (0..bins)
-            .into_iter()
             .map(|bin| Arc::clone(&storage.in_mem[bin]))
             .collect::<Vec<_>>();
         (account_maps, bin_calculator, storage)
@@ -1582,7 +1581,6 @@ impl AccountsIndex {
         let random_offset = thread_rng().gen_range(0, bins);
         let use_disk = self.storage.storage.disk.is_some();
         let mut binned = (0..bins)
-            .into_iter()
             .map(|mut pubkey_bin| {
                 // opposite of (pubkey_bin + random_offset) % bins
                 pubkey_bin = if pubkey_bin < random_offset {
@@ -1637,7 +1635,6 @@ impl AccountsIndex {
     /// return Vec<Vec<Pubkey>> because the internal vecs are already allocated per bin
     pub fn retrieve_duplicate_keys_from_startup(&self) -> Vec<Vec<Pubkey>> {
         (0..self.bins())
-            .into_iter()
             .map(|pubkey_bin| {
                 let r_account_maps = &self.account_maps[pubkey_bin];
                 r_account_maps.retrieve_duplicate_keys_from_startup()
@@ -3961,14 +3958,7 @@ pub mod tests {
         );
         assert_eq!((bins - 1, usize::MAX), iter.bin_start_and_range());
-        assert_eq!(
-            (0..2)
-                .into_iter()
-                .skip(1)
-                .take(usize::MAX)
-                .collect::<Vec<usize>>(),
-            vec![1]
-        );
+        assert_eq!((0..2).skip(1).take(usize::MAX).collect::<Vec<usize>>(), vec![1]);
     }
 
     #[test]
diff --git a/runtime/src/accounts_index_storage.rs b/runtime/src/accounts_index_storage.rs
index bea6242f28..22c92ab2ae 100644
--- a/runtime/src/accounts_index_storage.rs
+++ b/runtime/src/accounts_index_storage.rs
@@ -64,7 +64,6 @@ impl BgThreads {
         let local_exit = Arc::new(AtomicBool::default());
         let handles = Some(
             (0..threads)
-                .into_iter()
                 .map(|idx| {
                     // the first thread we start is special
                     let can_advance_age = can_advance_age && idx == 0;
@@ -164,7 +163,6 @@ impl AccountsIndexStorage {
         let storage = Arc::new(BucketMapHolder::new(bins, config, threads));
 
         let in_mem = (0..bins)
-            .into_iter()
             .map(|bin| Arc::new(InMemAccountsIndex::new(&storage, bin)))
             .collect::<Vec<_>>();
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index cfe3cda5c5..60a98b222f 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -6272,7 +6272,7 @@ impl Bank {
     ) {
         assert!(!self.freeze_started());
         let mut m = Measure::start("stakes_cache.check_and_store");
-        (0..accounts.len()).into_iter().for_each(|i| {
+        (0..accounts.len()).for_each(|i| {
             self.stakes_cache
                 .check_and_store(accounts.pubkey(i), accounts.account(i))
         });
diff --git a/runtime/src/bucket_map_holder.rs b/runtime/src/bucket_map_holder.rs
index 51c15f2d1b..703646f801 100644
--- a/runtime/src/bucket_map_holder.rs
+++ b/runtime/src/bucket_map_holder.rs
@@ -404,7 +404,6 @@ pub mod tests {
         let bins = 4;
         let test = BucketMapHolder::<u64>::new(bins, &Some(AccountsIndexConfig::default()), 1);
         let visited = (0..bins)
-            .into_iter()
             .map(|_| AtomicUsize::default())
             .collect::<Vec<_>>();
         let iterations = bins * 30;
         let threads = bins * 4;
         let expected = threads * iterations / bins;
 
         (0..threads).into_par_iter().for_each(|_| {
-            (0..iterations).into_iter().for_each(|_| {
+            (0..iterations).for_each(|_| {
                 let bin = test.next_bucket_to_flush();
                 visited[bin].fetch_add(1, Ordering::Relaxed);
             });
diff --git a/runtime/src/bucket_map_holder_stats.rs b/runtime/src/bucket_map_holder_stats.rs
index 15cdf826d6..830f6b235d 100644
--- a/runtime/src/bucket_map_holder_stats.rs
+++ b/runtime/src/bucket_map_holder_stats.rs
@@ -59,10 +59,7 @@ impl BucketMapHolderStats {
     pub fn new(bins: usize) -> BucketMapHolderStats {
         BucketMapHolderStats {
             bins: bins as u64,
-            per_bucket_count: (0..bins)
-                .into_iter()
-                .map(|_| AtomicUsize::default())
-                .collect(),
+            per_bucket_count: (0..bins).map(|_| AtomicUsize::default()).collect(),
             ..BucketMapHolderStats::default()
         }
     }
@@ -195,7 +192,6 @@ impl BucketMapHolderStats {
         let disk_per_bucket_counts = disk
             .map(|disk| {
                 (0..self.bins)
-                    .into_iter()
                     .map(|i| disk.get_bucket_from_index(i as usize).bucket_len() as usize)
                     .collect::<Vec<_>>()
             })
diff --git a/runtime/src/cache_hash_data.rs b/runtime/src/cache_hash_data.rs
index 3d22b2e755..88e2382f2b 100644
--- a/runtime/src/cache_hash_data.rs
+++ b/runtime/src/cache_hash_data.rs
@@ -403,7 +403,7 @@ pub mod tests {
                 .collect::<Vec<_>>(),
             vec![&file_name],
         );
-        let mut accum = (0..bins_per_pass).into_iter().map(|_| vec![]).collect();
+        let mut accum = (0..bins_per_pass).map(|_| vec![]).collect();
         cache
             .load(&file_name, &mut accum, start_bin_this_pass, &bin_calculator)
             .unwrap();
@@ -431,9 +431,9 @@ pub mod tests {
         bins: usize,
         start_bin: usize,
     ) {
-        let mut accum: SavedType = (0..bins).into_iter().map(|_| vec![]).collect();
-        data.drain(..).into_iter().for_each(|mut x| {
-            x.drain(..).into_iter().for_each(|item| {
+        let mut accum: SavedType = (0..bins).map(|_| vec![]).collect();
+        data.drain(..).for_each(|mut x| {
+            x.drain(..).for_each(|item| {
                 let bin = bin_calculator.bin_from_pubkey(&item.pubkey);
                 accum[bin - start_bin].push(item);
             })
@@ -450,12 +450,10 @@ pub mod tests {
         let mut ct = 0;
         (
             (0..bins)
-                .into_iter()
                 .map(|bin| {
                     let rnd = rng.gen::<u64>() % (bins as u64);
                     if rnd < count as u64 {
                         (0..std::cmp::max(1, count / bins))
-                            .into_iter()
                             .map(|_| {
                                 ct += 1;
                                 let mut pk;
diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs
index ca11c4ed98..fe328880fd 100644
--- a/runtime/src/in_mem_accounts_index.rs
+++ b/runtime/src/in_mem_accounts_index.rs
@@ -585,7 +585,6 @@ impl InMemAccountsIndex {
         let mut found_slot = false;
         let mut found_other_slot = false;
         (0..slot_list.len())
-            .into_iter()
             .rev() // rev since we delete from the list in some cases
             .for_each(|slot_list_index| {
                 let (cur_slot, cur_account_info) = &slot_list[slot_list_index];
@@ -1758,7 +1757,6 @@ mod tests {
         {
             // up to 3 ignored slot account_info (ignored means not 'new_slot', not 'other_slot', but different slot #s which could exist in the slot_list initially)
             possible_initial_slot_list_contents = (0..3)
-                .into_iter()
                 .map(|i| (ignored_slot + i, ignored_value + i))
                 .collect::<Vec<_>>();
             // account_info that already exists in the slot_list AT 'new_slot'
diff --git a/runtime/src/rent_paying_accounts_by_partition.rs b/runtime/src/rent_paying_accounts_by_partition.rs
index 9232677ac9..2a67e2a234 100644
--- a/runtime/src/rent_paying_accounts_by_partition.rs
+++ b/runtime/src/rent_paying_accounts_by_partition.rs
@@ -32,7 +32,6 @@ impl RentPayingAccountsByPartition {
         Self {
             partition_count,
             accounts: (0..=partition_count)
-                .into_iter()
                 .map(|_| HashSet::<Pubkey>::default())
                 .collect(),
         }
diff --git a/runtime/src/shared_buffer_reader.rs b/runtime/src/shared_buffer_reader.rs
index ecb313c57b..5542c18981 100644
--- a/runtime/src/shared_buffer_reader.rs
+++ b/runtime/src/shared_buffer_reader.rs
@@ -755,7 +755,6 @@ pub mod tests {
         let reader2 = SharedBufferReader::new(&shared_buffer);
 
         let sent = (0..size)
-            .into_iter()
             .map(|i| ((i + size) % 256) as u8)
             .collect::<Vec<_>>();
 
@@ -835,7 +834,6 @@ pub mod tests {
             None
         };
         let sent = (0..data_size)
-            .into_iter()
             .map(|i| ((i + data_size) % 256) as u8)
             .collect::<Vec<_>>();
 
@@ -846,7 +844,6 @@ pub mod tests {
         let threads = std::cmp::min(8, rayon::current_num_threads());
         Some({
             let parallel = (0..threads)
-                .into_iter()
                 .map(|_| {
                     // create before any reading starts
                     let reader_ = SharedBufferReader::new(&shared_buffer);
diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 8b8ad17a31..9c6d2117ee 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -1747,7 +1747,6 @@ fn unpack_snapshot_local(
 
     // allocate all readers before any readers start reading
     let readers = (0..parallel_divisions)
-        .into_iter()
         .map(|_| SharedBufferReader::new(&shared_buffer))
         .collect::<Vec<_>>();
diff --git a/runtime/src/sorted_storages.rs b/runtime/src/sorted_storages.rs
index ffcfd38323..a328a3ba16 100644
--- a/runtime/src/sorted_storages.rs
+++ b/runtime/src/sorted_storages.rs
@@ -245,19 +245,19 @@ pub mod tests {
             slot
         };
         assert_eq!(
-            (0..5).into_iter().collect::<Vec<_>>(),
+            (0..5).collect::<Vec<_>>(),
             storages.iter_range(&(..5)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (1..5).into_iter().collect::<Vec<_>>(),
+            (1..5).collect::<Vec<_>>(),
             storages.iter_range(&(1..5)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (0..0).into_iter().collect::<Vec<_>>(),
+            (0..0).collect::<Vec<_>>(),
             storages.iter_range(&(..)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (0..0).into_iter().collect::<Vec<_>>(),
+            (0..0).collect::<Vec<_>>(),
             storages.iter_range(&(1..)).map(check).collect::<Vec<_>>()
         );
 
@@ -274,7 +274,7 @@ pub mod tests {
         for start in 0..5 {
             for end in 0..5 {
                 assert_eq!(
-                    (start..end).into_iter().collect::<Vec<_>>(),
+                    (start..end).collect::<Vec<_>>(),
                     storages
                         .iter_range(&(start..end))
                         .map(check)
                         .collect::<Vec<_>>()
                 );
             }
         }
         assert_eq!(
-            (3..5).into_iter().collect::<Vec<_>>(),
+            (3..5).collect::<Vec<_>>(),
             storages.iter_range(&(..5)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (1..=3).into_iter().collect::<Vec<_>>(),
+            (1..=3).collect::<Vec<_>>(),
             storages.iter_range(&(1..)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (3..=3).into_iter().collect::<Vec<_>>(),
+            (3..=3).collect::<Vec<_>>(),
             storages.iter_range(&(..)).map(check).collect::<Vec<_>>()
         );
 
@@ -312,7 +312,7 @@ pub mod tests {
         for start in 0..5 {
             for end in 0..5 {
                 assert_eq!(
-                    (start..end).into_iter().collect::<Vec<_>>(),
+                    (start..end).collect::<Vec<_>>(),
                     storages
                         .iter_range(&(start..end))
                         .map(check)
                         .collect::<Vec<_>>()
                 );
             }
         }
         assert_eq!(
-            (2..5).into_iter().collect::<Vec<_>>(),
+            (2..5).collect::<Vec<_>>(),
             storages.iter_range(&(..5)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (1..=4).into_iter().collect::<Vec<_>>(),
+            (1..=4).collect::<Vec<_>>(),
             storages.iter_range(&(1..)).map(check).collect::<Vec<_>>()
         );
         assert_eq!(
-            (2..=4).into_iter().collect::<Vec<_>>(),
+            (2..=4).collect::<Vec<_>>(),
             storages.iter_range(&(..)).map(check).collect::<Vec<_>>()
         );
     }
diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs
index a9cd5b94dd..cb00575407 100644
--- a/runtime/src/storable_accounts.rs
+++ b/runtime/src/storable_accounts.rs
@@ -266,7 +266,7 @@ pub mod tests {
         assert_eq!(a.target_slot(), b.target_slot());
         assert_eq!(a.len(), b.len());
         assert_eq!(a.is_empty(), b.is_empty());
-        (0..a.len()).into_iter().for_each(|i| {
+        (0..a.len()).for_each(|i| {
             assert_eq!(a.pubkey(i), b.pubkey(i));
             assert!(accounts_equal(a.account(i), b.account(i)));
         })
diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs
index a591dfcee8..317f73b906 100644
--- a/runtime/tests/accounts.rs
+++ b/runtime/tests/accounts.rs
@@ -112,7 +112,7 @@ fn test_bad_bank_hash() {
             last_print = Instant::now();
         }
         let num_accounts = thread_rng().gen_range(0, 100);
-        (0..num_accounts).into_iter().for_each(|_| {
+        (0..num_accounts).for_each(|_| {
             let mut idx;
             loop {
                 idx = thread_rng().gen_range(0, max_accounts);
diff --git a/sdk/program/src/message/account_keys.rs b/sdk/program/src/message/account_keys.rs
index c121150fe1..8662f59cae 100644
--- a/sdk/program/src/message/account_keys.rs
+++ b/sdk/program/src/message/account_keys.rs
@@ -170,10 +170,7 @@ mod tests {
             vec![keys[5]],
         ];
 
-        assert!(account_keys
-            .key_segment_iter()
-            .into_iter()
-            .eq(expected_segments.iter()));
+        assert!(account_keys.key_segment_iter().eq(expected_segments.iter()));
     }
 
     #[test]
diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs
index c4a1f25046..8361dab3f4 100644
--- a/streamer/src/nonblocking/quic.rs
+++ b/streamer/src/nonblocking/quic.rs
@@ -1398,7 +1398,6 @@ pub mod test {
         let mut num_entries = 5;
         let max_connections_per_peer = 10;
         let sockets: Vec<_> = (0..num_entries)
-            .into_iter()
             .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
             .collect();
         for (i, socket) in sockets.iter().enumerate() {
@@ -1451,10 +1450,7 @@ pub mod test {
         let num_entries = 15;
         let max_connections_per_peer = 10;
 
-        let pubkeys: Vec<_> = (0..num_entries)
-            .into_iter()
-            .map(|_| Pubkey::new_unique())
-            .collect();
+        let pubkeys: Vec<_> = (0..num_entries).map(|_| Pubkey::new_unique()).collect();
         for (i, pubkey) in pubkeys.iter().enumerate() {
             table
                 .try_add_connection(
@@ -1546,7 +1542,6 @@ pub mod test {
         let num_entries = 5;
         let max_connections_per_peer = 10;
         let sockets: Vec<_> = (0..num_entries)
-            .into_iter()
             .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
             .collect();
         for (i, socket) in sockets.iter().enumerate() {
@@ -1581,7 +1576,6 @@ pub mod test {
         let num_ips = 5;
         let max_connections_per_peer = 10;
         let mut sockets: Vec<_> = (0..num_ips)
-            .into_iter()
             .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0))
             .collect();
         for (i, socket) in sockets.iter().enumerate() {
diff --git a/tpu-client/src/tpu_connection_cache.rs b/tpu-client/src/tpu_connection_cache.rs
index a9c58852af..203c0b1eef 100644
--- a/tpu-client/src/tpu_connection_cache.rs
+++ b/tpu-client/src/tpu_connection_cache.rs
@@ -530,7 +530,6 @@ mod tests {
             TpuConnectionCache::::new(DEFAULT_TPU_CONNECTION_POOL_SIZE).unwrap();
         let port_offset = MOCK_PORT_OFFSET;
         let addrs = (0..MAX_CONNECTIONS)
-            .into_iter()
             .map(|_| {
                 let addr = get_addr(&mut rng);
                 connection_cache.get_connection(&addr);
diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs
index 0bea000497..0505693fe6 100644
--- a/transaction-dos/src/main.rs
+++ b/transaction-dos/src/main.rs
@@ -125,7 +125,6 @@ fn make_dos_message(
     account_metas: &[AccountMeta],
 ) -> Message {
     let instructions: Vec<_> = (0..num_instructions)
-        .into_iter()
         .map(|_| {
             let data = [num_program_iterations, thread_rng().gen_range(0, 255)];
             Instruction::new_with_bytes(program_id, &data, account_metas.to_vec())
@@ -654,7 +653,6 @@ pub mod test {
         let num_accounts = 17;
         let account_metas: Vec<_> = (0..num_accounts)
-            .into_iter()
             .map(|_| AccountMeta::new(Pubkey::new_unique(), false))
             .collect();
         let num_program_iterations = 10;
@@ -705,10 +703,7 @@ pub mod test {
         let num_instructions = 70;
         let num_program_iterations = 10;
         let num_accounts = 7;
-        let account_keypairs: Vec<_> = (0..num_accounts)
-            .into_iter()
-            .map(|_| Keypair::new())
-            .collect();
+        let account_keypairs: Vec<_> = (0..num_accounts).map(|_| Keypair::new()).collect();
         let account_keypair_refs: Vec<_> = account_keypairs.iter().collect();
         let mut start = Measure::start("total accounts run");
         run_transactions_dos(
diff --git a/zk-token-sdk/src/encryption/discrete_log.rs b/zk-token-sdk/src/encryption/discrete_log.rs
index 24e9ada478..2ebe099016 100644
--- a/zk-token-sdk/src/encryption/discrete_log.rs
+++ b/zk-token-sdk/src/encryption/discrete_log.rs
@@ -131,7 +131,6 @@ impl DiscreteLog {
     pub fn decode_u32(self) -> Option<u64> {
         let mut starting_point = self.target;
         let handles = (0..self.num_threads)
-            .into_iter()
            .map(|i| {
                let ristretto_iterator = RistrettoIterator::new(
                    (starting_point, i as u64),
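
Note (illustrative, not part of the patch): every hunk above removes the same pattern
flagged by clippy's `useless_conversion` lint — calling a conversion such as
`.into_iter()` on a value that is already the target type. A range like `0..n` already
implements `Iterator`, so `(0..n).into_iter()` is a no-op; likewise rayon defines
`into_par_iter()` directly on ranges, so chains like `(0..n).into_iter().into_par_iter()`
shrink to `(0..n).into_par_iter()`. A minimal, self-contained sketch of the before/after
(the snippet and its names are hypothetical, written for std only):

    // Hypothetical repro of the lint; `cargo clippy` warns on the first form.
    fn main() {
        // Before: `0..4` is already an iterator, so `.into_iter()` does nothing.
        #[allow(clippy::useless_conversion)]
        let before: Vec<u32> = (0..4).into_iter().map(|x| x * 2).collect();

        // After: the same chain with the redundant call removed.
        let after: Vec<u32> = (0..4).map(|x| x * 2).collect();

        assert_eq!(before, after);
        assert_eq!(after, vec![0, 2, 4, 6]);
    }

The same reasoning covers `.into_iter()` on an iterator adapter (the removed
`.map(...).into_iter().collect()` chains above): `IntoIterator` is implemented trivially
for every `Iterator`, so the call converts a type to itself and can always be dropped.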