removes #[allow(clippy::same_item_push)] (#29543)

behzad nouri 2023-01-06 17:32:26 +00:00 committed by GitHub
parent 3fc4015a4d
commit 283a2b1540
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 83 additions and 109 deletions
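Every file in this change applies the same mechanical rewrite: a for loop that pushes into a Vec (which clippy's same_item_push lint flags, apparently as a false positive when the pushed value comes from a call such as Keypair::new()) is replaced by an iterator chain built with std::iter::repeat_with, so the #[allow] attribute can be dropped. A minimal before/after sketch of the shape, with a hypothetical new_id() constructor standing in for the real constructors used in the diff:

use std::iter::repeat_with;

// Hypothetical stand-in for a per-iteration constructor such as Keypair::new().
fn new_id() -> String {
    "id".to_string()
}

// Before: pushing the result of a call inside a for loop; this is the shape
// that clippy::same_item_push complains about.
fn make_ids_loop(n: usize) -> Vec<String> {
    let mut ids = Vec::with_capacity(n);
    for _ in 0..n {
        ids.push(new_id());
    }
    ids
}

// After: build the Vec straight from an iterator; no loop, no push, no lint.
fn make_ids_iter(n: usize) -> Vec<String> {
    repeat_with(new_id).take(n).collect()
}

fn main() {
    assert_eq!(make_ids_loop(3), make_ids_iter(3));
}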


@@ -42,6 +42,7 @@ use {
         vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction,
     },
     std::{
+        iter::repeat_with,
         sync::{atomic::Ordering, Arc, RwLock},
         time::{Duration, Instant},
     },
@@ -128,17 +129,17 @@ fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Tra
         .collect()
 }
 
-#[allow(clippy::same_item_push)]
 fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
     let progs = 4;
     (0..txes)
         .map(|_| {
-            let mut instructions = vec![];
             let from_key = Keypair::new();
-            for _ in 1..progs {
+            let instructions: Vec<_> = repeat_with(|| {
                 let to_key = pubkey::new_rand();
-                instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
-            }
+                system_instruction::transfer(&from_key.pubkey(), &to_key, 1)
+            })
+            .take(progs)
+            .collect();
             let message = Message::new(&instructions, Some(&from_key.pubkey()));
             Transaction::new(&[&from_key], message, hash)
         })
@@ -400,7 +401,6 @@ fn simulate_process_entries(
     process_entries_for_tests(&bank, vec![entry], randomize_txs, None, None).unwrap();
 }
 
-#[allow(clippy::same_item_push)]
 fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) {
     // entropy multiplier should be big enough to provide sufficient entropy
     // but small enough to not take too much time while executing the test.
@@ -416,14 +416,9 @@ fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) {
         ..
     } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports);
 
-    let mut keypairs: Vec<Keypair> = vec![];
+    let keypairs: Vec<Keypair> = repeat_with(Keypair::new).take(num_accounts).collect();
     let tx_vector: Vec<VersionedTransaction> = Vec::with_capacity(num_accounts / 2);
 
-    for _ in 0..num_accounts {
-        let keypair = Keypair::new();
-        keypairs.push(keypair);
-    }
-
     bencher.iter(|| {
         simulate_process_entries(
             randomize_txs,


@@ -28,6 +28,7 @@ use {
     },
     solana_streamer::socket::SocketAddrSpace,
     std::{
+        iter::repeat_with,
         net::UdpSocket,
         sync::{
             atomic::{AtomicUsize, Ordering},
@@ -47,7 +48,6 @@ use {
 // threads loop indefinitely.
 #[ignore]
 #[bench]
-#[allow(clippy::same_item_push)]
 fn bench_retransmitter(bencher: &mut Bencher) {
     solana_logger::setup();
     let cluster_info = ClusterInfo::new(
@@ -56,8 +56,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
         SocketAddrSpace::Unspecified,
     );
     const NUM_PEERS: usize = 4;
-    let mut peer_sockets = Vec::new();
-    for _ in 0..NUM_PEERS {
+    let peer_sockets: Vec<_> = repeat_with(|| {
         let id = Pubkey::new_unique();
         let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
@@ -67,8 +66,10 @@ fn bench_retransmitter(bencher: &mut Bencher) {
         info!("local: {:?}", contact_info.tvu);
         cluster_info.insert_info(contact_info);
         socket.set_nonblocking(true).unwrap();
-        peer_sockets.push(socket);
-    }
+        socket
+    })
+    .take(NUM_PEERS)
+    .collect();
 
     let peer_sockets = Arc::new(peer_sockets);
     let cluster_info = Arc::new(cluster_info);


@@ -35,6 +35,7 @@ use {
     },
     std::{
         collections::{HashMap, HashSet},
+        iter::repeat_with,
         net::UdpSocket,
         sync::{
             atomic::{AtomicBool, Ordering},
@@ -235,7 +236,6 @@ impl BroadcastStage {
     /// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
     /// completing the cycle.
     #[allow(clippy::too_many_arguments)]
-    #[allow(clippy::same_item_push)]
     fn new(
         socks: Vec<UdpSocket>,
         cluster_info: Arc<ClusterInfo>,
@@ -271,42 +271,43 @@
         };
         let mut thread_hdls = vec![thread_hdl];
         let socket_receiver = Arc::new(Mutex::new(socket_receiver));
-        for sock in socks.into_iter() {
+        thread_hdls.extend(socks.into_iter().map(|sock| {
             let socket_receiver = socket_receiver.clone();
             let mut bs_transmit = broadcast_stage_run.clone();
             let cluster_info = cluster_info.clone();
             let bank_forks = bank_forks.clone();
-            let t = Builder::new()
+            let run_transmit = move || loop {
+                let res = bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks);
+                let res = Self::handle_error(res, "solana-broadcaster-transmit");
+                if let Some(res) = res {
+                    return res;
+                }
+            };
+            Builder::new()
                 .name("solBroadcastTx".to_string())
-                .spawn(move || loop {
-                    let res =
-                        bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks);
-                    let res = Self::handle_error(res, "solana-broadcaster-transmit");
-                    if let Some(res) = res {
-                        return res;
-                    }
-                })
-                .unwrap();
-            thread_hdls.push(t);
-        }
+                .spawn(run_transmit)
+                .unwrap()
+        }));
         let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
-        for _ in 0..NUM_INSERT_THREADS {
-            let blockstore_receiver = blockstore_receiver.clone();
-            let mut bs_record = broadcast_stage_run.clone();
-            let btree = blockstore.clone();
-            let t = Builder::new()
-                .name("solBroadcastRec".to_string())
-                .spawn(move || loop {
-                    let res = bs_record.record(&blockstore_receiver, &btree);
-                    let res = Self::handle_error(res, "solana-broadcaster-record");
-                    if let Some(res) = res {
-                        return res;
-                    }
-                })
-                .unwrap();
-            thread_hdls.push(t);
-        }
+        thread_hdls.extend(
+            repeat_with(|| {
+                let blockstore_receiver = blockstore_receiver.clone();
+                let mut bs_record = broadcast_stage_run.clone();
+                let btree = blockstore.clone();
+                let run_record = move || loop {
+                    let res = bs_record.record(&blockstore_receiver, &btree);
+                    let res = Self::handle_error(res, "solana-broadcaster-record");
+                    if let Some(res) = res {
+                        return res;
+                    }
+                };
+                Builder::new()
+                    .name("solBroadcastRec".to_string())
+                    .spawn(run_record)
+                    .unwrap()
+            })
+            .take(NUM_INSERT_THREADS),
+        );
         let retransmit_thread = Builder::new()
             .name("solBroadcastRtx".to_string())
             .spawn(move || loop {
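The broadcast-stage change uses the same idea for threads: instead of pushing each JoinHandle inside a for loop, the handles come from an iterator (a map over the sockets, or repeat_with for the insert threads) and are appended with Vec::extend. A standalone sketch of that shape, assuming nothing beyond the standard library (the worker name and count are illustrative):

use std::{iter::repeat_with, thread, thread::JoinHandle};

fn spawn_workers(num_workers: usize) -> Vec<JoinHandle<()>> {
    let mut handles: Vec<JoinHandle<()>> = Vec::new();
    // repeat_with spawns one worker per call, take() bounds the count, and
    // extend() appends the handles without an explicit push inside a loop.
    handles.extend(
        repeat_with(|| {
            thread::Builder::new()
                .name("worker".to_string())
                .spawn(|| {
                    // a real worker would loop here, as the broadcast threads do
                })
                .unwrap()
        })
        .take(num_workers),
    );
    handles
}

fn main() {
    for handle in spawn_workers(4) {
        handle.join().unwrap();
    }
}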


@@ -35,6 +35,7 @@ use {
     std::{
         cmp,
         ffi::OsStr,
+        iter::repeat_with,
         sync::{Arc, Mutex, Once},
         thread::{self, JoinHandle},
         time::Instant,
@@ -859,27 +860,19 @@ pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Trans
     entry
 }
 
-#[allow(clippy::same_item_push)]
 pub fn create_ticks(num_ticks: u64, hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> {
-    let mut ticks = Vec::with_capacity(num_ticks as usize);
-    for _ in 0..num_ticks {
-        let new_tick = next_entry_mut(&mut hash, hashes_per_tick, vec![]);
-        ticks.push(new_tick);
-    }
-
-    ticks
+    repeat_with(|| next_entry_mut(&mut hash, hashes_per_tick, vec![]))
+        .take(num_ticks as usize)
+        .collect()
 }
 
-#[allow(clippy::same_item_push)]
 pub fn create_random_ticks(num_ticks: u64, max_hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> {
-    let mut ticks = Vec::with_capacity(num_ticks as usize);
-    for _ in 0..num_ticks {
+    repeat_with(|| {
         let hashes_per_tick = thread_rng().gen_range(1, max_hashes_per_tick);
-        let new_tick = next_entry_mut(&mut hash, hashes_per_tick, vec![]);
-        ticks.push(new_tick);
-    }
-
-    ticks
+        next_entry_mut(&mut hash, hashes_per_tick, vec![])
+    })
+    .take(num_ticks as usize)
+    .collect()
 }
 
 /// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
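Worth noting for create_ticks and create_random_ticks: repeat_with repeats the call, not a precomputed value, and its closure may mutate captured state. That is what lets `hash` keep advancing between ticks even though the explicit loop is gone. A small self-contained sketch of that stateful-closure behaviour (the counter stands in for the mutated hash):

use std::iter::repeat_with;

fn main() {
    let mut counter = 0u64;
    // The closure captures `counter` mutably, so every call yields a fresh
    // value; repeat_with is "repeat one call", not "repeat one item".
    let ticks: Vec<u64> = repeat_with(|| {
        counter += 1;
        counter * counter
    })
    .take(5)
    .collect();
    assert_eq!(ticks, vec![1, 4, 9, 16, 25]);
}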


@@ -4356,7 +4356,6 @@ RPC Enabled Nodes: 1"#;
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_push_epoch_slots_large() {
         let node_keypair = Arc::new(Keypair::new());
         let cluster_info = ClusterInfo::new(
@@ -4364,12 +4363,15 @@ RPC Enabled Nodes: 1"#;
             node_keypair,
             SocketAddrSpace::Unspecified,
         );
-        let mut range: Vec<Slot> = vec![];
         //random should be hard to compress
-        for _ in 0..32000 {
-            let last = *range.last().unwrap_or(&0);
-            range.push(last + rand::thread_rng().gen_range(1, 32));
-        }
+        let mut rng = rand::thread_rng();
+        let range: Vec<Slot> = repeat_with(|| rng.gen_range(1, 32))
+            .scan(0, |slot, step| {
+                *slot += step;
+                Some(*slot)
+            })
+            .take(32000)
+            .collect();
         cluster_info.push_epoch_slots(&range[..16000]);
         cluster_info.push_epoch_slots(&range[16000..]);
         let slots = cluster_info.get_epoch_slots(&mut Cursor::default());


@@ -337,7 +337,8 @@ impl EpochSlots {
 #[cfg(test)]
 mod tests {
-    use super::*;
+    use {super::*, rand::Rng, std::iter::repeat_with};
 
     #[test]
     fn test_epoch_slots_max_size() {
         let epoch_slots = EpochSlots::default();
@@ -484,16 +485,18 @@
         assert_eq!(slots.to_slots(0), range);
     }
 
+    fn make_rand_slots<R: Rng>(rng: &mut R) -> impl Iterator<Item = Slot> + '_ {
+        repeat_with(|| rng.gen_range(1, 5)).scan(0, |slot, step| {
+            *slot += step;
+            Some(*slot)
+        })
+    }
+
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_epoch_slots_fill_uncompressed_random_range() {
-        use rand::Rng;
+        let mut rng = rand::thread_rng();
         for _ in 0..10 {
-            let mut range: Vec<Slot> = vec![];
-            for _ in 0..5000 {
-                let last = *range.last().unwrap_or(&0);
-                range.push(last + rand::thread_rng().gen_range(1, 5));
-            }
+            let range: Vec<Slot> = make_rand_slots(&mut rng).take(5000).collect();
             let sz = EpochSlots::default().max_compressed_slot_size();
             let mut slots = Uncompressed::new(sz as usize);
             let sz = slots.add(&range);
@@ -504,15 +507,10 @@
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_epoch_slots_fill_compressed_random_range() {
-        use rand::Rng;
+        let mut rng = rand::thread_rng();
         for _ in 0..10 {
-            let mut range: Vec<Slot> = vec![];
-            for _ in 0..5000 {
-                let last = *range.last().unwrap_or(&0);
-                range.push(last + rand::thread_rng().gen_range(1, 5));
-            }
+            let range: Vec<Slot> = make_rand_slots(&mut rng).take(5000).collect();
             let sz = EpochSlots::default().max_compressed_slot_size();
             let mut slots = Uncompressed::new(sz as usize);
             let sz = slots.add(&range);
@@ -525,15 +523,10 @@
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_epoch_slots_fill_random_range() {
-        use rand::Rng;
+        let mut rng = rand::thread_rng();
        for _ in 0..10 {
-            let mut range: Vec<Slot> = vec![];
-            for _ in 0..5000 {
-                let last = *range.last().unwrap_or(&0);
-                range.push(last + rand::thread_rng().gen_range(1, 5));
-            }
+            let range: Vec<Slot> = make_rand_slots(&mut rng).take(5000).collect();
             let mut slots = EpochSlots::default();
             let sz = slots.fill(&range, 1);
             let last = range[sz - 1];
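These tests need a strictly increasing slot list, and make_rand_slots builds one by piping random step sizes through Iterator::scan: the scan state is a running sum, and each yielded slot is that sum so far. A dependency-free sketch of the accumulation, with a fixed step slice standing in for rng.gen_range:

fn cumulative_slots(steps: &[u64]) -> Vec<u64> {
    steps
        .iter()
        // scan keeps a running total; each emitted item is the sum of all
        // steps so far, so the output is strictly increasing when every step >= 1.
        .scan(0u64, |slot, step| {
            *slot += step;
            Some(*slot)
        })
        .collect()
}

fn main() {
    assert_eq!(cumulative_slots(&[3, 1, 4, 2]), vec![3, 4, 8, 10]);
}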


@@ -8421,18 +8421,17 @@ pub mod tests {
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_get_last_hash() {
-        let mut entries: Vec<Entry> = vec![];
+        let entries: Vec<Entry> = vec![];
         let empty_entries_iterator = entries.iter();
         assert!(get_last_hash(empty_entries_iterator).is_none());
 
-        let mut prev_hash = hash::hash(&[42u8]);
-        for _ in 0..10 {
-            let entry = next_entry(&prev_hash, 1, vec![]);
-            prev_hash = entry.hash;
-            entries.push(entry);
-        }
+        let entry = next_entry(&hash::hash(&[42u8]), 1, vec![]);
+        let entries: Vec<Entry> = std::iter::successors(Some(entry), |entry| {
+            Some(next_entry(&entry.hash, 1, vec![]))
+        })
+        .take(10)
+        .collect();
         let entries_iterator = entries.iter();
         assert_eq!(get_last_hash(entries_iterator).unwrap(), entries[9].hash);
     }
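test_get_last_hash switches to std::iter::successors rather than repeat_with because each entry is derived from the previous one (next_entry needs the prior hash), not generated independently. A minimal sketch with plain integers in place of Entry and next_entry:

use std::iter::successors;

fn main() {
    // Each element is computed from its predecessor, mirroring
    // next_entry(&entry.hash, ...), and take(10) bounds the chain.
    let chain: Vec<u64> = successors(Some(1u64), |prev| Some(prev * 2))
        .take(10)
        .collect();
    assert_eq!(chain, vec![1, 2, 4, 8, 16, 32, 64, 128, 256, 512]);
}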


@@ -318,6 +318,7 @@ mod test {
             },
             sysvar,
         },
+        std::iter::repeat_with,
     };
 
     #[test]
@@ -760,12 +761,8 @@
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_parse_stake_set_lockup() {
-        let mut keys: Vec<Pubkey> = vec![];
-        for _ in 0..3 {
-            keys.push(Pubkey::new_unique());
-        }
+        let keys: Vec<Pubkey> = repeat_with(Pubkey::new_unique).take(3).collect();
         let unix_timestamp = 1_234_567_890;
         let epoch = 11;
         let custodian = Pubkey::new_unique();


@@ -729,7 +729,7 @@
                 pubkey::Pubkey as SplTokenPubkey,
             },
         },
-        std::str::FromStr,
+        std::{iter::repeat_with, str::FromStr},
     };
 
     pub(super) fn convert_pubkey(pubkey: Pubkey) -> SplTokenPubkey {
@@ -1711,13 +1711,11 @@
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_parse_token_v3() {
         test_parse_token(&spl_token::id());
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_parse_token_2022() {
         test_parse_token(&spl_token_2022::id());
     }
@@ -1747,10 +1745,7 @@
     }
 
     fn test_token_ix_not_enough_keys(program_id: &SplTokenPubkey) {
-        let mut keys: Vec<Pubkey> = vec![];
-        for _ in 0..10 {
-            keys.push(solana_sdk::pubkey::new_rand());
-        }
+        let keys: Vec<Pubkey> = repeat_with(solana_sdk::pubkey::new_rand).take(10).collect();
 
         // Test InitializeMint variations
         let initialize_mint_ix = initialize_mint(
@@ -2225,13 +2220,11 @@
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_not_enough_keys_token_v3() {
         test_token_ix_not_enough_keys(&spl_token::id());
     }
 
     #[test]
-    #[allow(clippy::same_item_push)]
     fn test_not_enough_keys_token_2022() {
         test_token_ix_not_enough_keys(&spl_token_2022::id());
     }