From a30830d7a91bf8f5b66a342f6c3907ec6e6b0df7 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Thu, 27 Apr 2023 10:10:16 +0900 Subject: [PATCH] ci: treewide: deny used_underscore_binding (#31319) * Enforce used_underscore_binding * Fix all * Work around for cfg()-ed code... * ci.... * Make clipply fixes more pleasant * Clone exit signal while intentionally shadowing * Use more verbose code to avoid any #[allow(...)]s --- ci/test-checks.sh | 1 + core/src/ancestor_hashes_service.rs | 3 --- core/src/tvu.rs | 4 ++-- core/src/window_service.rs | 15 +++------------ core/tests/snapshots.rs | 6 ++++-- faucet/src/faucet.rs | 4 ++-- ledger/src/blockstore_processor.rs | 8 ++++---- perf/src/cuda_runtime.rs | 22 +++++++++++----------- rpc-test/tests/rpc.rs | 8 ++++---- runtime/src/accounts_index.rs | 4 ++-- sdk/program/src/pubkey.rs | 1 + sdk/program/src/vote/state/mod.rs | 1 + tokens/src/main.rs | 8 +++++--- 13 files changed, 40 insertions(+), 45 deletions(-) diff --git a/ci/test-checks.sh b/ci/test-checks.sh index dccfae8cf3..04919cd132 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -82,6 +82,7 @@ nightly_clippy_allows=() --deny=warnings \ --deny=clippy::default_trait_access \ --deny=clippy::integer_arithmetic \ + --deny=clippy::used_underscore_binding \ "${nightly_clippy_allows[@]}" if [[ -n $CI ]]; then diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index d3ced37f51..5e50b58681 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -761,7 +761,6 @@ mod test { super::*, crate::{ cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter}, - repair_service::DuplicateSlotsResetReceiver, replay_stage::{ tests::{replay_blockstore_components, ReplayBlockstoreComponents}, ReplayStage, @@ -1039,7 +1038,6 @@ mod test { repairable_dead_slot_pool: HashSet, request_throttle: Vec, repair_stats: AncestorRepairRequestsStats, - _duplicate_slots_reset_receiver: 
DuplicateSlotsResetReceiver, retryable_slots_sender: RetryableSlotsSender, retryable_slots_receiver: RetryableSlotsReceiver, ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender, @@ -1089,7 +1087,6 @@ mod test { repairable_dead_slot_pool: HashSet::new(), request_throttle: vec![], repair_stats: AncestorRepairRequestsStats::default(), - _duplicate_slots_reset_receiver, ancestor_hashes_replay_update_sender, ancestor_hashes_replay_update_receiver, retryable_slots_sender, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 69a535cc1b..2f9f1387c2 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -426,7 +426,7 @@ pub mod tests { let bank_forks = Arc::new(RwLock::new(bank_forks)); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let max_complete_rewards_slot = Arc::new(AtomicU64::default()); - let _ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let tvu = Tvu::new( &vote_keypair.pubkey(), Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])), @@ -476,7 +476,7 @@ pub mod tests { AbsRequestSender::default(), None, &Arc::new(ConnectionCache::default()), - &_ignored_prioritization_fee_cache, + &ignored_prioritization_fee_cache, BankingTracer::new_disabled(), ) .expect("assume success"); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index e68edd60a9..b41e7a700f 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -240,7 +240,6 @@ where let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?; if packet.meta().repair() { let repair_info = RepairMeta { - _from_addr: packet.meta().socket_addr(), // If can't parse the nonce, dump the packet. 
nonce: repair_response::nonce(packet)?, }; @@ -292,7 +291,6 @@ where } struct RepairMeta { - _from_addr: SocketAddr, nonce: Nonce, } @@ -573,10 +571,7 @@ mod test { #[test] fn test_prune_shreds() { - use { - crate::serve_repair::ShredRepairType, - std::net::{IpAddr, Ipv4Addr}, - }; + use crate::serve_repair::ShredRepairType; solana_logger::setup(); let shred = Shred::new_from_parity_shard( 5, // slot @@ -589,18 +584,14 @@ mod test { 0, // version ); let mut shreds = vec![shred.clone(), shred.clone(), shred]; - let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080); - let repair_meta = RepairMeta { - _from_addr, - nonce: 0, - }; + let repair_meta = RepairMeta { nonce: 0 }; let outstanding_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default())); let repair_type = ShredRepairType::Orphan(9); let nonce = outstanding_requests .write() .unwrap() .add_request(repair_type, timestamp()); - let repair_meta1 = RepairMeta { _from_addr, nonce }; + let repair_meta1 = RepairMeta { nonce }; let mut repair_infos = vec![None, Some(repair_meta), Some(repair_meta1)]; prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, &outstanding_requests); assert_eq!(repair_infos.len(), 2); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index f9c570e9a8..094cd417d7 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -71,6 +71,8 @@ struct SnapshotTestConfig { full_snapshot_archives_dir: TempDir, bank_snapshots_dir: TempDir, accounts_dir: PathBuf, + // as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep + // TempDir::drop from running to retain that dir for the duration of test _accounts_tmp_dir: TempDir, } @@ -82,7 +84,7 @@ impl SnapshotTestConfig { full_snapshot_archive_interval_slots: Slot, incremental_snapshot_archive_interval_slots: Slot, ) -> SnapshotTestConfig { - let (_accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); + let (accounts_tmp_dir, accounts_dir) = 
create_tmp_accounts_dir_for_tests(); let bank_snapshots_dir = TempDir::new().unwrap(); let full_snapshot_archives_dir = TempDir::new().unwrap(); let incremental_snapshot_archives_dir = TempDir::new().unwrap(); @@ -128,7 +130,7 @@ impl SnapshotTestConfig { full_snapshot_archives_dir, bank_snapshots_dir, accounts_dir, - _accounts_tmp_dir, + _accounts_tmp_dir: accounts_tmp_dir, } } } diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs index 98d8e665ac..368543da96 100644 --- a/faucet/src/faucet.rs +++ b/faucet/src/faucet.rs @@ -389,11 +389,11 @@ pub async fn run_faucet( ); loop { - let _faucet = faucet.clone(); + let faucet = faucet.clone(); match listener.accept().await { Ok((stream, _)) => { tokio::spawn(async move { - if let Err(e) = process(stream, _faucet).await { + if let Err(e) = process(stream, faucet).await { info!("failed to process request; error = {:?}", e); } }); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 19bced5877..c3a0559370 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -444,7 +444,7 @@ pub fn process_entries_for_tests( }) .collect(); - let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); + let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); let result = process_entries( bank, &mut replay_entries, @@ -453,7 +453,7 @@ pub fn process_entries_for_tests( replay_vote_sender, &mut batch_timing, None, - &_ignored_prioritization_fee_cache, + &ignored_prioritization_fee_cache, ); debug!("process_entries: {:?}", batch_timing); @@ -904,7 +904,7 @@ fn confirm_full_slot( ) -> result::Result<(), BlockstoreProcessorError> { let mut confirmation_timing = ConfirmationTiming::default(); let skip_verification = !opts.run_verification; - let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); + let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); confirm_slot( blockstore, @@ -917,7 +917,7 @@ 
fn confirm_full_slot( recyclers, opts.allow_dead_slots, opts.runtime_config.log_messages_bytes_limit, - &_ignored_prioritization_fee_cache, + &ignored_prioritization_fee_cache, )?; timing.accumulate(&confirmation_timing.batch_execute.totals); diff --git a/perf/src/cuda_runtime.rs b/perf/src/cuda_runtime.rs index efc7545c6e..a2986af181 100644 --- a/perf/src/cuda_runtime.rs +++ b/perf/src/cuda_runtime.rs @@ -23,12 +23,12 @@ use { const CUDA_SUCCESS: c_int = 0; -fn pin(_mem: &mut Vec) { +fn pin(mem: &mut Vec) { if let Some(api) = perf_libs::api() { use std::{ffi::c_void, mem::size_of}; - let ptr = _mem.as_mut_ptr(); - let size = _mem.capacity().saturating_mul(size_of::()); + let ptr = mem.as_mut_ptr(); + let size = mem.capacity().saturating_mul(size_of::()); let err = unsafe { (api.cuda_host_register)(ptr as *mut c_void, size, /*flags=*/ 0) }; @@ -39,14 +39,14 @@ fn pin(_mem: &mut Vec) { } } -fn unpin(_mem: *mut T) { +fn unpin(mem: *mut T) { if let Some(api) = perf_libs::api() { use std::ffi::c_void; - let err = unsafe { (api.cuda_host_unregister)(_mem as *mut c_void) }; + let err = unsafe { (api.cuda_host_unregister)(mem as *mut c_void) }; assert!( err == CUDA_SUCCESS, - "cudaHostUnregister returned: {err} ptr: {_mem:?}" + "cudaHostUnregister returned: {err} ptr: {mem:?}" ); } } @@ -277,21 +277,21 @@ impl PinnedVec { self.x.shuffle(rng) } - fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) { + fn check_ptr(&mut self, old_ptr: *mut T, old_capacity: usize, from: &'static str) { let api = perf_libs::api(); if api.is_some() && self.pinnable - && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity) + && (self.x.as_ptr() != old_ptr || self.x.capacity() != old_capacity) { if self.pinned { - unpin(_old_ptr); + unpin(old_ptr); } trace!( "pinning from check_ptr old: {} size: {} from: {}", - _old_capacity, + old_capacity, self.x.capacity(), - _from + from ); pin(&mut self.x); self.pinned = true; diff --git 
a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 02b84b7741..58b0a73b69 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -297,9 +297,9 @@ fn test_rpc_subscriptions() { let status_sender = status_sender.clone(); let signature_subscription_ready_clone = signature_subscription_ready_clone.clone(); tokio::spawn({ - let _pubsub_client = Arc::clone(&pubsub_client); + let pubsub_client = Arc::clone(&pubsub_client); async move { - let (mut sig_notifications, sig_unsubscribe) = _pubsub_client + let (mut sig_notifications, sig_unsubscribe) = pubsub_client .signature_subscribe( &signature, Some(RpcSignatureSubscribeConfig { @@ -324,9 +324,9 @@ fn test_rpc_subscriptions() { let account_sender = account_sender.clone(); let account_subscription_ready_clone = account_subscription_ready_clone.clone(); tokio::spawn({ - let _pubsub_client = Arc::clone(&pubsub_client); + let pubsub_client = Arc::clone(&pubsub_client); async move { - let (mut account_notifications, account_unsubscribe) = _pubsub_client + let (mut account_notifications, account_unsubscribe) = pubsub_client .account_subscribe( &pubkey, Some(RpcAccountInfoConfig { diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index d22a24c505..d107983f2f 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -3404,10 +3404,10 @@ pub mod tests { index.unchecked_scan_accounts( "", &Ancestors::default(), - |pubkey, _index| { + |pubkey, index| { if pubkey == &key { found_key = true; - assert_eq!(_index, (&true, 3)); + assert_eq!(index, (&true, 3)); }; num += 1 }, diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 8e8985b226..da9b65bb5b 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -158,6 +158,7 @@ impl TryFrom<&str> for Pubkey { } } +#[allow(clippy::used_underscore_binding)] pub fn bytes_are_curve_point>(_bytes: T) -> bool { #[cfg(not(target_os = "solana"))] { diff --git a/sdk/program/src/vote/state/mod.rs 
b/sdk/program/src/vote/state/mod.rs index 4bc77d5ca5..3e39d2f4df 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -341,6 +341,7 @@ impl VoteState { 3762 // see test_vote_state_size_of. } + #[allow(clippy::used_underscore_binding)] pub fn deserialize(_input: &[u8]) -> Result { #[cfg(not(target_os = "solana"))] { diff --git a/tokens/src/main.rs b/tokens/src/main.rs index 9487d5d0eb..f72278a99f 100644 --- a/tokens/src/main.rs +++ b/tokens/src/main.rs @@ -31,10 +31,12 @@ fn main() -> Result<(), Box> { let client = RpcClient::new(json_rpc_url); let exit = Arc::new(AtomicBool::default()); - let _exit = exit.clone(); // Initialize CTRL-C handler to ensure db changes are written before exit. - ctrlc::set_handler(move || { - _exit.store(true, Ordering::SeqCst); + ctrlc::set_handler({ + let exit = exit.clone(); + move || { + exit.store(true, Ordering::SeqCst); + } }) .expect("Error setting Ctrl-C handler");