ci: treewide: deny used_underscore_binding (#31319)

* Enforce used_underscore_binding

* Fix all violations

* Work around cfg()-ed code...

* ci....

* Make clippy fixes more pleasant

* Clone exit signal while intentionally shadowing

* Use more verbose code to avoid any #[allow(...)]s
Ryo Onodera 2023-04-27 10:10:16 +09:00 committed by GitHub
parent 685ebcb3b0
commit a30830d7a9
13 changed files with 40 additions and 45 deletions
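
For context, a minimal sketch (not part of this commit) of what clippy::used_underscore_binding rejects: a leading underscore marks a binding as intentionally unused, so actually using it is flagged as a likely mistake.

    // before: fails under --deny=clippy::used_underscore_binding
    fn checksum(_bytes: &[u8]) -> u64 {
        _bytes.iter().map(|b| *b as u64).sum() // `_bytes` is used despite its prefix
    }

    // after: the usual fix is to drop the prefix once the binding really is used
    fn checksum(bytes: &[u8]) -> u64 {
        bytes.iter().map(|b| *b as u64).sum()
    }

The diffs below apply that fix, or one of the workarounds named in the commit message, to each offending site.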


@@ -82,6 +82,7 @@ nightly_clippy_allows=()
       --deny=warnings \
       --deny=clippy::default_trait_access \
       --deny=clippy::integer_arithmetic \
+      --deny=clippy::used_underscore_binding \
       "${nightly_clippy_allows[@]}"
 
 if [[ -n $CI ]]; then


@@ -761,7 +761,6 @@ mod test {
         super::*,
         crate::{
             cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
-            repair_service::DuplicateSlotsResetReceiver,
             replay_stage::{
                 tests::{replay_blockstore_components, ReplayBlockstoreComponents},
                 ReplayStage,
@@ -1039,7 +1038,6 @@ mod test {
         repairable_dead_slot_pool: HashSet<Slot>,
         request_throttle: Vec<u64>,
         repair_stats: AncestorRepairRequestsStats,
-        _duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
         retryable_slots_sender: RetryableSlotsSender,
         retryable_slots_receiver: RetryableSlotsReceiver,
         ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender,
@@ -1089,7 +1087,6 @@ mod test {
         repairable_dead_slot_pool: HashSet::new(),
         request_throttle: vec![],
         repair_stats: AncestorRepairRequestsStats::default(),
-        _duplicate_slots_reset_receiver,
         ancestor_hashes_replay_update_sender,
         ancestor_hashes_replay_update_receiver,
         retryable_slots_sender,


@@ -426,7 +426,7 @@ pub mod tests {
         let bank_forks = Arc::new(RwLock::new(bank_forks));
         let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
         let max_complete_rewards_slot = Arc::new(AtomicU64::default());
-        let _ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
+        let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let tvu = Tvu::new(
             &vote_keypair.pubkey(),
             Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])),
@@ -476,7 +476,7 @@ pub mod tests {
             AbsRequestSender::default(),
             None,
             &Arc::new(ConnectionCache::default()),
-            &_ignored_prioritization_fee_cache,
+            &ignored_prioritization_fee_cache,
             BankingTracer::new_disabled(),
         )
         .expect("assume success");


@@ -240,7 +240,6 @@ where
         let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?;
         if packet.meta().repair() {
             let repair_info = RepairMeta {
-                _from_addr: packet.meta().socket_addr(),
                 // If can't parse the nonce, dump the packet.
                 nonce: repair_response::nonce(packet)?,
             };
@@ -292,7 +291,6 @@ where
 }
 
 struct RepairMeta {
-    _from_addr: SocketAddr,
     nonce: Nonce,
 }
 
@@ -573,10 +571,7 @@ mod test {
 
     #[test]
     fn test_prune_shreds() {
-        use {
-            crate::serve_repair::ShredRepairType,
-            std::net::{IpAddr, Ipv4Addr},
-        };
+        use crate::serve_repair::ShredRepairType;
         solana_logger::setup();
         let shred = Shred::new_from_parity_shard(
             5, // slot
@@ -589,18 +584,14 @@ mod test {
             0, // version
         );
         let mut shreds = vec![shred.clone(), shred.clone(), shred];
-        let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080);
-        let repair_meta = RepairMeta {
-            _from_addr,
-            nonce: 0,
-        };
+        let repair_meta = RepairMeta { nonce: 0 };
         let outstanding_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default()));
         let repair_type = ShredRepairType::Orphan(9);
         let nonce = outstanding_requests
             .write()
             .unwrap()
             .add_request(repair_type, timestamp());
-        let repair_meta1 = RepairMeta { _from_addr, nonce };
+        let repair_meta1 = RepairMeta { nonce };
         let mut repair_infos = vec![None, Some(repair_meta), Some(repair_meta1)];
         prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, &outstanding_requests);
         assert_eq!(repair_infos.len(), 2);


@@ -71,6 +71,8 @@ struct SnapshotTestConfig {
     full_snapshot_archives_dir: TempDir,
     bank_snapshots_dir: TempDir,
     accounts_dir: PathBuf,
+    // as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep
+    // TempDir::drop from running, so that the dir is retained for the duration of the test
     _accounts_tmp_dir: TempDir,
 }
 
@@ -82,7 +84,7 @@ impl SnapshotTestConfig {
         full_snapshot_archive_interval_slots: Slot,
         incremental_snapshot_archive_interval_slots: Slot,
     ) -> SnapshotTestConfig {
-        let (_accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
+        let (accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
         let bank_snapshots_dir = TempDir::new().unwrap();
         let full_snapshot_archives_dir = TempDir::new().unwrap();
         let incremental_snapshot_archives_dir = TempDir::new().unwrap();
@@ -128,7 +130,7 @@ impl SnapshotTestConfig {
             full_snapshot_archives_dir,
             bank_snapshots_dir,
             accounts_dir,
-            _accounts_tmp_dir,
+            _accounts_tmp_dir: accounts_tmp_dir,
         }
     }
 }
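
The _accounts_tmp_dir field above is held purely for its Drop behavior: dropping a TempDir deletes the directory, so storing the handle keeps the directory alive as long as the struct. A minimal sketch of the keep-alive pattern, with a hypothetical Fixture type and assuming the tempfile crate:

    use tempfile::TempDir;

    struct Fixture {
        // never read; storing it ties the directory's lifetime to the struct
        _tmp_dir: TempDir,
    }

    fn make_fixture() -> std::io::Result<Fixture> {
        let tmp_dir = TempDir::new()?;
        // bind without the underscore, then move into the underscore-named field,
        // so no underscore-prefixed binding is ever used in an expression
        Ok(Fixture { _tmp_dir: tmp_dir })
    }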


@@ -389,11 +389,11 @@ pub async fn run_faucet(
     );
 
     loop {
-        let _faucet = faucet.clone();
+        let faucet = faucet.clone();
         match listener.accept().await {
             Ok((stream, _)) => {
                 tokio::spawn(async move {
-                    if let Err(e) = process(stream, _faucet).await {
+                    if let Err(e) = process(stream, faucet).await {
                         info!("failed to process request; error = {:?}", e);
                     }
                 });
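
This is the "clone while intentionally shadowing" bullet from the commit message: each iteration rebinds the same name to a fresh clone, so the move closure can take ownership without an underscore-prefixed temporary. A std-only sketch of the idiom, with hypothetical names:

    use std::sync::Arc;

    fn spawn_handlers(state: Arc<String>) {
        for id in 0..3 {
            let state = state.clone(); // shadows the outer binding for this iteration
            std::thread::spawn(move || {
                println!("handler {id}: {state}");
            });
        }
    }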


@@ -444,7 +444,7 @@ pub fn process_entries_for_tests(
         })
         .collect();
 
-    let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
+    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
     let result = process_entries(
         bank,
         &mut replay_entries,
@@ -453,7 +453,7 @@ pub fn process_entries_for_tests(
         replay_vote_sender,
         &mut batch_timing,
         None,
-        &_ignored_prioritization_fee_cache,
+        &ignored_prioritization_fee_cache,
     );
 
     debug!("process_entries: {:?}", batch_timing);
@@ -904,7 +904,7 @@ fn confirm_full_slot(
 ) -> result::Result<(), BlockstoreProcessorError> {
     let mut confirmation_timing = ConfirmationTiming::default();
     let skip_verification = !opts.run_verification;
-    let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
+    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
 
     confirm_slot(
         blockstore,
@@ -917,7 +917,7 @@ fn confirm_full_slot(
         recyclers,
         opts.allow_dead_slots,
         opts.runtime_config.log_messages_bytes_limit,
-        &_ignored_prioritization_fee_cache,
+        &ignored_prioritization_fee_cache,
     )?;
 
     timing.accumulate(&confirmation_timing.batch_execute.totals);


@@ -23,12 +23,12 @@ use {
 
 const CUDA_SUCCESS: c_int = 0;
 
-fn pin<T>(_mem: &mut Vec<T>) {
+fn pin<T>(mem: &mut Vec<T>) {
     if let Some(api) = perf_libs::api() {
         use std::{ffi::c_void, mem::size_of};
-        let ptr = _mem.as_mut_ptr();
-        let size = _mem.capacity().saturating_mul(size_of::<T>());
+        let ptr = mem.as_mut_ptr();
+        let size = mem.capacity().saturating_mul(size_of::<T>());
         let err = unsafe {
             (api.cuda_host_register)(ptr as *mut c_void, size, /*flags=*/ 0)
         };
@@ -39,14 +39,14 @@ fn pin<T>(_mem: &mut Vec<T>) {
     }
 }
 
-fn unpin<T>(_mem: *mut T) {
+fn unpin<T>(mem: *mut T) {
     if let Some(api) = perf_libs::api() {
         use std::ffi::c_void;
-        let err = unsafe { (api.cuda_host_unregister)(_mem as *mut c_void) };
+        let err = unsafe { (api.cuda_host_unregister)(mem as *mut c_void) };
         assert!(
             err == CUDA_SUCCESS,
-            "cudaHostUnregister returned: {err} ptr: {_mem:?}"
+            "cudaHostUnregister returned: {err} ptr: {mem:?}"
         );
     }
 }
@@ -277,21 +277,21 @@ impl<T: Clone + Default + Sized> PinnedVec<T> {
         self.x.shuffle(rng)
     }
 
-    fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) {
+    fn check_ptr(&mut self, old_ptr: *mut T, old_capacity: usize, from: &'static str) {
         let api = perf_libs::api();
         if api.is_some()
             && self.pinnable
-            && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity)
+            && (self.x.as_ptr() != old_ptr || self.x.capacity() != old_capacity)
         {
             if self.pinned {
-                unpin(_old_ptr);
+                unpin(old_ptr);
             }
             trace!(
                 "pinning from check_ptr old: {} size: {} from: {}",
-                _old_capacity,
+                old_capacity,
                 self.x.capacity(),
-                _from
+                from
             );
             pin(&mut self.x);
             self.pinned = true;


@@ -297,9 +297,9 @@ fn test_rpc_subscriptions() {
         let status_sender = status_sender.clone();
         let signature_subscription_ready_clone = signature_subscription_ready_clone.clone();
         tokio::spawn({
-            let _pubsub_client = Arc::clone(&pubsub_client);
+            let pubsub_client = Arc::clone(&pubsub_client);
             async move {
-                let (mut sig_notifications, sig_unsubscribe) = _pubsub_client
+                let (mut sig_notifications, sig_unsubscribe) = pubsub_client
                     .signature_subscribe(
                         &signature,
                         Some(RpcSignatureSubscribeConfig {
@@ -324,9 +324,9 @@ fn test_rpc_subscriptions() {
         let account_sender = account_sender.clone();
         let account_subscription_ready_clone = account_subscription_ready_clone.clone();
         tokio::spawn({
-            let _pubsub_client = Arc::clone(&pubsub_client);
+            let pubsub_client = Arc::clone(&pubsub_client);
             async move {
-                let (mut account_notifications, account_unsubscribe) = _pubsub_client
+                let (mut account_notifications, account_unsubscribe) = pubsub_client
                     .account_subscribe(
                         &pubkey,
                         Some(RpcAccountInfoConfig {


@@ -3404,10 +3404,10 @@ pub mod tests {
         index.unchecked_scan_accounts(
             "",
             &Ancestors::default(),
-            |pubkey, _index| {
+            |pubkey, index| {
                 if pubkey == &key {
                     found_key = true;
-                    assert_eq!(_index, (&true, 3));
+                    assert_eq!(index, (&true, 3));
                 };
                 num += 1
             },


@@ -158,6 +158,7 @@ impl TryFrom<&str> for Pubkey {
     }
 }
 
+#[allow(clippy::used_underscore_binding)]
 pub fn bytes_are_curve_point<T: AsRef<[u8]>>(_bytes: T) -> bool {
     #[cfg(not(target_os = "solana"))]
     {


@@ -341,6 +341,7 @@ impl VoteState {
         3762 // see test_vote_state_size_of.
     }
 
+    #[allow(clippy::used_underscore_binding)]
     pub fn deserialize(_input: &[u8]) -> Result<Self, InstructionError> {
         #[cfg(not(target_os = "solana"))]
         {
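
These two #[allow]s are the "cfg()-ed code" workaround: the parameter is genuinely unused in on-chain builds, so the underscore prefix silences unused_variables there, but the off-chain path does use it, which would otherwise trip the new lint. A reduced sketch with a hypothetical function:

    #[allow(clippy::used_underscore_binding)]
    pub fn payload_len(_input: &[u8]) -> usize {
        #[cfg(not(target_os = "solana"))]
        {
            _input.len() // used on host targets; the lint would fire without the allow
        }
        #[cfg(target_os = "solana")]
        {
            0 // the on-chain build never touches `_input`
        }
    }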


@@ -31,10 +31,12 @@ fn main() -> Result<(), Box<dyn Error>> {
     let client = RpcClient::new(json_rpc_url);
 
     let exit = Arc::new(AtomicBool::default());
-    let _exit = exit.clone();
     // Initialize CTRL-C handler to ensure db changes are written before exit.
-    ctrlc::set_handler(move || {
-        _exit.store(true, Ordering::SeqCst);
+    ctrlc::set_handler({
+        let exit = exit.clone();
+        move || {
+            exit.store(true, Ordering::SeqCst);
+        }
     })
     .expect("Error setting Ctrl-C handler");
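
This last hunk is the "more verbose code" bullet: rather than an underscore-prefixed clone, the clone lives in a block expression that evaluates to the handler closure, so no #[allow] is needed. Stripped of the ctrlc dependency, the shape is roughly (a std-only sketch):

    use std::sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    };

    fn main() {
        let exit = Arc::new(AtomicBool::new(false));
        let handler = {
            let exit = exit.clone(); // scoped clone; the block yields the closure
            move || exit.store(true, Ordering::SeqCst)
        };
        handler();
        assert!(exit.load(Ordering::SeqCst));
    }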