ci: treewide: deny used_underscore_binding (#31319)
* Enforce used_underscore_binding
* Fix all violations
* Work around cfg()-ed code...
* ci....
* Make clippy fixes more pleasant
* Clone exit signal while intentionally shadowing
* Use more verbose code to avoid any #[allow(...)]s
parent 685ebcb3b0
commit a30830d7a9
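The lint at the center of this change fires when a binding whose name starts with a single underscore is actually used; the prefix conventionally promises the opposite. A minimal illustration (names are hypothetical, not from the diff):

fn expensive() -> u64 {
    42
}

fn main() {
    let _count = expensive();
    println!("{}", _count); // clippy::used_underscore_binding fires: the "unused" binding is read

    let count = expensive(); // the fix: drop the prefix once the value is used
    println!("{count}");
}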
@@ -82,6 +82,7 @@ nightly_clippy_allows=()
     --deny=warnings \
     --deny=clippy::default_trait_access \
     --deny=clippy::integer_arithmetic \
+    --deny=clippy::used_underscore_binding \
     "${nightly_clippy_allows[@]}"
 
 if [[ -n $CI ]]; then
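The added --deny flag makes this lint a hard error for the whole clippy run. A crate can opt in on its own with the equivalent crate-level attribute; a sketch (the attribute is standard clippy usage, the crate content is illustrative):

// Hypothetical crate root: the same lint the CI flag enables, enforced
// from inside the crate so a local `cargo clippy` fails too.
#![deny(clippy::used_underscore_binding)]

fn main() {
    let _spare = 1; // still fine: never read, so the prefix is truthful
    println!("ok");
}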
@@ -761,7 +761,6 @@ mod test {
         super::*,
         crate::{
             cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
-            repair_service::DuplicateSlotsResetReceiver,
             replay_stage::{
                 tests::{replay_blockstore_components, ReplayBlockstoreComponents},
                 ReplayStage,
@@ -1039,7 +1038,6 @@ mod test {
         repairable_dead_slot_pool: HashSet<Slot>,
         request_throttle: Vec<u64>,
         repair_stats: AncestorRepairRequestsStats,
-        _duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
         retryable_slots_sender: RetryableSlotsSender,
         retryable_slots_receiver: RetryableSlotsReceiver,
         ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender,
@@ -1089,7 +1087,6 @@ mod test {
             repairable_dead_slot_pool: HashSet::new(),
             request_throttle: vec![],
             repair_stats: AncestorRepairRequestsStats::default(),
-            _duplicate_slots_reset_receiver,
             ancestor_hashes_replay_update_sender,
             ancestor_hashes_replay_update_receiver,
             retryable_slots_sender,
@@ -426,7 +426,7 @@ pub mod tests {
         let bank_forks = Arc::new(RwLock::new(bank_forks));
         let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
         let max_complete_rewards_slot = Arc::new(AtomicU64::default());
-        let _ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
+        let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let tvu = Tvu::new(
             &vote_keypair.pubkey(),
             Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])),
@@ -476,7 +476,7 @@ pub mod tests {
             AbsRequestSender::default(),
             None,
             &Arc::new(ConnectionCache::default()),
-            &_ignored_prioritization_fee_cache,
+            &ignored_prioritization_fee_cache,
             BankingTracer::new_disabled(),
         )
         .expect("assume success");
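The `_ignored_prioritization_fee_cache` bindings here, and in several hunks below, are read (they are passed by reference), so the underscore prefix was misleading; the rename keeps the "ignored" intent in the name without the prefix. The same idea in miniature (types are stand-ins):

struct Cache(u64);

fn process(cache: &Cache) -> u64 {
    cache.0
}

fn main() {
    // A used binding must not carry the underscore prefix; keep the
    // "this is a throwaway" intent in the name itself instead.
    let ignored_cache = Cache(0);
    assert_eq!(process(&ignored_cache), 0);
}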
@@ -240,7 +240,6 @@ where
         let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?;
         if packet.meta().repair() {
             let repair_info = RepairMeta {
-                _from_addr: packet.meta().socket_addr(),
                 // If can't parse the nonce, dump the packet.
                 nonce: repair_response::nonce(packet)?,
             };
@@ -292,7 +291,6 @@ where
 }
 
 struct RepairMeta {
-    _from_addr: SocketAddr,
     nonce: Nonce,
 }
 
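`_from_addr` was populated at every construction site but evidently never read, and the test below binds `_from_addr` and then uses it in field-init shorthand, which trips the lint. The fix deletes the write-only field outright instead of silencing the warning. A condensed sketch (types simplified):

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

struct Meta {
    _from_addr: SocketAddr, // write-only: never read anywhere
    nonce: u32,
}

fn main() {
    let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080);
    // Field-init shorthand *uses* the underscore binding, so the lint
    // fires; removing the dead field removes the binding too.
    let meta = Meta { _from_addr, nonce: 0 };
    assert_eq!(meta.nonce, 0);
}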
@@ -573,10 +571,7 @@ mod test {
 
     #[test]
     fn test_prune_shreds() {
-        use {
-            crate::serve_repair::ShredRepairType,
-            std::net::{IpAddr, Ipv4Addr},
-        };
+        use crate::serve_repair::ShredRepairType;
         solana_logger::setup();
         let shred = Shred::new_from_parity_shard(
             5, // slot
@@ -589,18 +584,14 @@ mod test {
             0, // version
         );
         let mut shreds = vec![shred.clone(), shred.clone(), shred];
-        let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080);
-        let repair_meta = RepairMeta {
-            _from_addr,
-            nonce: 0,
-        };
+        let repair_meta = RepairMeta { nonce: 0 };
         let outstanding_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default()));
         let repair_type = ShredRepairType::Orphan(9);
         let nonce = outstanding_requests
             .write()
             .unwrap()
             .add_request(repair_type, timestamp());
-        let repair_meta1 = RepairMeta { _from_addr, nonce };
+        let repair_meta1 = RepairMeta { nonce };
         let mut repair_infos = vec![None, Some(repair_meta), Some(repair_meta1)];
         prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, &outstanding_requests);
         assert_eq!(repair_infos.len(), 2);
@@ -71,6 +71,8 @@ struct SnapshotTestConfig {
     full_snapshot_archives_dir: TempDir,
     bank_snapshots_dir: TempDir,
     accounts_dir: PathBuf,
+    // as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep
+    // TempDir::drop from running to retain that dir for the duration of test
     _accounts_tmp_dir: TempDir,
 }
 
@@ -82,7 +84,7 @@ impl SnapshotTestConfig {
         full_snapshot_archive_interval_slots: Slot,
         incremental_snapshot_archive_interval_slots: Slot,
     ) -> SnapshotTestConfig {
-        let (_accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
+        let (accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
         let bank_snapshots_dir = TempDir::new().unwrap();
         let full_snapshot_archives_dir = TempDir::new().unwrap();
         let incremental_snapshot_archives_dir = TempDir::new().unwrap();
@@ -128,7 +130,7 @@ impl SnapshotTestConfig {
             full_snapshot_archives_dir,
             bank_snapshots_dir,
             accounts_dir,
-            _accounts_tmp_dir,
+            _accounts_tmp_dir: accounts_tmp_dir,
         }
     }
 }
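Here the struct field keeps its underscore prefix, since it exists only so TempDir::drop runs at the right time, while the local binding loses the prefix because it is genuinely used; explicit `field: value` syntax bridges the two names without an #[allow]. The pattern reduced to its core (Guard is a stand-in for TempDir):

struct Guard; // held only for its Drop side effects

struct Config {
    // underscore prefix documents "never read; kept alive for Drop"
    _tmp_dir: Guard,
}

fn new_config() -> Config {
    // the local binding is used below, so it must not carry the prefix
    let tmp_dir = Guard;
    Config {
        _tmp_dir: tmp_dir, // explicit field: value avoids using an underscore binding
    }
}

fn main() {
    let _config = new_config();
}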
@@ -389,11 +389,11 @@ pub async fn run_faucet(
     );
 
     loop {
-        let _faucet = faucet.clone();
+        let faucet = faucet.clone();
         match listener.accept().await {
             Ok((stream, _)) => {
                 tokio::spawn(async move {
-                    if let Err(e) = process(stream, _faucet).await {
+                    if let Err(e) = process(stream, faucet).await {
                         info!("failed to process request; error = {:?}", e);
                     }
                 });
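The per-iteration clone now shadows the outer `faucet` rather than taking a throwaway `_faucet` name; shadowing is the idiomatic way to hand "the same value, a new handle" to a spawned task. A minimal sketch of the pattern (assumes the tokio crate with its macros and rt features):

use std::sync::Arc;

#[tokio::main]
async fn main() {
    let shared = Arc::new(String::from("state"));
    for task_id in 0..3 {
        // shadow the outer binding: each spawned task owns its own Arc handle
        let shared = Arc::clone(&shared);
        tokio::spawn(async move {
            println!("task {task_id} sees {shared}");
        });
    }
}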
@@ -444,7 +444,7 @@ pub fn process_entries_for_tests(
         })
         .collect();
 
-    let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
+    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
     let result = process_entries(
         bank,
         &mut replay_entries,
@@ -453,7 +453,7 @@ pub fn process_entries_for_tests(
         replay_vote_sender,
         &mut batch_timing,
         None,
-        &_ignored_prioritization_fee_cache,
+        &ignored_prioritization_fee_cache,
     );
 
     debug!("process_entries: {:?}", batch_timing);
@@ -904,7 +904,7 @@ fn confirm_full_slot(
 ) -> result::Result<(), BlockstoreProcessorError> {
     let mut confirmation_timing = ConfirmationTiming::default();
     let skip_verification = !opts.run_verification;
-    let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
+    let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
 
     confirm_slot(
         blockstore,
@@ -917,7 +917,7 @@ fn confirm_full_slot(
         recyclers,
         opts.allow_dead_slots,
         opts.runtime_config.log_messages_bytes_limit,
-        &_ignored_prioritization_fee_cache,
+        &ignored_prioritization_fee_cache,
     )?;
 
     timing.accumulate(&confirmation_timing.batch_execute.totals);
@@ -23,12 +23,12 @@ use {
 
 const CUDA_SUCCESS: c_int = 0;
 
-fn pin<T>(_mem: &mut Vec<T>) {
+fn pin<T>(mem: &mut Vec<T>) {
     if let Some(api) = perf_libs::api() {
         use std::{ffi::c_void, mem::size_of};
 
-        let ptr = _mem.as_mut_ptr();
-        let size = _mem.capacity().saturating_mul(size_of::<T>());
+        let ptr = mem.as_mut_ptr();
+        let size = mem.capacity().saturating_mul(size_of::<T>());
         let err = unsafe {
             (api.cuda_host_register)(ptr as *mut c_void, size, /*flags=*/ 0)
         };
@@ -39,14 +39,14 @@ fn pin<T>(_mem: &mut Vec<T>) {
     }
 }
 
-fn unpin<T>(_mem: *mut T) {
+fn unpin<T>(mem: *mut T) {
     if let Some(api) = perf_libs::api() {
         use std::ffi::c_void;
 
-        let err = unsafe { (api.cuda_host_unregister)(_mem as *mut c_void) };
+        let err = unsafe { (api.cuda_host_unregister)(mem as *mut c_void) };
         assert!(
             err == CUDA_SUCCESS,
-            "cudaHostUnregister returned: {err} ptr: {_mem:?}"
+            "cudaHostUnregister returned: {err} ptr: {mem:?}"
         );
     }
 }
@@ -277,21 +277,21 @@ impl<T: Clone + Default + Sized> PinnedVec<T> {
         self.x.shuffle(rng)
     }
 
-    fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) {
+    fn check_ptr(&mut self, old_ptr: *mut T, old_capacity: usize, from: &'static str) {
         let api = perf_libs::api();
         if api.is_some()
             && self.pinnable
-            && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity)
+            && (self.x.as_ptr() != old_ptr || self.x.capacity() != old_capacity)
         {
             if self.pinned {
-                unpin(_old_ptr);
+                unpin(old_ptr);
             }
 
             trace!(
                 "pinning from check_ptr old: {} size: {} from: {}",
-                _old_capacity,
+                old_capacity,
                 self.x.capacity(),
-                _from
+                from
             );
             pin(&mut self.x);
             self.pinned = true;
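`_mem`, `_old_ptr`, and friends were presumably prefixed because they look unused when no GPU library is loaded, but the gate here is a runtime perf_libs::api() check, not a cfg, so the bindings are used on every build and the lint rightly objects. The distinction in toy form (`api` is a hypothetical stand-in):

fn api() -> Option<()> {
    None // stands in for perf_libs::api(): a runtime gate, not a compile-time cfg
}

// `mem` is used on every build target, merely behind a runtime branch,
// so it must not carry an underscore prefix.
fn pin(mem: &mut Vec<u8>) {
    if api().is_some() {
        let ptr = mem.as_mut_ptr();
        println!("would pin {ptr:?}");
    }
}

fn main() {
    let mut buf = vec![0u8; 4];
    pin(&mut buf);
}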
@@ -297,9 +297,9 @@ fn test_rpc_subscriptions() {
         let status_sender = status_sender.clone();
         let signature_subscription_ready_clone = signature_subscription_ready_clone.clone();
         tokio::spawn({
-            let _pubsub_client = Arc::clone(&pubsub_client);
+            let pubsub_client = Arc::clone(&pubsub_client);
             async move {
-                let (mut sig_notifications, sig_unsubscribe) = _pubsub_client
+                let (mut sig_notifications, sig_unsubscribe) = pubsub_client
                     .signature_subscribe(
                         &signature,
                         Some(RpcSignatureSubscribeConfig {
@@ -324,9 +324,9 @@ fn test_rpc_subscriptions() {
         let account_sender = account_sender.clone();
         let account_subscription_ready_clone = account_subscription_ready_clone.clone();
         tokio::spawn({
-            let _pubsub_client = Arc::clone(&pubsub_client);
+            let pubsub_client = Arc::clone(&pubsub_client);
             async move {
-                let (mut account_notifications, account_unsubscribe) = _pubsub_client
+                let (mut account_notifications, account_unsubscribe) = pubsub_client
                     .account_subscribe(
                         &pubkey,
                         Some(RpcAccountInfoConfig {
@@ -3404,10 +3404,10 @@ pub mod tests {
         index.unchecked_scan_accounts(
             "",
             &Ancestors::default(),
-            |pubkey, _index| {
+            |pubkey, index| {
                 if pubkey == &key {
                     found_key = true;
-                    assert_eq!(_index, (&true, 3));
+                    assert_eq!(index, (&true, 3));
                 };
                 num += 1
             },
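Closure parameters get the same treatment: `_index` was asserted against, so the prefix was wrong. Note the renamed parameter now shadows the outer `index` inside the closure body, which is harmless because the body only needs the callback's value. In miniature:

fn main() {
    let index = vec![10, 20, 30];
    // the parameter shadows `index` inside the closure; since it is
    // read by the assert, it must not be spelled `_index`
    index.iter().enumerate().for_each(|(i, index)| {
        assert_eq!(*index, (i as i32 + 1) * 10);
    });
}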
@@ -158,6 +158,7 @@ impl TryFrom<&str> for Pubkey {
     }
 }
 
+#[allow(clippy::used_underscore_binding)]
 pub fn bytes_are_curve_point<T: AsRef<[u8]>>(_bytes: T) -> bool {
     #[cfg(not(target_os = "solana"))]
     {
@@ -341,6 +341,7 @@ impl VoteState {
         3762 // see test_vote_state_size_of.
     }
 
+    #[allow(clippy::used_underscore_binding)]
     pub fn deserialize(_input: &[u8]) -> Result<Self, InstructionError> {
         #[cfg(not(target_os = "solana"))]
         {
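These two hunks are the "work around cfg()-ed code" cases from the commit message: the parameter is read on one cfg branch and unused on the other, so neither `bytes` nor `_bytes` is clean on every target, and a narrowly scoped #[allow] is the pragmatic escape hatch. A reduced sketch of the shape (the cfg key mirrors the diff; the branch bodies are placeholders):

// On non-solana targets the parameter is read; on solana targets the
// real function delegates to a syscall and the parameter goes unused.
#[allow(clippy::used_underscore_binding)]
pub fn bytes_are_curve_point<T: AsRef<[u8]>>(_bytes: T) -> bool {
    #[cfg(not(target_os = "solana"))]
    {
        _bytes.as_ref().len() == 32 // placeholder for the real curve check
    }
    #[cfg(target_os = "solana")]
    unimplemented!()
}

fn main() {
    assert!(bytes_are_curve_point([0u8; 32]));
}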
@@ -31,10 +31,12 @@ fn main() -> Result<(), Box<dyn Error>> {
     let client = RpcClient::new(json_rpc_url);
 
     let exit = Arc::new(AtomicBool::default());
-    let _exit = exit.clone();
     // Initialize CTRL-C handler to ensure db changes are written before exit.
-    ctrlc::set_handler(move || {
-        _exit.store(true, Ordering::SeqCst);
+    ctrlc::set_handler({
+        let exit = exit.clone();
+        move || {
+            exit.store(true, Ordering::SeqCst);
+        }
     })
     .expect("Error setting Ctrl-C handler");
 
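This is the "clone exit signal while intentionally shadowing" item: the clone happens inside a block expression passed straight to ctrlc::set_handler, so the shadowing `exit` exists only while the closure is built and the outer `exit` remains usable afterwards. The same shape with only standard-library types (`install` is a hypothetical stand-in for set_handler):

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

fn install<F: Fn() + Send + 'static>(handler: F) {
    handler(); // stand-in for ctrlc::set_handler: just invoke the closure once
}

fn main() {
    let exit = Arc::new(AtomicBool::default());
    install({
        // the block scopes the clone; the shadowed name never leaks out
        let exit = exit.clone();
        move || exit.store(true, Ordering::SeqCst)
    });
    assert!(exit.load(Ordering::SeqCst)); // the outer handle is still usable
}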