Resolve nightly-2021-10-05 clippy complaints

Michael Vines 2021-10-05 22:24:48 -07:00
parent eb4ce3dfed
commit 7027d56064
53 changed files with 229 additions and 293 deletions
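The fixes below fall into a handful of recurring patterns: if-then-panic! blocks rewritten as a single assert! with the condition inverted; hand-written Default impls that match the derived one replaced by #[derive(Default)]; redundant mut bindings and &mut re-borrows dropped from parameters that are already mutable references; closures that merely forward their argument either removed or explicitly allowed; and #[allow(dead_code)] added to fields the new toolchain reports as never read.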

View File

@ -121,9 +121,7 @@ impl BucketStorage {
}
pub fn uid(&self, ix: u64) -> Uid {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
@ -133,12 +131,8 @@ impl BucketStorage {
}
pub fn allocate(&self, ix: u64, uid: Uid) -> Result<(), BucketStorageError> {
if ix >= self.capacity() {
panic!("allocate: bad index size");
}
if UID_UNLOCKED == uid {
panic!("allocate: bad uid");
}
assert!(ix < self.capacity(), "allocate: bad index size");
assert!(UID_UNLOCKED != uid, "allocate: bad uid");
let mut e = Err(BucketStorageError::AlreadyAllocated);
let ix = (ix * self.cell_size) as usize;
//debug!("ALLOC {} {}", ix, uid);
@ -154,12 +148,8 @@ impl BucketStorage {
}
pub fn free(&self, ix: u64, uid: Uid) {
if ix >= self.capacity() {
panic!("free: bad index size");
}
if UID_UNLOCKED == uid {
panic!("free: bad uid");
}
assert!(ix < self.capacity(), "bad index size");
assert!(UID_UNLOCKED != uid, "free: bad uid");
let ix = (ix * self.cell_size) as usize;
//debug!("FREE {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
@ -177,9 +167,7 @@ impl BucketStorage {
}
pub fn get<T: Sized>(&self, ix: u64) -> &T {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>();
let item_slice: &[u8] = &self.mmap[start..end];
@ -199,9 +187,7 @@ impl BucketStorage {
}
pub fn get_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &[T] {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let ix = self.cell_size * ix;
let start = ix as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>() * len as usize;
@ -215,9 +201,7 @@ impl BucketStorage {
#[allow(clippy::mut_from_ref)]
pub fn get_mut<T: Sized>(&self, ix: u64) -> &mut T {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>();
let item_slice: &[u8] = &self.mmap[start..end];
@ -229,9 +213,7 @@ impl BucketStorage {
#[allow(clippy::mut_from_ref)]
pub fn get_mut_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &mut [T] {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let ix = self.cell_size * ix;
let start = ix as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>() * len as usize;
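
These BucketStorage hunks all apply one rewrite, and it recurs through the rest of the commit: an explicit if-then-panic! block collapses into a single assert! with the condition inverted (presumably clippy's manual_assert lint). A minimal sketch of the before and after, using stand-in names rather than the real method:

fn bounds_check(ix: u64, capacity: u64) {
    // Before: the form the new clippy complains about.
    // if ix >= capacity {
    //     panic!("bad index size");
    // }

    // After: same panic, same message, condition inverted.
    assert!(ix < capacity, "bad index size");
}

The same transformation also lands in else branches further down, e.g. "} else if !blockstore.is_root(start_slot) { panic!(...) }" becoming "} else { assert!(blockstore.is_root(start_slot), ...) }" in the blockstore-processing code.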

View File

@ -324,19 +324,11 @@ pub fn presigner_from_pubkey_sigs(
})
}
#[derive(Debug)]
#[derive(Debug, Default)]
pub struct SignerFromPathConfig {
pub allow_null_signer: bool,
}
impl Default for SignerFromPathConfig {
fn default() -> Self {
Self {
allow_null_signer: false,
}
}
}
pub fn signer_from_path(
matches: &ArgMatches,
path: &str,
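
The deleted Default impl above did exactly what the derive generates (bool defaults to false), which clippy's derivable_impls lint flags; the fix is simply to add Default to the derive list. The same rewrite appears below for Executors, InstructionProcessor, and FeeCalculator. A compilable sketch of the equivalence:

// Before: a hand-written impl identical to the derived one.
//
// impl Default for SignerFromPathConfig {
//     fn default() -> Self {
//         Self { allow_null_signer: false }
//     }
// }

// After: bool::default() is false, so deriving is equivalent.
#[derive(Debug, Default)]
pub struct SignerFromPathConfig {
    pub allow_null_signer: bool,
}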

View File

@ -149,7 +149,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
pub fn parse_args<'a>(
matches: &ArgMatches<'_>,
mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
let config = if let Some(config_file) = matches.value_of("config_file") {
Config::load(config_file).unwrap_or_default()
@ -186,11 +186,11 @@ pub fn parse_args<'a>(
let CliCommandInfo {
command,
mut signers,
} = parse_command(matches, &default_signer, &mut wallet_manager)?;
} = parse_command(matches, &default_signer, wallet_manager)?;
if signers.is_empty() {
if let Ok(signer_info) =
default_signer.generate_unique_signers(vec![None], matches, &mut wallet_manager)
default_signer.generate_unique_signers(vec![None], matches, wallet_manager)
{
signers.extend(signer_info.signers);
}
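
wallet_manager already arrives as &mut Option<...>, so both the mut on the binding and the &mut at each call site are redundant re-borrows, which this nightly's clippy flags as needless borrows. A minimal sketch of the pattern, with stand-in names:

fn bump(counter: &mut u64) {
    *counter += 1;
}

fn caller(counter: &mut u64) {
    // Before (flagged): bump(&mut counter) re-borrowed a binding that
    // is already a mutable reference, and the signature needed
    // "mut counter: &mut u64" only to permit that.

    // After: the existing &mut u64 is passed through directly.
    bump(counter);
}

The same cleanup repeats in OptimisticallyConfirmedBankTracker, Accounts::lock_accounts, the ReplayStage tests, Shredder, collect_token_balances, and parse_token below.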

View File

@ -510,6 +510,7 @@ pub fn process_get_nonce(
config: &CliConfig,
nonce_account_pubkey: &Pubkey,
) -> ProcessResult {
#[allow(clippy::redundant_closure)]
match get_account_with_commitment(rpc_client, nonce_account_pubkey, config.commitment)
.and_then(|ref a| state_from_account(a))?
{

View File

@ -215,6 +215,7 @@ fn full_battery_tests(
}
#[test]
#[allow(clippy::redundant_closure)]
fn test_create_account_with_seed() {
solana_logger::setup();
let mint_keypair = Keypair::new();

View File

@ -1,3 +1,4 @@
#![allow(clippy::redundant_closure)]
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,

View File

@ -1,3 +1,4 @@
#![allow(clippy::redundant_closure)]
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,

View File

@ -34,6 +34,7 @@ impl Source {
Ok((res.0, res.1))
}
Self::NonceAccount(ref pubkey) => {
#[allow(clippy::redundant_closure)]
let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
.and_then(|ref a| nonce_utils::data_from_account(a))?;
Ok((data.blockhash, data.fee_calculator))
@ -80,6 +81,7 @@ impl Source {
Ok(blockhash)
}
Self::NonceAccount(ref pubkey) => {
#[allow(clippy::redundant_closure)]
let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
.and_then(|ref a| nonce_utils::data_from_account(a))?;
Ok(data.blockhash)
@ -96,6 +98,7 @@ impl Source {
Ok(match self {
Self::Cluster => rpc_client.is_blockhash_valid(blockhash, commitment)?,
Self::NonceAccount(ref pubkey) => {
#[allow(clippy::redundant_closure)]
let _ = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
.and_then(|ref a| nonce_utils::data_from_account(a))?;
true
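
Here clippy sees |ref a| nonce_utils::data_from_account(a) as a closure that merely forwards its argument, but the ref pattern is doing real work: and_then hands over the account by value and the helper wants a reference, so the suggested .and_then(nonce_utils::data_from_account) would not type-check. Hence the allow rather than the rewrite. A self-contained sketch of the situation, with a hypothetical takes_ref helper:

fn takes_ref(x: &u64) -> Result<u64, ()> {
    Ok(x + 1)
}

fn demo(v: Result<u64, ()>) -> Result<u64, ()> {
    // and_then passes the u64 by value; the ref pattern rebinds it
    // as &u64 for takes_ref. Dropping the closure would leave no
    // place to introduce the reference, so the lint is allowed.
    #[allow(clippy::redundant_closure)]
    v.and_then(|ref a| takes_ref(a))
}

Where the closure really is redundant the commit removes it instead, e.g. .for_each(|p| verify_packet(p)) becoming .for_each(verify_packet) in sigverify and .any(|p| is_token_program(p)) becoming .any(is_token_program) in the token-balances code.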

View File

@ -4,7 +4,6 @@ use {
itertools::Itertools,
solana_entry::entry::Entry,
solana_ledger::shred::Shredder,
solana_runtime::blockhash_queue::BlockhashQueue,
solana_sdk::{
hash::Hash,
signature::{Keypair, Signature, Signer},
@ -26,11 +25,6 @@ pub struct BroadcastDuplicatesConfig {
#[derive(Clone)]
pub(super) struct BroadcastDuplicatesRun {
config: BroadcastDuplicatesConfig,
// Local queue for broadcast to track which duplicate blockhashes we've sent
duplicate_queue: BlockhashQueue,
// Buffer for duplicate entries
duplicate_entries_buffer: Vec<Entry>,
last_duplicate_entry_hash: Hash,
current_slot: Slot,
next_shred_index: u32,
shred_version: u16,
@ -50,10 +44,7 @@ impl BroadcastDuplicatesRun {
));
Self {
config,
duplicate_queue: BlockhashQueue::default(),
duplicate_entries_buffer: vec![],
next_shred_index: u32::MAX,
last_duplicate_entry_hash: Hash::default(),
shred_version,
current_slot: 0,
recent_blockhash: None,

View File

@ -32,7 +32,6 @@ pub struct StandardBroadcastRun {
last_datapoint_submit: Arc<AtomicInterval>,
num_batches: usize,
cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
last_peer_update: Arc<AtomicInterval>,
}
impl StandardBroadcastRun {
@ -52,7 +51,6 @@ impl StandardBroadcastRun {
last_datapoint_submit: Arc::default(),
num_batches: 0,
cluster_nodes_cache,
last_peer_update: Arc::new(AtomicInterval::default()),
}
}

View File

@ -1313,7 +1313,7 @@ pub mod test {
}
VoteState::serialize(
&VoteStateVersions::new_current(vote_state),
&mut account.data_as_mut_slice(),
account.data_as_mut_slice(),
)
.expect("serialize state");
(

View File

@ -3535,7 +3535,7 @@ pub mod tests {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank1 = bank_forks.read().unwrap().get(1).cloned().unwrap();
let mut bank1_progress = progress
let bank1_progress = progress
.entry(bank1.slot())
.or_insert_with(|| ForkProgress::new(bank1.last_blockhash(), None, None, 0, 0));
let shreds = shred_to_insert(
@ -3548,7 +3548,7 @@ pub mod tests {
let res = ReplayStage::replay_blockstore_into_bank(
&bank1,
&blockstore,
&mut bank1_progress,
bank1_progress,
None,
&replay_vote_sender,
&VerifyRecyclers::default(),
@ -3923,7 +3923,7 @@ pub mod tests {
.values()
.cloned()
.collect();
let mut heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
let heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
let mut latest_validator_votes_for_frozen_banks =
LatestValidatorVotesForFrozenBanks::default();
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
@ -3938,7 +3938,7 @@ pub mod tests {
&VoteTracker::default(),
&ClusterSlots::default(),
&vote_simulator.bank_forks,
&mut heaviest_subtree_fork_choice,
heaviest_subtree_fork_choice,
&mut latest_validator_votes_for_frozen_banks,
);

View File

@ -95,9 +95,9 @@ impl ShredFetchStage {
}
}
stats.shred_count += p.packets.len();
p.packets.iter_mut().for_each(|mut packet| {
p.packets.iter_mut().for_each(|packet| {
Self::process_packet(
&mut packet,
packet,
&mut shreds_received,
&mut stats,
last_root,

View File

@ -29,25 +29,24 @@ mod tests {
#[derive(Debug)]
struct BenchmarkConfig {
pub benchmark_slots: u64,
pub batch_size: u64,
pub max_ledger_shreds: u64,
pub entries_per_slot: u64,
pub stop_size_bytes: u64,
pub stop_size_iterations: u64,
pub pre_generate_data: bool,
pub cleanup_blockstore: bool,
pub emit_cpu_info: bool,
pub assert_compaction: bool,
pub compaction_interval: Option<u64>,
pub no_compaction: bool,
benchmark_slots: u64,
batch_size: u64,
max_ledger_shreds: u64,
entries_per_slot: u64,
stop_size_bytes: u64,
stop_size_iterations: u64,
pre_generate_data: bool,
cleanup_blockstore: bool,
assert_compaction: bool,
compaction_interval: Option<u64>,
no_compaction: bool,
}
#[derive(Clone, Copy, Debug)]
struct CpuStatsInner {
pub cpu_user: f32,
pub cpu_system: f32,
pub cpu_idle: f32,
cpu_user: f32,
cpu_system: f32,
cpu_idle: f32,
}
impl From<CPULoad> for CpuStatsInner {
@ -153,7 +152,6 @@ mod tests {
let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS);
let pre_generate_data = read_env("PRE_GENERATE_DATA", false);
let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true);
let emit_cpu_info = read_env("EMIT_CPU_INFO", true);
// set default to `true` once compaction is merged
let assert_compaction = read_env("ASSERT_COMPACTION", false);
let compaction_interval = match read_env("COMPACTION_INTERVAL", 0) {
@ -171,7 +169,6 @@ mod tests {
stop_size_iterations,
pre_generate_data,
cleanup_blockstore,
emit_cpu_info,
assert_compaction,
compaction_interval,
no_compaction,

View File

@ -573,9 +573,7 @@ impl EntrySlice for [Entry] {
1,
);
}
if res != 0 {
panic!("GPU PoH verify many failed");
}
assert!(res == 0, "GPU PoH verify many failed");
inc_new_counter_info!(
"entry_verify-gpu_thread",
timing::duration_as_us(&gpu_wait.elapsed()) as usize

View File

@ -84,16 +84,6 @@ pub enum FaucetRequest {
},
}
impl Default for FaucetRequest {
fn default() -> Self {
Self::GetAirdrop {
lamports: u64::default(),
to: Pubkey::default(),
blockhash: Hash::default(),
}
}
}
pub enum FaucetTransaction {
Airdrop(Transaction),
Memo((Transaction, String)),
@ -416,7 +406,15 @@ async fn process(
mut stream: TokioTcpStream,
faucet: Arc<Mutex<Faucet>>,
) -> Result<(), Box<dyn std::error::Error>> {
let mut request = vec![0u8; serialized_size(&FaucetRequest::default()).unwrap() as usize];
let mut request = vec![
0u8;
serialized_size(&FaucetRequest::GetAirdrop {
lamports: u64::default(),
to: Pubkey::default(),
blockhash: Hash::default(),
})
.unwrap() as usize
];
while stream.read_exact(&mut request).await.is_ok() {
trace!("{:?}", request);

View File

@ -194,9 +194,7 @@ impl AbiDigester {
label: &'static str,
variant: &'static str,
) -> Result<(), DigestError> {
if !self.for_enum {
panic!("derive AbiEnumVisitor or implement it for the enum, which contains a variant ({}) named {}", label, variant);
}
assert!(self.for_enum, "derive AbiEnumVisitor or implement it for the enum, which contains a variant ({}) named {}", label, variant);
Ok(())
}

View File

@ -1728,6 +1728,7 @@ fn main() {
}
("shred-meta", Some(arg_matches)) => {
#[derive(Debug)]
#[allow(dead_code)]
struct ShredMeta<'a> {
slot: Slot,
full_slot: bool,
@ -1876,9 +1877,10 @@ fn main() {
wal_recovery_mode,
);
let mut ancestors = BTreeSet::new();
if blockstore.meta(ending_slot).unwrap().is_none() {
panic!("Ending slot doesn't exist");
}
assert!(
blockstore.meta(ending_slot).unwrap().is_some(),
"Ending slot doesn't exist"
);
for a in AncestorIterator::new(ending_slot, &blockstore) {
ancestors.insert(a);
if a <= starting_slot {

View File

@ -602,8 +602,8 @@ fn do_process_blockstore_from_root(
blockstore
.set_roots(std::iter::once(&start_slot))
.expect("Couldn't set root slot on startup");
} else if !blockstore.is_root(start_slot) {
panic!("starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
} else {
assert!(blockstore.is_root(start_slot), "starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
}
if let Ok(metas) = blockstore.slot_meta_iterator(start_slot) {
@ -1340,8 +1340,8 @@ fn process_single_slot(
blockstore
.set_dead_slot(slot)
.expect("Failed to mark slot as dead in blockstore");
} else if !blockstore.is_dead(slot) {
panic!("Failed slot isn't dead and can't update due to being secondary blockstore access: {}", slot);
} else {
assert!(blockstore.is_dead(slot), "Failed slot isn't dead and can't update due to being secondary blockstore access: {}", slot);
}
err
})?;

View File

@ -721,8 +721,8 @@ impl Shredder {
// 2) Sign coding shreds
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
coding_shreds.par_iter_mut().for_each(|mut coding_shred| {
Shredder::sign_shred(keypair, &mut coding_shred);
coding_shreds.par_iter_mut().for_each(|coding_shred| {
Shredder::sign_shred(keypair, coding_shred);
})
})
});
@ -858,7 +858,7 @@ impl Shredder {
if num_coding > 0 && shreds.len() < fec_set_size {
// Let's try recovering missing shreds using erasure
let mut present = &mut vec![true; fec_set_size];
let present = &mut vec![true; fec_set_size];
let mut next_expected_index = first_index;
let mut shred_bufs: Vec<Vec<u8>> = shreds
.into_iter()
@ -871,7 +871,7 @@ impl Shredder {
first_index,
next_expected_index,
index,
&mut present,
present,
);
blocks.push(shred.payload);
next_expected_index = index + 1;
@ -887,7 +887,7 @@ impl Shredder {
first_index,
next_expected_index,
first_index + fec_set_size,
&mut present,
present,
);
shred_bufs.append(&mut pending_shreds);

View File

@ -323,7 +323,7 @@ pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) {
batches.par_iter_mut().for_each(|p| {
p.packets[..]
.par_iter_mut()
.for_each(|mut p| sign_shred_cpu(keypair, &mut p));
.for_each(|p| sign_shred_cpu(keypair, p));
});
});
inc_new_counter_debug!("ed25519_shred_verify_cpu", count);

View File

@ -1478,9 +1478,7 @@ fn generate_frozen_account_panic(mut cluster: LocalCluster, frozen_account: Arc<
sleep(Duration::from_secs(1));
i += 1;
if i > 10 {
panic!("FROZEN_ACCOUNT_PANIC still false");
}
assert!(i <= 10, "FROZEN_ACCOUNT_PANIC still false");
}
// The validator is now broken and won't shutdown properly. Avoid LocalCluster panic in Drop
@ -3340,12 +3338,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
let now = Instant::now();
loop {
let elapsed = now.elapsed();
if elapsed > Duration::from_secs(30) {
panic!(
"LocalCluster nodes failed to log enough tower votes in {} secs",
elapsed.as_secs()
);
}
assert!(
elapsed <= Duration::from_secs(30),
"LocalCluster nodes failed to log enough tower votes in {} secs",
elapsed.as_secs()
);
sleep(Duration::from_millis(100));
if let Some((last_vote, _)) = last_vote_in_tower(&val_b_ledger_path, &validator_b_pubkey) {

View File

@ -153,9 +153,10 @@ fn process_iftop_logs(matches: &ArgMatches) {
fn analyze_logs(matches: &ArgMatches) {
let dir_path = PathBuf::from(value_t_or_exit!(matches, "folder", String));
if !dir_path.is_dir() {
panic!("Need a folder that contains all log files");
}
assert!(
dir_path.is_dir(),
"Need a folder that contains all log files"
);
let list_all_diffs = matches.is_present("all");
let files = fs::read_dir(dir_path).expect("Failed to read log folder");
let logs: Vec<_> = files

View File

@ -389,9 +389,7 @@ fn shape_network_steps(
my_index: u64,
) -> bool {
// Integrity checks
if !topology.verify() {
panic!("Failed to verify the configuration file");
}
assert!(topology.verify(), "Failed to verify the configuration file");
assert!(my_index < network_size);
// Figure out partition we belong in
@ -479,9 +477,7 @@ fn configure(matches: &ArgMatches) {
NetworkTopology::new_random(max_partitions, max_drop, max_delay)
};
if !config.verify() {
panic!("Failed to verify the configuration");
}
assert!(config.verify(), "Failed to verify the configuration");
let topology = serde_json::to_string(&config).expect("Failed to write as JSON");

View File

@ -28,12 +28,13 @@ fn pin<T>(_mem: &mut Vec<T>) {
let err = unsafe {
(api.cuda_host_register)(ptr as *mut c_void, size, /*flags=*/ 0)
};
if err != CUDA_SUCCESS {
panic!(
"cudaHostRegister error: {} ptr: {:?} bytes: {}",
err, ptr, size,
);
}
assert!(
err == CUDA_SUCCESS,
"cudaHostRegister error: {} ptr: {:?} bytes: {}",
err,
ptr,
size
);
}
}
@ -42,9 +43,12 @@ fn unpin<T>(_mem: *mut T) {
use std::ffi::c_void;
let err = unsafe { (api.cuda_host_unregister)(_mem as *mut c_void) };
if err != CUDA_SUCCESS {
panic!("cudaHostUnregister returned: {} ptr: {:?}", err, _mem);
}
assert!(
err == CUDA_SUCCESS,
"cudaHostUnregister returned: {} ptr: {:?}",
err,
_mem
);
}
}

View File

@ -18,7 +18,6 @@ const RECYCLER_SHRINK_WINDOW: usize = 16384;
#[derive(Debug, Default)]
struct RecyclerStats {
total: AtomicUsize,
freed: AtomicUsize,
reuse: AtomicUsize,
max_gc: AtomicUsize,
}

View File

@ -96,9 +96,7 @@ pub fn init() {
if let Some(api) = perf_libs::api() {
unsafe {
(api.ed25519_set_verbose)(true);
if !(api.ed25519_init)() {
panic!("ed25519_init() failed");
}
assert!((api.ed25519_init)(), "ed25519_init() failed");
(api.ed25519_set_verbose)(false);
}
}
@ -413,7 +411,7 @@ pub fn ed25519_verify_cpu(batches: &mut [Packets]) {
PAR_THREAD_POOL.install(|| {
batches
.into_par_iter()
.for_each(|p| p.packets.par_iter_mut().for_each(|p| verify_packet(p)))
.for_each(|p| p.packets.par_iter_mut().for_each(verify_packet))
});
inc_new_counter_debug!("ed25519_verify_cpu", count);
}

View File

@ -21,18 +21,11 @@ use std::{
sync::Arc,
};
#[derive(Default)]
pub struct Executors {
pub executors: HashMap<Pubkey, Arc<dyn Executor>>,
pub is_dirty: bool,
}
impl Default for Executors {
fn default() -> Self {
Self {
executors: HashMap::default(),
is_dirty: false,
}
}
}
impl Executors {
pub fn insert(&mut self, key: Pubkey, executor: Arc<dyn Executor>) {
let _ = self.executors.insert(key, executor);
@ -267,7 +260,7 @@ impl PreAccount {
}
}
#[derive(Deserialize, Serialize)]
#[derive(Default, Deserialize, Serialize)]
pub struct InstructionProcessor {
#[serde(skip)]
programs: Vec<(Pubkey, ProcessInstructionWithContext)>,
@ -279,7 +272,9 @@ impl std::fmt::Debug for InstructionProcessor {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
#[derive(Debug)]
struct MessageProcessor<'a> {
#[allow(dead_code)]
programs: Vec<String>,
#[allow(dead_code)]
native_loader: &'a NativeLoader,
}
@ -309,14 +304,6 @@ impl std::fmt::Debug for InstructionProcessor {
}
}
impl Default for InstructionProcessor {
fn default() -> Self {
Self {
programs: vec![],
native_loader: NativeLoader::default(),
}
}
}
impl Clone for InstructionProcessor {
fn clone(&self) -> Self {
InstructionProcessor {

View File

@ -322,9 +322,11 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
break;
}
}
if !program_signer {
panic!("Missing signer for {}", instruction_account.pubkey);
}
assert!(
program_signer,
"Missing signer for {}",
instruction_account.pubkey
);
}
}
}
@ -355,15 +357,14 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
unsafe { transmute::<&Pubkey, &mut Pubkey>(account_info.owner) };
*account_info_mut = *account.borrow().owner();
}
if data.len() != new_data.len() {
// TODO: Figure out how to allow the System Program to resize the account data
panic!(
"Account data resizing not supported yet: {} -> {}. \
// TODO: Figure out how to allow the System Program to resize the account data
assert!(
data.len() == new_data.len(),
"Account data resizing not supported yet: {} -> {}. \
Consider making this test conditional on `#[cfg(feature = \"test-bpf\")]`",
data.len(),
new_data.len()
);
}
data.len(),
new_data.len()
);
data.clone_from_slice(new_data);
}
}

View File

@ -6,6 +6,7 @@ use std::alloc::Layout;
#[derive(Debug)]
pub struct BpfAllocator {
#[allow(dead_code)]
heap: AlignedMemory,
start: u64,
len: u64,

View File

@ -1822,7 +1822,9 @@ struct SolAccountInfo {
data_addr: u64,
owner_addr: u64,
rent_epoch: u64,
#[allow(dead_code)]
is_signer: bool,
#[allow(dead_code)]
is_writable: bool,
executable: bool,
}
@ -1837,7 +1839,9 @@ struct SolSignerSeedC {
/// Rust representation of C's SolSignerSeeds
#[derive(Debug)]
struct SolSignerSeedsC {
#[allow(dead_code)]
addr: u64,
#[allow(dead_code)]
len: u64,
}
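
The scattered #[allow(dead_code)] additions in this commit trace back to a rustc change in this nightly: automatically derived impls (notably Debug) reportedly no longer count as uses for the dead_code lint, so fields that exist only for FFI layout or debug output are now reported as never read. Fields that must stay, like these, get the allow; genuinely unused fields are deleted instead (RecyclerStats::freed, ScanSlotTracker::ref_count, and the BroadcastDuplicatesRun buffers elsewhere in the commit). A sketch under that assumption, with a hypothetical FfiRecord:

#[derive(Debug)]
#[repr(C)]
struct FfiRecord {
    used: u64,
    // Never read from Rust; kept for the C-side layout. Without the
    // allow, the new nightly reports the field as dead even though
    // the derived Debug impl formats it.
    #[allow(dead_code)]
    reserved: u64,
}

fn main() {
    let r = FfiRecord { used: 1, reserved: 0 };
    println!("debug: {:?}, used: {}", r, r.used);
}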

View File

@ -235,7 +235,7 @@ impl VoteState {
// utility function, used by Stakes, tests
pub fn to<T: WritableAccount>(versioned: &VoteStateVersions, account: &mut T) -> Option<()> {
Self::serialize(versioned, &mut account.data_as_mut_slice()).ok()
Self::serialize(versioned, account.data_as_mut_slice()).ok()
}
pub fn deserialize(input: &[u8]) -> Result<Self, InstructionError> {

View File

@ -224,6 +224,7 @@ pub trait RemoteWallet {
/// `RemoteWallet` device
#[derive(Debug)]
pub struct Device {
#[allow(dead_code)]
pub(crate) path: String,
pub(crate) info: RemoteWalletInfo,
pub wallet_type: RemoteWalletType,

View File

@ -99,9 +99,9 @@ impl OptimisticallyConfirmedBankTracker {
bank_forks: &Arc<RwLock<BankForks>>,
optimistically_confirmed_bank: &Arc<RwLock<OptimisticallyConfirmedBank>>,
subscriptions: &Arc<RpcSubscriptions>,
mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
mut last_notified_confirmed_slot: &mut Slot,
mut highest_confirmed_slot: &mut Slot,
pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
last_notified_confirmed_slot: &mut Slot,
highest_confirmed_slot: &mut Slot,
bank_notification_subscribers: &Option<Arc<RwLock<Vec<BankNotificationSender>>>>,
) -> Result<(), RecvTimeoutError> {
let notification = receiver.recv_timeout(Duration::from_secs(1))?;
@ -110,9 +110,9 @@ impl OptimisticallyConfirmedBankTracker {
bank_forks,
optimistically_confirmed_bank,
subscriptions,
&mut pending_optimistically_confirmed_banks,
&mut last_notified_confirmed_slot,
&mut highest_confirmed_slot,
pending_optimistically_confirmed_banks,
last_notified_confirmed_slot,
highest_confirmed_slot,
bank_notification_subscribers,
);
Ok(())
@ -169,8 +169,8 @@ impl OptimisticallyConfirmedBankTracker {
bank_forks: &Arc<RwLock<BankForks>>,
bank: &Arc<Bank>,
slot_threshold: Slot,
mut last_notified_confirmed_slot: &mut Slot,
mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
last_notified_confirmed_slot: &mut Slot,
pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
bank_notification_subscribers: &Option<Arc<RwLock<Vec<BankNotificationSender>>>>,
) {
for confirmed_bank in bank.clone().parents_inclusive().iter().rev() {
@ -183,8 +183,8 @@ impl OptimisticallyConfirmedBankTracker {
subscriptions,
bank_forks,
confirmed_bank,
&mut last_notified_confirmed_slot,
&mut pending_optimistically_confirmed_banks,
last_notified_confirmed_slot,
pending_optimistically_confirmed_banks,
bank_notification_subscribers,
);
}
@ -196,8 +196,8 @@ impl OptimisticallyConfirmedBankTracker {
bank_forks: &Arc<RwLock<BankForks>>,
optimistically_confirmed_bank: &Arc<RwLock<OptimisticallyConfirmedBank>>,
subscriptions: &Arc<RpcSubscriptions>,
mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
mut last_notified_confirmed_slot: &mut Slot,
pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
last_notified_confirmed_slot: &mut Slot,
highest_confirmed_slot: &mut Slot,
bank_notification_subscribers: &Option<Arc<RwLock<Vec<BankNotificationSender>>>>,
) {
@ -219,8 +219,8 @@ impl OptimisticallyConfirmedBankTracker {
bank_forks,
bank,
*highest_confirmed_slot,
&mut last_notified_confirmed_slot,
&mut pending_optimistically_confirmed_banks,
last_notified_confirmed_slot,
pending_optimistically_confirmed_banks,
bank_notification_subscribers,
);
@ -273,8 +273,8 @@ impl OptimisticallyConfirmedBankTracker {
bank_forks,
&bank,
*last_notified_confirmed_slot,
&mut last_notified_confirmed_slot,
&mut pending_optimistically_confirmed_banks,
last_notified_confirmed_slot,
pending_optimistically_confirmed_banks,
bank_notification_subscribers,
);

View File

@ -155,6 +155,7 @@ pub struct JsonRpcRequestProcessor {
blockstore: Arc<Blockstore>,
config: JsonRpcConfig,
snapshot_config: Option<SnapshotConfig>,
#[allow(dead_code)]
validator_exit: Arc<RwLock<Exit>>,
health: Arc<RpcHealth>,
cluster_info: Arc<ClusterInfo>,

View File

@ -183,9 +183,10 @@ impl TestBroadcastReceiver {
}
}
Err(TryRecvError::Empty) => {
if started.elapsed() > timeout {
panic!("TestBroadcastReceiver: no data, timeout reached");
}
assert!(
started.elapsed() <= timeout,
"TestBroadcastReceiver: no data, timeout reached"
);
sleep(Duration::from_millis(50));
}
Err(err) => panic!("broadcast receiver error: {}", err),

View File

@ -116,12 +116,6 @@ impl std::fmt::Debug for NotificationEntry {
}
}
#[derive(Default, Clone)]
struct ProgramConfig {
filters: Vec<RpcFilterType>,
encoding: Option<UiAccountEncoding>,
}
#[allow(clippy::type_complexity)]
fn check_commitment_and_notify<P, S, B, F, X>(
params: &P,

View File

@ -926,9 +926,9 @@ impl Accounts {
let keys: Vec<_> = txs
.map(|tx| tx.get_account_locks(demote_program_write_locks))
.collect();
let mut account_locks = &mut self.account_locks.lock().unwrap();
let account_locks = &mut self.account_locks.lock().unwrap();
keys.into_iter()
.map(|keys| self.lock_account(&mut account_locks, keys.writable, keys.readonly))
.map(|keys| self.lock_account(account_locks, keys.writable, keys.readonly))
.collect()
}

View File

@ -216,6 +216,7 @@ struct GenerateIndexTimings {
pub insertion_time_us: u64,
pub min_bin_size: usize,
pub max_bin_size: usize,
#[allow(dead_code)]
pub total_items: usize,
pub storage_size_accounts_map_us: u64,
pub storage_size_storages_us: u64,
@ -971,11 +972,13 @@ pub struct AccountsDb {
// used by tests
// holds this until we are dropped
#[allow(dead_code)]
temp_accounts_hash_cache_path: Option<TempDir>,
pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,
/// Directory of paths this accounts_db needs to hold/remove
#[allow(dead_code)]
pub(crate) temp_paths: Option<Vec<TempDir>>,
/// Starting file size of appendvecs
@ -1168,9 +1171,13 @@ impl PurgeStats {
#[derive(Debug)]
struct FlushStats {
#[allow(dead_code)]
slot: Slot,
#[allow(dead_code)]
num_flushed: usize,
#[allow(dead_code)]
num_purged: usize,
#[allow(dead_code)]
total_size: u64,
}
@ -3636,10 +3643,10 @@ impl AccountsDb {
Self::page_align(size),
));
if store.append_vec_id() == CACHE_VIRTUAL_STORAGE_ID {
panic!("We've run out of storage ids!");
}
assert!(
store.append_vec_id() != CACHE_VIRTUAL_STORAGE_ID,
"We've run out of storage ids!"
);
debug!(
"creating store: {} slot: {} len: {} size: {} from: {} path: {:?}",
store.append_vec_id(),
@ -11734,11 +11741,10 @@ pub mod tests {
let slot_accounts = accounts_db.scan_account_storage(
*slot as Slot,
|loaded_account: LoadedAccount| {
if is_cache_at_limit {
panic!(
"When cache is at limit, all roots should have been flushed to storage"
);
}
assert!(
!is_cache_at_limit,
"When cache is at limit, all roots should have been flushed to storage"
);
// All slots <= requested_flush_root should have been flushed, regardless
// of ongoing scans
assert!(*slot > requested_flush_root);

View File

@ -680,7 +680,7 @@ impl AccountsHash {
max_bin: usize,
) -> (Hash, u64, PreviousPass) {
let (mut hashes, mut total_lamports) =
Self::de_dup_and_eliminate_zeros(data_sections_by_pubkey, &mut stats, max_bin);
Self::de_dup_and_eliminate_zeros(data_sections_by_pubkey, stats, max_bin);
total_lamports += previous_state.lamports;

View File

@ -732,7 +732,6 @@ type AccountMapsReadLock<'a, T> = RwLockReadGuard<'a, MapType<T>>;
#[derive(Debug, Default)]
pub struct ScanSlotTracker {
is_removed: bool,
ref_count: u64,
}
impl ScanSlotTracker {

View File

@ -685,6 +685,7 @@ pub(crate) struct BankFieldsToDeserialize {
pub(crate) ns_per_slot: u128,
pub(crate) genesis_creation_time: UnixTimestamp,
pub(crate) slots_per_year: f64,
#[allow(dead_code)]
pub(crate) unused: u64,
pub(crate) slot: Slot,
pub(crate) epoch: Epoch,
@ -2902,9 +2903,11 @@ impl Bank {
self.fee_calculator = self.fee_rate_governor.create_fee_calculator();
for (pubkey, account) in genesis_config.accounts.iter() {
if self.get_account(pubkey).is_some() {
panic!("{} repeated in genesis config", pubkey);
}
assert!(
self.get_account(pubkey).is_none(),
"{} repeated in genesis config",
pubkey
);
self.store_account(pubkey, &AccountSharedData::from(account.clone()));
self.capitalization.fetch_add(account.lamports(), Relaxed);
}
@ -2913,9 +2916,11 @@ impl Bank {
self.update_fees();
for (pubkey, account) in genesis_config.rewards_pools.iter() {
if self.get_account(pubkey).is_some() {
panic!("{} repeated in genesis config", pubkey);
}
assert!(
self.get_account(pubkey).is_none(),
"{} repeated in genesis config",
pubkey
);
self.store_account(pubkey, &AccountSharedData::from(account.clone()));
}
@ -3514,23 +3519,14 @@ impl Bank {
sanitized_txs: &[SanitizedTransaction],
lock_results: &[Result<()>],
max_age: usize,
mut error_counters: &mut ErrorCounters,
error_counters: &mut ErrorCounters,
) -> Vec<TransactionCheckResult> {
let age_results = self.check_age(
sanitized_txs.iter(),
lock_results,
max_age,
&mut error_counters,
);
let cache_results =
self.check_status_cache(sanitized_txs, age_results, &mut error_counters);
let age_results =
self.check_age(sanitized_txs.iter(), lock_results, max_age, error_counters);
let cache_results = self.check_status_cache(sanitized_txs, age_results, error_counters);
if self.upgrade_epoch() {
// Reject all non-vote transactions
self.filter_by_vote_transactions(
sanitized_txs.iter(),
cache_results,
&mut error_counters,
)
self.filter_by_vote_transactions(sanitized_txs.iter(), cache_results, error_counters)
} else {
cache_results
}

View File

@ -75,9 +75,11 @@ pub(crate) struct DeserializableVersionedBank {
pub(crate) epoch_schedule: EpochSchedule,
pub(crate) inflation: Inflation,
pub(crate) stakes: Stakes,
#[allow(dead_code)]
pub(crate) unused_accounts: UnusedAccounts,
pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
pub(crate) is_delta: bool,
#[allow(dead_code)]
pub(crate) message_processor: InstructionProcessor,
}

View File

@ -153,6 +153,7 @@ struct SnapshotRootPaths {
/// Helper type to bundle up the results from `unarchive_snapshot()`
#[derive(Debug)]
struct UnarchivedSnapshot {
#[allow(dead_code)]
unpack_dir: TempDir,
unpacked_append_vec_map: UnpackedAppendVecMap,
unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion,
@ -464,7 +465,7 @@ pub fn deserialize_snapshot_data_file<T: Sized>(
deserializer: impl FnOnce(&mut BufReader<File>) -> Result<T>,
) -> Result<T> {
let wrapped_deserializer = move |streams: &mut SnapshotStreams<File>| -> Result<T> {
deserializer(&mut streams.full_snapshot_stream)
deserializer(streams.full_snapshot_stream)
};
let wrapped_data_file_path = SnapshotRootPaths {
@ -1460,12 +1461,12 @@ fn rebuild_bank_from_snapshots(
.map(|root_paths| root_paths.snapshot_path),
};
let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |mut snapshot_streams| {
let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| {
Ok(
match incremental_snapshot_version.unwrap_or(full_snapshot_version) {
SnapshotVersion::V1_2_0 => bank_from_streams(
SerdeStyle::Newer,
&mut snapshot_streams,
snapshot_streams,
account_paths,
unpacked_append_vec_map,
genesis_config,

View File

@ -5,7 +5,7 @@ use crate::message::Message;
use crate::secp256k1_program;
use log::*;
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug, AbiExample)]
#[derive(Serialize, Deserialize, Default, PartialEq, Eq, Clone, Debug, AbiExample)]
#[serde(rename_all = "camelCase")]
pub struct FeeCalculator {
// The current cost of a signature This amount may increase/decrease over time based on
@ -13,14 +13,6 @@ pub struct FeeCalculator {
pub lamports_per_signature: u64,
}
impl Default for FeeCalculator {
fn default() -> Self {
Self {
lamports_per_signature: 0,
}
}
}
impl FeeCalculator {
pub fn new(lamports_per_signature: u64) -> Self {
Self {

View File

@ -51,9 +51,10 @@ pub trait SyscallStubs: Sync + Send {
/// # Safety
unsafe fn sol_memcpy(&self, dst: *mut u8, src: *const u8, n: usize) {
// cannot be overlapping
if dst as usize + n > src as usize && src as usize > dst as usize {
panic!("memcpy does not support overlapping regions");
}
assert!(
!(dst as usize + n > src as usize && src as usize > dst as usize),
"memcpy does not support overlapping regions"
);
std::ptr::copy_nonoverlapping(src, dst, n as usize);
}
/// # Safety

View File

@ -45,9 +45,7 @@ macro_rules! declare_sysvar_id(
#[cfg(test)]
#[test]
fn test_sysvar_id() {
if !$crate::sysvar::is_sysvar_id(&id()) {
panic!("sysvar::is_sysvar_id() doesn't know about {}", $name);
}
assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name);
}
)
);
@ -72,10 +70,7 @@ macro_rules! declare_deprecated_sysvar_id(
#[cfg(test)]
#[test]
fn test_sysvar_id() {
#[allow(deprecated)]
if !$crate::sysvar::is_sysvar_id(&id()) {
panic!("sysvar::is_sysvar_id() doesn't know about {}", $name);
}
assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name);
}
)
);

View File

@ -277,9 +277,7 @@ impl fmt::Display for GenesisConfig {
self.accounts
.iter()
.map(|(pubkey, account)| {
if account.lamports == 0 {
panic!("{:?}", (pubkey, account));
}
assert!(account.lamports > 0, "{:?}", (pubkey, account));
account.lamports
})
.sum::<u64>()

View File

@ -62,6 +62,7 @@ impl Precompile {
where
F: Fn(&Pubkey) -> bool,
{
#![allow(clippy::redundant_closure)]
self.feature
.map_or(true, |ref feature_id| is_enabled(feature_id))
&& self.program_id == *program_id

View File

@ -95,9 +95,7 @@ pub fn receiver(
use_pinned_memory: bool,
) -> JoinHandle<()> {
let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
if res.is_err() {
panic!("streamer::receiver set_read_timeout error");
}
assert!(!res.is_err(), "streamer::receiver set_read_timeout error");
let exit = exit.clone();
Builder::new()
.name("solana-receiver".to_string())

View File

@ -99,9 +99,9 @@ pub fn parse_token(
"destination": account_keys[instruction.accounts[1] as usize].to_string(),
"amount": amount.to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -120,9 +120,9 @@ pub fn parse_token(
"delegate": account_keys[instruction.accounts[1] as usize].to_string(),
"amount": amount.to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -139,9 +139,9 @@ pub fn parse_token(
let mut value = json!({
"source": account_keys[instruction.accounts[0] as usize].to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
1,
account_keys,
&instruction.accounts,
@ -170,9 +170,9 @@ pub fn parse_token(
COption::None => None,
},
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
1,
account_keys,
&instruction.accounts,
@ -191,9 +191,9 @@ pub fn parse_token(
"account": account_keys[instruction.accounts[1] as usize].to_string(),
"amount": amount.to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -212,9 +212,9 @@ pub fn parse_token(
"mint": account_keys[instruction.accounts[1] as usize].to_string(),
"amount": amount.to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -232,9 +232,9 @@ pub fn parse_token(
"account": account_keys[instruction.accounts[0] as usize].to_string(),
"destination": account_keys[instruction.accounts[1] as usize].to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -252,9 +252,9 @@ pub fn parse_token(
"account": account_keys[instruction.accounts[0] as usize].to_string(),
"mint": account_keys[instruction.accounts[1] as usize].to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -272,9 +272,9 @@ pub fn parse_token(
"account": account_keys[instruction.accounts[0] as usize].to_string(),
"mint": account_keys[instruction.accounts[1] as usize].to_string(),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -294,9 +294,9 @@ pub fn parse_token(
"destination": account_keys[instruction.accounts[2] as usize].to_string(),
"tokenAmount": token_amount_to_ui_amount(amount, decimals),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
3,
account_keys,
&instruction.accounts,
@ -316,9 +316,9 @@ pub fn parse_token(
"delegate": account_keys[instruction.accounts[2] as usize].to_string(),
"tokenAmount": token_amount_to_ui_amount(amount, decimals),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
3,
account_keys,
&instruction.accounts,
@ -337,9 +337,9 @@ pub fn parse_token(
"account": account_keys[instruction.accounts[1] as usize].to_string(),
"tokenAmount": token_amount_to_ui_amount(amount, decimals),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,
@ -358,9 +358,9 @@ pub fn parse_token(
"mint": account_keys[instruction.accounts[1] as usize].to_string(),
"tokenAmount": token_amount_to_ui_amount(amount, decimals),
});
let mut map = value.as_object_mut().unwrap();
let map = value.as_object_mut().unwrap();
parse_signers(
&mut map,
map,
2,
account_keys,
&instruction.accounts,

View File

@ -51,7 +51,7 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option<u8> {
pub fn collect_token_balances(
bank: &Bank,
batch: &TransactionBatch,
mut mint_decimals: &mut HashMap<Pubkey, u8>,
mint_decimals: &mut HashMap<Pubkey, u8>,
) -> TransactionTokenBalances {
let mut balances: TransactionTokenBalances = vec![];
@ -59,7 +59,7 @@ pub fn collect_token_balances(
let has_token_program = transaction
.message()
.account_keys_iter()
.any(|p| is_token_program(p));
.any(is_token_program);
let mut transaction_balances: Vec<TransactionTokenBalance> = vec![];
if has_token_program {
@ -69,7 +69,7 @@ pub fn collect_token_balances(
}
if let Some((mint, ui_token_amount)) =
collect_token_balance_from_account(bank, account_id, &mut mint_decimals)
collect_token_balance_from_account(bank, account_id, mint_decimals)
{
transaction_balances.push(TransactionTokenBalance {
account_index: index as u8,

View File

@ -62,19 +62,18 @@ fn main() {
let median: i64 = v["median"].to_string().parse().unwrap();
let deviation: i64 = v["deviation"].to_string().parse().unwrap();
if upload_metrics {
panic!("TODO...");
/*
solana_metrics::datapoint_info!(
&v["name"].as_str().unwrap().trim_matches('\"'),
("test", "bench", String),
("branch", branch.to_string(), String),
("median", median, i64),
("deviation", deviation, i64),
("commit", git_commit_hash.trim().to_string(), String)
);
*/
}
assert!(!upload_metrics, "TODO");
/*
solana_metrics::datapoint_info!(
&v["name"].as_str().unwrap().trim_matches('\"'),
("test", "bench", String),
("branch", branch.to_string(), String),
("median", median, i64),
("deviation", deviation, i64),
("commit", git_commit_hash.trim().to_string(), String)
);
*/
let last_median =
get_last_metrics(&"median".to_string(), &db, &name, branch).unwrap_or_default();
let last_deviation = get_last_metrics(&"deviation".to_string(), &db, &name, branch)