Resolve nightly-2021-10-05 clippy complaints

Michael Vines 2021-10-05 22:24:48 -07:00
parent eb4ce3dfed
commit 7027d56064
53 changed files with 229 additions and 293 deletions

View File

@@ -121,9 +121,7 @@ impl BucketStorage {
     }

     pub fn uid(&self, ix: u64) -> Uid {
-        if ix >= self.capacity() {
-            panic!("bad index size");
-        }
+        assert!(ix < self.capacity(), "bad index size");
         let ix = (ix * self.cell_size) as usize;
         let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
         unsafe {
@@ -133,12 +131,8 @@ impl BucketStorage {
     }

     pub fn allocate(&self, ix: u64, uid: Uid) -> Result<(), BucketStorageError> {
-        if ix >= self.capacity() {
-            panic!("allocate: bad index size");
-        }
-        if UID_UNLOCKED == uid {
-            panic!("allocate: bad uid");
-        }
+        assert!(ix < self.capacity(), "allocate: bad index size");
+        assert!(UID_UNLOCKED != uid, "allocate: bad uid");
         let mut e = Err(BucketStorageError::AlreadyAllocated);
         let ix = (ix * self.cell_size) as usize;
         //debug!("ALLOC {} {}", ix, uid);
@@ -154,12 +148,8 @@ impl BucketStorage {
     }

     pub fn free(&self, ix: u64, uid: Uid) {
-        if ix >= self.capacity() {
-            panic!("free: bad index size");
-        }
-        if UID_UNLOCKED == uid {
-            panic!("free: bad uid");
-        }
+        assert!(ix < self.capacity(), "bad index size");
+        assert!(UID_UNLOCKED != uid, "free: bad uid");
         let ix = (ix * self.cell_size) as usize;
         //debug!("FREE {} {}", ix, uid);
         let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
@@ -177,9 +167,7 @@ impl BucketStorage {
     }

     pub fn get<T: Sized>(&self, ix: u64) -> &T {
-        if ix >= self.capacity() {
-            panic!("bad index size");
-        }
+        assert!(ix < self.capacity(), "bad index size");
         let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
         let end = start + std::mem::size_of::<T>();
         let item_slice: &[u8] = &self.mmap[start..end];
@@ -199,9 +187,7 @@ impl BucketStorage {
     }

     pub fn get_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &[T] {
-        if ix >= self.capacity() {
-            panic!("bad index size");
-        }
+        assert!(ix < self.capacity(), "bad index size");
         let ix = self.cell_size * ix;
         let start = ix as usize + std::mem::size_of::<Header>();
         let end = start + std::mem::size_of::<T>() * len as usize;
@@ -215,9 +201,7 @@ impl BucketStorage {

     #[allow(clippy::mut_from_ref)]
     pub fn get_mut<T: Sized>(&self, ix: u64) -> &mut T {
-        if ix >= self.capacity() {
-            panic!("bad index size");
-        }
+        assert!(ix < self.capacity(), "bad index size");
         let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
         let end = start + std::mem::size_of::<T>();
         let item_slice: &[u8] = &self.mmap[start..end];
@@ -229,9 +213,7 @@ impl BucketStorage {

    #[allow(clippy::mut_from_ref)]
    pub fn get_mut_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &mut [T] {
-        if ix >= self.capacity() {
-            panic!("bad index size");
-        }
+        assert!(ix < self.capacity(), "bad index size");
         let ix = self.cell_size * ix;
         let start = ix as usize + std::mem::size_of::<Header>();
         let end = start + std::mem::size_of::<T>() * len as usize;
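The hunks above all apply the same clippy-preferred rewrite: an `if` guard whose only body is a `panic!` becomes a single `assert!` with the condition inverted. A minimal standalone sketch of the before/after, using stand-in names rather than the real `BucketStorage` fields:

```rust
// Hypothetical stand-in for the bounds checks above; `capacity` is a plain
// parameter here, not the real BucketStorage method.
fn check_index(ix: u64, capacity: u64) {
    // Before: if ix >= capacity { panic!("bad index size"); }
    // After: the single-expression form, with the condition inverted.
    assert!(ix < capacity, "bad index size");
}

fn main() {
    check_index(3, 10); // ok
    // check_index(10, 10); // would panic with "bad index size"
}
```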

View File

@@ -324,19 +324,11 @@ pub fn presigner_from_pubkey_sigs(
     })
 }

-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct SignerFromPathConfig {
     pub allow_null_signer: bool,
 }

-impl Default for SignerFromPathConfig {
-    fn default() -> Self {
-        Self {
-            allow_null_signer: false,
-        }
-    }
-}
-
 pub fn signer_from_path(
     matches: &ArgMatches,
     path: &str,
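This is clippy's derivable-impls pattern: a hand-written `Default` whose field values all equal the fields' own defaults can be replaced by `#[derive(Default)]`. A minimal sketch:

```rust
// bool::default() is false, so the derived impl is identical to the
// removed manual one.
#[derive(Debug, Default)]
pub struct SignerFromPathConfig {
    pub allow_null_signer: bool,
}

fn main() {
    let config = SignerFromPathConfig::default();
    assert!(!config.allow_null_signer);
    println!("{:?}", config);
}
```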

View File

@@ -149,7 +149,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error

 pub fn parse_args<'a>(
     matches: &ArgMatches<'_>,
-    mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
 ) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
     let config = if let Some(config_file) = matches.value_of("config_file") {
         Config::load(config_file).unwrap_or_default()
@@ -186,11 +186,11 @@ pub fn parse_args<'a>(
     let CliCommandInfo {
         command,
         mut signers,
-    } = parse_command(matches, &default_signer, &mut wallet_manager)?;
+    } = parse_command(matches, &default_signer, wallet_manager)?;

     if signers.is_empty() {
         if let Ok(signer_info) =
-            default_signer.generate_unique_signers(vec![None], matches, &mut wallet_manager)
+            default_signer.generate_unique_signers(vec![None], matches, wallet_manager)
         {
             signers.extend(signer_info.signers);
         }
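The lint behind this and several later hunks: when a parameter is already `&mut T`, rebinding it `mut` and passing `&mut param` builds an `&mut &mut T` that only works through auto-deref. A small sketch of the same cleanup, with made-up names:

```rust
// Hypothetical helper; the point is the parameter type, not the logic.
fn fill(slot: &mut Option<u32>) {
    slot.get_or_insert(42);
}

fn caller(slot: &mut Option<u32>) {
    // Before: a needless `mut slot` binding plus `fill(&mut slot)`, which
    // passes &mut &mut Option<u32> and relies on deref coercion.
    fill(slot); // the reference is simply passed through (reborrowed)
}

fn main() {
    let mut value = None;
    caller(&mut value);
    assert_eq!(value, Some(42));
}
```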

View File

@@ -510,6 +510,7 @@ pub fn process_get_nonce(
     config: &CliConfig,
     nonce_account_pubkey: &Pubkey,
 ) -> ProcessResult {
+    #[allow(clippy::redundant_closure)]
     match get_account_with_commitment(rpc_client, nonce_account_pubkey, config.commitment)
         .and_then(|ref a| state_from_account(a))?
     {

View File

@@ -215,6 +215,7 @@ fn full_battery_tests(
 }

 #[test]
+#[allow(clippy::redundant_closure)]
 fn test_create_account_with_seed() {
     solana_logger::setup();
     let mint_keypair = Keypair::new();

View File

@@ -1,3 +1,4 @@
+#![allow(clippy::redundant_closure)]
 use solana_cli::{
     cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
     spend_utils::SpendAmount,

View File

@@ -1,3 +1,4 @@
+#![allow(clippy::redundant_closure)]
 use solana_cli::{
     cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
     spend_utils::SpendAmount,

View File

@@ -34,6 +34,7 @@ impl Source {
                 Ok((res.0, res.1))
             }
             Self::NonceAccount(ref pubkey) => {
+                #[allow(clippy::redundant_closure)]
                 let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
                     .and_then(|ref a| nonce_utils::data_from_account(a))?;
                 Ok((data.blockhash, data.fee_calculator))
@@ -80,6 +81,7 @@ impl Source {
                 Ok(blockhash)
             }
             Self::NonceAccount(ref pubkey) => {
+                #[allow(clippy::redundant_closure)]
                 let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
                     .and_then(|ref a| nonce_utils::data_from_account(a))?;
                 Ok(data.blockhash)
@@ -96,6 +98,7 @@ impl Source {
         Ok(match self {
             Self::Cluster => rpc_client.is_blockhash_valid(blockhash, commitment)?,
             Self::NonceAccount(ref pubkey) => {
+                #[allow(clippy::redundant_closure)]
                 let _ = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
                     .and_then(|ref a| nonce_utils::data_from_account(a))?;
                 true
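Why these sites get an `#[allow]` instead of the suggested rewrite (my reading, not stated in the commit): the closure's `ref a` pattern receives the account by value and binds a reference to it, so clippy's suggested `.and_then(data_from_account)` would not type-check; the closure is not actually redundant. A self-contained sketch with stand-in types:

```rust
// Stand-in types; the real nonce_utils API is not reproduced here.
struct Account;
struct Data;

fn get_account() -> Result<Account, String> {
    Ok(Account)
}

fn data_from_account(_account: &Account) -> Result<Data, String> {
    Ok(Data)
}

fn main() -> Result<(), String> {
    // clippy flags `|ref a| data_from_account(a)` as a redundant closure,
    // but the closure takes `Account` by value while `ref a` binds
    // `&Account`, so `.and_then(data_from_account)` would not compile.
    #[allow(clippy::redundant_closure)]
    let _data = get_account().and_then(|ref a| data_from_account(a))?;
    Ok(())
}
```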

View File

@@ -4,7 +4,6 @@ use {
     itertools::Itertools,
     solana_entry::entry::Entry,
     solana_ledger::shred::Shredder,
-    solana_runtime::blockhash_queue::BlockhashQueue,
     solana_sdk::{
         hash::Hash,
         signature::{Keypair, Signature, Signer},
@@ -26,11 +25,6 @@ pub struct BroadcastDuplicatesConfig {
 #[derive(Clone)]
 pub(super) struct BroadcastDuplicatesRun {
     config: BroadcastDuplicatesConfig,
-    // Local queue for broadcast to track which duplicate blockhashes we've sent
-    duplicate_queue: BlockhashQueue,
-    // Buffer for duplicate entries
-    duplicate_entries_buffer: Vec<Entry>,
-    last_duplicate_entry_hash: Hash,
     current_slot: Slot,
     next_shred_index: u32,
     shred_version: u16,
@@ -50,10 +44,7 @@ impl BroadcastDuplicatesRun {
         ));
         Self {
             config,
-            duplicate_queue: BlockhashQueue::default(),
-            duplicate_entries_buffer: vec![],
             next_shred_index: u32::MAX,
-            last_duplicate_entry_hash: Hash::default(),
             shred_version,
             current_slot: 0,
             recent_blockhash: None,

View File

@@ -32,7 +32,6 @@ pub struct StandardBroadcastRun {
     last_datapoint_submit: Arc<AtomicInterval>,
     num_batches: usize,
     cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
-    last_peer_update: Arc<AtomicInterval>,
 }

 impl StandardBroadcastRun {
@@ -52,7 +51,6 @@ impl StandardBroadcastRun {
             last_datapoint_submit: Arc::default(),
             num_batches: 0,
             cluster_nodes_cache,
-            last_peer_update: Arc::new(AtomicInterval::default()),
         }
     }

View File

@@ -1313,7 +1313,7 @@ pub mod test {
             }
             VoteState::serialize(
                 &VoteStateVersions::new_current(vote_state),
-                &mut account.data_as_mut_slice(),
+                account.data_as_mut_slice(),
             )
             .expect("serialize state");
             (

View File

@@ -3535,7 +3535,7 @@ pub mod tests {
         let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
         bank_forks.write().unwrap().insert(bank1);
         let bank1 = bank_forks.read().unwrap().get(1).cloned().unwrap();
-        let mut bank1_progress = progress
+        let bank1_progress = progress
             .entry(bank1.slot())
             .or_insert_with(|| ForkProgress::new(bank1.last_blockhash(), None, None, 0, 0));
         let shreds = shred_to_insert(
@@ -3548,7 +3548,7 @@ pub mod tests {
         let res = ReplayStage::replay_blockstore_into_bank(
             &bank1,
             &blockstore,
-            &mut bank1_progress,
+            bank1_progress,
             None,
             &replay_vote_sender,
             &VerifyRecyclers::default(),
@@ -3923,7 +3923,7 @@ pub mod tests {
             .values()
             .cloned()
             .collect();
-        let mut heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
+        let heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
         let mut latest_validator_votes_for_frozen_banks =
             LatestValidatorVotesForFrozenBanks::default();
         let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
@@ -3938,7 +3938,7 @@ pub mod tests {
             &VoteTracker::default(),
             &ClusterSlots::default(),
             &vote_simulator.bank_forks,
-            &mut heaviest_subtree_fork_choice,
+            heaviest_subtree_fork_choice,
             &mut latest_validator_votes_for_frozen_banks,
         );

View File

@@ -95,9 +95,9 @@ impl ShredFetchStage {
                 }
             }
             stats.shred_count += p.packets.len();
-            p.packets.iter_mut().for_each(|mut packet| {
+            p.packets.iter_mut().for_each(|packet| {
                 Self::process_packet(
-                    &mut packet,
+                    packet,
                     &mut shreds_received,
                     &mut stats,
                     last_root,

View File

@@ -29,25 +29,24 @@ mod tests {
     #[derive(Debug)]
     struct BenchmarkConfig {
-        pub benchmark_slots: u64,
-        pub batch_size: u64,
-        pub max_ledger_shreds: u64,
-        pub entries_per_slot: u64,
-        pub stop_size_bytes: u64,
-        pub stop_size_iterations: u64,
-        pub pre_generate_data: bool,
-        pub cleanup_blockstore: bool,
-        pub emit_cpu_info: bool,
-        pub assert_compaction: bool,
-        pub compaction_interval: Option<u64>,
-        pub no_compaction: bool,
+        benchmark_slots: u64,
+        batch_size: u64,
+        max_ledger_shreds: u64,
+        entries_per_slot: u64,
+        stop_size_bytes: u64,
+        stop_size_iterations: u64,
+        pre_generate_data: bool,
+        cleanup_blockstore: bool,
+        assert_compaction: bool,
+        compaction_interval: Option<u64>,
+        no_compaction: bool,
     }

     #[derive(Clone, Copy, Debug)]
     struct CpuStatsInner {
-        pub cpu_user: f32,
-        pub cpu_system: f32,
-        pub cpu_idle: f32,
+        cpu_user: f32,
+        cpu_system: f32,
+        cpu_idle: f32,
     }

     impl From<CPULoad> for CpuStatsInner {
@@ -153,7 +152,6 @@ mod tests {
         let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS);
         let pre_generate_data = read_env("PRE_GENERATE_DATA", false);
         let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true);
-        let emit_cpu_info = read_env("EMIT_CPU_INFO", true);
         // set default to `true` once compaction is merged
         let assert_compaction = read_env("ASSERT_COMPACTION", false);
         let compaction_interval = match read_env("COMPACTION_INTERVAL", 0) {
@@ -171,7 +169,6 @@ mod tests {
             stop_size_iterations,
             pre_generate_data,
             cleanup_blockstore,
-            emit_cpu_info,
             assert_compaction,
             compaction_interval,
             no_compaction,

View File

@@ -573,9 +573,7 @@ impl EntrySlice for [Entry] {
                 1,
             );
         }
-        if res != 0 {
-            panic!("GPU PoH verify many failed");
-        }
+        assert!(res == 0, "GPU PoH verify many failed");
         inc_new_counter_info!(
             "entry_verify-gpu_thread",
             timing::duration_as_us(&gpu_wait.elapsed()) as usize

View File

@@ -84,16 +84,6 @@ pub enum FaucetRequest {
     },
 }

-impl Default for FaucetRequest {
-    fn default() -> Self {
-        Self::GetAirdrop {
-            lamports: u64::default(),
-            to: Pubkey::default(),
-            blockhash: Hash::default(),
-        }
-    }
-}
-
 pub enum FaucetTransaction {
     Airdrop(Transaction),
     Memo((Transaction, String)),
@@ -416,7 +406,15 @@ async fn process(
     mut stream: TokioTcpStream,
     faucet: Arc<Mutex<Faucet>>,
 ) -> Result<(), Box<dyn std::error::Error>> {
-    let mut request = vec![0u8; serialized_size(&FaucetRequest::default()).unwrap() as usize];
+    let mut request = vec![
+        0u8;
+        serialized_size(&FaucetRequest::GetAirdrop {
+            lamports: u64::default(),
+            to: Pubkey::default(),
+            blockhash: Hash::default(),
+        })
+        .unwrap() as usize
+    ];
     while stream.read_exact(&mut request).await.is_ok() {
         trace!("{:?}", request);
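The removed `Default` impl existed only to size this read buffer, so the buffer is now sized from a representative variant inline. A self-contained sketch of the same idea (assumes the `bincode` and `serde` crates; `Pubkey` and `Hash` are replaced here by fixed-size stand-ins):

```rust
use serde::Serialize;

// Fixed-size stand-ins for the real solana_sdk Pubkey and Hash types.
#[derive(Serialize)]
struct Pubkey([u8; 32]);
#[derive(Serialize)]
struct Hash([u8; 32]);

#[derive(Serialize)]
enum FaucetRequest {
    GetAirdrop {
        lamports: u64,
        to: Pubkey,
        blockhash: Hash,
    },
}

fn main() {
    // Size the read buffer from a representative request instead of keeping
    // a Default impl around solely for this call site.
    let len = bincode::serialized_size(&FaucetRequest::GetAirdrop {
        lamports: u64::default(),
        to: Pubkey([0; 32]),
        blockhash: Hash([0; 32]),
    })
    .unwrap() as usize;
    let request = vec![0u8; len];
    println!("request buffer: {} bytes", request.len());
}
```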

View File

@@ -194,9 +194,7 @@ impl AbiDigester {
         label: &'static str,
         variant: &'static str,
     ) -> Result<(), DigestError> {
-        if !self.for_enum {
-            panic!("derive AbiEnumVisitor or implement it for the enum, which contains a variant ({}) named {}", label, variant);
-        }
+        assert!(self.for_enum, "derive AbiEnumVisitor or implement it for the enum, which contains a variant ({}) named {}", label, variant);
         Ok(())
     }

View File

@@ -1728,6 +1728,7 @@ fn main() {
         }
         ("shred-meta", Some(arg_matches)) => {
             #[derive(Debug)]
+            #[allow(dead_code)]
             struct ShredMeta<'a> {
                 slot: Slot,
                 full_slot: bool,
@@ -1876,9 +1877,10 @@ fn main() {
                 wal_recovery_mode,
             );
             let mut ancestors = BTreeSet::new();
-            if blockstore.meta(ending_slot).unwrap().is_none() {
-                panic!("Ending slot doesn't exist");
-            }
+            assert!(
+                blockstore.meta(ending_slot).unwrap().is_some(),
+                "Ending slot doesn't exist"
+            );
             for a in AncestorIterator::new(ending_slot, &blockstore) {
                 ancestors.insert(a);
                 if a <= starting_slot {

View File

@@ -602,8 +602,8 @@ fn do_process_blockstore_from_root(
             blockstore
                 .set_roots(std::iter::once(&start_slot))
                 .expect("Couldn't set root slot on startup");
-        } else if !blockstore.is_root(start_slot) {
-            panic!("starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
+        } else {
+            assert!(blockstore.is_root(start_slot), "starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
         }

         if let Ok(metas) = blockstore.slot_meta_iterator(start_slot) {
@@ -1340,8 +1340,8 @@ fn process_single_slot(
             blockstore
                 .set_dead_slot(slot)
                 .expect("Failed to mark slot as dead in blockstore");
-        } else if !blockstore.is_dead(slot) {
-            panic!("Failed slot isn't dead and can't update due to being secondary blockstore access: {}", slot);
+        } else {
+            assert!(blockstore.is_dead(slot), "Failed slot isn't dead and can't update due to being secondary blockstore access: {}", slot);
         }
         err
     })?;

View File

@@ -721,8 +721,8 @@ impl Shredder {
         // 2) Sign coding shreds
         PAR_THREAD_POOL.with(|thread_pool| {
             thread_pool.borrow().install(|| {
-                coding_shreds.par_iter_mut().for_each(|mut coding_shred| {
-                    Shredder::sign_shred(keypair, &mut coding_shred);
+                coding_shreds.par_iter_mut().for_each(|coding_shred| {
+                    Shredder::sign_shred(keypair, coding_shred);
                 })
             })
         });
@@ -858,7 +858,7 @@ impl Shredder {
         if num_coding > 0 && shreds.len() < fec_set_size {
             // Let's try recovering missing shreds using erasure
-            let mut present = &mut vec![true; fec_set_size];
+            let present = &mut vec![true; fec_set_size];
             let mut next_expected_index = first_index;
             let mut shred_bufs: Vec<Vec<u8>> = shreds
                 .into_iter()
@@ -871,7 +871,7 @@ impl Shredder {
                         first_index,
                         next_expected_index,
                         index,
-                        &mut present,
+                        present,
                     );
                     blocks.push(shred.payload);
                     next_expected_index = index + 1;
@@ -887,7 +887,7 @@ impl Shredder {
                 first_index,
                 next_expected_index,
                 first_index + fec_set_size,
-                &mut present,
+                present,
             );
             shred_bufs.append(&mut pending_shreds);

View File

@@ -323,7 +323,7 @@ pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) {
         batches.par_iter_mut().for_each(|p| {
             p.packets[..]
                 .par_iter_mut()
-                .for_each(|mut p| sign_shred_cpu(keypair, &mut p));
+                .for_each(|p| sign_shred_cpu(keypair, p));
         });
     });
     inc_new_counter_debug!("ed25519_shred_verify_cpu", count);

View File

@@ -1478,9 +1478,7 @@ fn generate_frozen_account_panic(mut cluster: LocalCluster, frozen_account: Arc<
         sleep(Duration::from_secs(1));
         i += 1;

-        if i > 10 {
-            panic!("FROZEN_ACCOUNT_PANIC still false");
-        }
+        assert!(i <= 10, "FROZEN_ACCOUNT_PANIC still false");
     }

     // The validator is now broken and won't shutdown properly. Avoid LocalCluster panic in Drop
@@ -3340,12 +3338,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
     let now = Instant::now();
     loop {
         let elapsed = now.elapsed();
-        if elapsed > Duration::from_secs(30) {
-            panic!(
-                "LocalCluster nodes failed to log enough tower votes in {} secs",
-                elapsed.as_secs()
-            );
-        }
+        assert!(
+            elapsed <= Duration::from_secs(30),
+            "LocalCluster nodes failed to log enough tower votes in {} secs",
+            elapsed.as_secs()
+        );
         sleep(Duration::from_millis(100));

         if let Some((last_vote, _)) = last_vote_in_tower(&val_b_ledger_path, &validator_b_pubkey) {

View File

@@ -153,9 +153,10 @@ fn process_iftop_logs(matches: &ArgMatches) {
 fn analyze_logs(matches: &ArgMatches) {
     let dir_path = PathBuf::from(value_t_or_exit!(matches, "folder", String));
-    if !dir_path.is_dir() {
-        panic!("Need a folder that contains all log files");
-    }
+    assert!(
+        dir_path.is_dir(),
+        "Need a folder that contains all log files"
+    );
     let list_all_diffs = matches.is_present("all");
     let files = fs::read_dir(dir_path).expect("Failed to read log folder");
     let logs: Vec<_> = files

View File

@@ -389,9 +389,7 @@ fn shape_network_steps(
     my_index: u64,
 ) -> bool {
     // Integrity checks
-    if !topology.verify() {
-        panic!("Failed to verify the configuration file");
-    }
+    assert!(topology.verify(), "Failed to verify the configuration file");
     assert!(my_index < network_size);

     // Figure out partition we belong in
@@ -479,9 +477,7 @@ fn configure(matches: &ArgMatches) {
         NetworkTopology::new_random(max_partitions, max_drop, max_delay)
     };

-    if !config.verify() {
-        panic!("Failed to verify the configuration");
-    }
+    assert!(config.verify(), "Failed to verify the configuration");

     let topology = serde_json::to_string(&config).expect("Failed to write as JSON");

View File

@@ -28,13 +28,14 @@ fn pin<T>(_mem: &mut Vec<T>) {
         let err = unsafe {
             (api.cuda_host_register)(ptr as *mut c_void, size, /*flags=*/ 0)
         };
-        if err != CUDA_SUCCESS {
-            panic!(
-                "cudaHostRegister error: {} ptr: {:?} bytes: {}",
-                err, ptr, size,
-            );
-        }
+        assert!(
+            err == CUDA_SUCCESS,
+            "cudaHostRegister error: {} ptr: {:?} bytes: {}",
+            err,
+            ptr,
+            size
+        );
     }
 }

 fn unpin<T>(_mem: *mut T) {
@@ -42,9 +43,12 @@ fn unpin<T>(_mem: *mut T) {
         use std::ffi::c_void;

         let err = unsafe { (api.cuda_host_unregister)(_mem as *mut c_void) };
-        if err != CUDA_SUCCESS {
-            panic!("cudaHostUnregister returned: {} ptr: {:?}", err, _mem);
-        }
+        assert!(
+            err == CUDA_SUCCESS,
+            "cudaHostUnregister returned: {} ptr: {:?}",
+            err,
+            _mem
+        );
     }
 }

View File

@@ -18,7 +18,6 @@ const RECYCLER_SHRINK_WINDOW: usize = 16384;
 #[derive(Debug, Default)]
 struct RecyclerStats {
     total: AtomicUsize,
-    freed: AtomicUsize,
     reuse: AtomicUsize,
     max_gc: AtomicUsize,
 }

View File

@@ -96,9 +96,7 @@ pub fn init() {
     if let Some(api) = perf_libs::api() {
         unsafe {
             (api.ed25519_set_verbose)(true);
-            if !(api.ed25519_init)() {
-                panic!("ed25519_init() failed");
-            }
+            assert!((api.ed25519_init)(), "ed25519_init() failed");
             (api.ed25519_set_verbose)(false);
         }
     }
@@ -413,7 +411,7 @@ pub fn ed25519_verify_cpu(batches: &mut [Packets]) {
     PAR_THREAD_POOL.install(|| {
         batches
             .into_par_iter()
            .for_each(|p| p.packets.par_iter_mut().for_each(verify_packet))
     });
     inc_new_counter_debug!("ed25519_verify_cpu", count);
 }
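Unlike the nonce sites earlier, this closure really is redundant: the inner function already takes the iterator's `&mut` item directly, so the function name can be passed as-is. A trivial sketch of the same eta-reduction:

```rust
// Hypothetical stand-in for verify_packet; it takes &mut T like that site.
fn double(x: &mut u32) {
    *x *= 2;
}

fn main() {
    let mut values = vec![1u32, 2, 3];
    // Before: values.iter_mut().for_each(|v| double(v));
    values.iter_mut().for_each(double); // clippy::redundant_closure fix
    assert_eq!(values, vec![2, 4, 6]);
}
```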

View File

@@ -21,18 +21,11 @@ use std::{
     sync::Arc,
 };

+#[derive(Default)]
 pub struct Executors {
     pub executors: HashMap<Pubkey, Arc<dyn Executor>>,
     pub is_dirty: bool,
 }

-impl Default for Executors {
-    fn default() -> Self {
-        Self {
-            executors: HashMap::default(),
-            is_dirty: false,
-        }
-    }
-}
-
 impl Executors {
     pub fn insert(&mut self, key: Pubkey, executor: Arc<dyn Executor>) {
         let _ = self.executors.insert(key, executor);
@@ -267,7 +260,7 @@ impl PreAccount {
     }
 }

-#[derive(Deserialize, Serialize)]
+#[derive(Default, Deserialize, Serialize)]
 pub struct InstructionProcessor {
     #[serde(skip)]
     programs: Vec<(Pubkey, ProcessInstructionWithContext)>,
@@ -279,7 +272,9 @@ impl std::fmt::Debug for InstructionProcessor {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
         #[derive(Debug)]
         struct MessageProcessor<'a> {
+            #[allow(dead_code)]
             programs: Vec<String>,
+            #[allow(dead_code)]
             native_loader: &'a NativeLoader,
         }
@@ -309,14 +304,6 @@ impl std::fmt::Debug for InstructionProcessor {
     }
 }

-impl Default for InstructionProcessor {
-    fn default() -> Self {
-        Self {
-            programs: vec![],
-            native_loader: NativeLoader::default(),
-        }
-    }
-}
-
 impl Clone for InstructionProcessor {
     fn clone(&self) -> Self {
         InstructionProcessor {

View File

@@ -322,9 +322,11 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
                         break;
                     }
                 }
-                if !program_signer {
-                    panic!("Missing signer for {}", instruction_account.pubkey);
-                }
+                assert!(
+                    program_signer,
+                    "Missing signer for {}",
+                    instruction_account.pubkey
+                );
             }
         }
     }
@@ -355,15 +357,14 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
                 unsafe { transmute::<&Pubkey, &mut Pubkey>(account_info.owner) };
            *account_info_mut = *account.borrow().owner();
         }

-        if data.len() != new_data.len() {
-            // TODO: Figure out how to allow the System Program to resize the account data
-            panic!(
-                "Account data resizing not supported yet: {} -> {}. \
-                Consider making this test conditional on `#[cfg(feature = \"test-bpf\")]`",
-                data.len(),
-                new_data.len()
-            );
-        }
+        // TODO: Figure out how to allow the System Program to resize the account data
+        assert!(
+            data.len() == new_data.len(),
+            "Account data resizing not supported yet: {} -> {}. \
+            Consider making this test conditional on `#[cfg(feature = \"test-bpf\")]`",
+            data.len(),
+            new_data.len()
+        );
         data.clone_from_slice(new_data);
     }
} }

View File

@@ -6,6 +6,7 @@ use std::alloc::Layout;
 #[derive(Debug)]
 pub struct BpfAllocator {
+    #[allow(dead_code)]
     heap: AlignedMemory,
     start: u64,
     len: u64,

View File

@@ -1822,7 +1822,9 @@ struct SolAccountInfo {
     data_addr: u64,
     owner_addr: u64,
     rent_epoch: u64,
+    #[allow(dead_code)]
     is_signer: bool,
+    #[allow(dead_code)]
     is_writable: bool,
     executable: bool,
 }
@@ -1837,7 +1839,9 @@ struct SolSignerSeedC {
 /// Rust representation of C's SolSignerSeeds
 #[derive(Debug)]
 struct SolSignerSeedsC {
+    #[allow(dead_code)]
     addr: u64,
+    #[allow(dead_code)]
     len: u64,
 }
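Many hunks in this commit annotate fields rather than delete them: the fields exist for FFI layout parity, RAII ownership, or Debug output, and the stricter `dead_code` analysis in this nightly no longer counts derived impls as uses. A tiny sketch of the pattern (the claim about derives is my reading of why these allows were needed):

```rust
// Sketch: a field kept only to mirror a C struct's layout. On this
// toolchain, #[derive(Debug)] does not count as a "use" for dead_code,
// so the never-read field is allowed rather than removed.
#[derive(Debug)]
struct SignerSeed {
    addr: u64,
    #[allow(dead_code)] // matched against the C side; never read from Rust
    len: u64,
}

fn main() {
    let seed = SignerSeed { addr: 0x1000, len: 32 };
    println!("addr = {:#x}, full struct: {:?}", seed.addr, seed);
}
```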

View File

@@ -235,7 +235,7 @@ impl VoteState {
     // utility function, used by Stakes, tests
     pub fn to<T: WritableAccount>(versioned: &VoteStateVersions, account: &mut T) -> Option<()> {
-        Self::serialize(versioned, &mut account.data_as_mut_slice()).ok()
+        Self::serialize(versioned, account.data_as_mut_slice()).ok()
     }

     pub fn deserialize(input: &[u8]) -> Result<Self, InstructionError> {

View File

@@ -224,6 +224,7 @@ pub trait RemoteWallet {
 /// `RemoteWallet` device
 #[derive(Debug)]
 pub struct Device {
+    #[allow(dead_code)]
     pub(crate) path: String,
     pub(crate) info: RemoteWalletInfo,
     pub wallet_type: RemoteWalletType,

View File

@@ -99,9 +99,9 @@ impl OptimisticallyConfirmedBankTracker {
         bank_forks: &Arc<RwLock<BankForks>>,
         optimistically_confirmed_bank: &Arc<RwLock<OptimisticallyConfirmedBank>>,
         subscriptions: &Arc<RpcSubscriptions>,
-        mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
-        mut last_notified_confirmed_slot: &mut Slot,
-        mut highest_confirmed_slot: &mut Slot,
+        pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
+        last_notified_confirmed_slot: &mut Slot,
+        highest_confirmed_slot: &mut Slot,
         bank_notification_subscribers: &Option<Arc<RwLock<Vec<BankNotificationSender>>>>,
     ) -> Result<(), RecvTimeoutError> {
         let notification = receiver.recv_timeout(Duration::from_secs(1))?;
@@ -110,9 +110,9 @@ impl OptimisticallyConfirmedBankTracker {
             bank_forks,
             optimistically_confirmed_bank,
             subscriptions,
-            &mut pending_optimistically_confirmed_banks,
-            &mut last_notified_confirmed_slot,
-            &mut highest_confirmed_slot,
+            pending_optimistically_confirmed_banks,
+            last_notified_confirmed_slot,
+            highest_confirmed_slot,
             bank_notification_subscribers,
         );
         Ok(())
@@ -169,8 +169,8 @@ impl OptimisticallyConfirmedBankTracker {
         bank_forks: &Arc<RwLock<BankForks>>,
         bank: &Arc<Bank>,
         slot_threshold: Slot,
-        mut last_notified_confirmed_slot: &mut Slot,
-        mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
+        last_notified_confirmed_slot: &mut Slot,
+        pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
         bank_notification_subscribers: &Option<Arc<RwLock<Vec<BankNotificationSender>>>>,
     ) {
         for confirmed_bank in bank.clone().parents_inclusive().iter().rev() {
@@ -183,8 +183,8 @@ impl OptimisticallyConfirmedBankTracker {
                     subscriptions,
                     bank_forks,
                     confirmed_bank,
-                    &mut last_notified_confirmed_slot,
-                    &mut pending_optimistically_confirmed_banks,
+                    last_notified_confirmed_slot,
+                    pending_optimistically_confirmed_banks,
                     bank_notification_subscribers,
                 );
             }
@@ -196,8 +196,8 @@ impl OptimisticallyConfirmedBankTracker {
         bank_forks: &Arc<RwLock<BankForks>>,
         optimistically_confirmed_bank: &Arc<RwLock<OptimisticallyConfirmedBank>>,
         subscriptions: &Arc<RpcSubscriptions>,
-        mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
-        mut last_notified_confirmed_slot: &mut Slot,
+        pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
+        last_notified_confirmed_slot: &mut Slot,
         highest_confirmed_slot: &mut Slot,
         bank_notification_subscribers: &Option<Arc<RwLock<Vec<BankNotificationSender>>>>,
     ) {
@@ -219,8 +219,8 @@ impl OptimisticallyConfirmedBankTracker {
                 bank_forks,
                 bank,
                 *highest_confirmed_slot,
-                &mut last_notified_confirmed_slot,
-                &mut pending_optimistically_confirmed_banks,
+                last_notified_confirmed_slot,
+                pending_optimistically_confirmed_banks,
                 bank_notification_subscribers,
             );
@@ -273,8 +273,8 @@ impl OptimisticallyConfirmedBankTracker {
                 bank_forks,
                 &bank,
                 *last_notified_confirmed_slot,
-                &mut last_notified_confirmed_slot,
-                &mut pending_optimistically_confirmed_banks,
+                last_notified_confirmed_slot,
+                pending_optimistically_confirmed_banks,
                 bank_notification_subscribers,
             );

View File

@@ -155,6 +155,7 @@ pub struct JsonRpcRequestProcessor {
     blockstore: Arc<Blockstore>,
     config: JsonRpcConfig,
     snapshot_config: Option<SnapshotConfig>,
+    #[allow(dead_code)]
     validator_exit: Arc<RwLock<Exit>>,
     health: Arc<RpcHealth>,
     cluster_info: Arc<ClusterInfo>,

View File

@@ -183,9 +183,10 @@ impl TestBroadcastReceiver {
                     }
                 }
                 Err(TryRecvError::Empty) => {
-                    if started.elapsed() > timeout {
-                        panic!("TestBroadcastReceiver: no data, timeout reached");
-                    }
+                    assert!(
+                        started.elapsed() <= timeout,
+                        "TestBroadcastReceiver: no data, timeout reached"
+                    );
                     sleep(Duration::from_millis(50));
                 }
                 Err(err) => panic!("broadcast receiver error: {}", err),

View File

@@ -116,12 +116,6 @@ impl std::fmt::Debug for NotificationEntry {
     }
 }

-#[derive(Default, Clone)]
-struct ProgramConfig {
-    filters: Vec<RpcFilterType>,
-    encoding: Option<UiAccountEncoding>,
-}
-
 #[allow(clippy::type_complexity)]
 fn check_commitment_and_notify<P, S, B, F, X>(
     params: &P,

View File

@@ -926,9 +926,9 @@ impl Accounts {
         let keys: Vec<_> = txs
             .map(|tx| tx.get_account_locks(demote_program_write_locks))
             .collect();
-        let mut account_locks = &mut self.account_locks.lock().unwrap();
+        let account_locks = &mut self.account_locks.lock().unwrap();
         keys.into_iter()
-            .map(|keys| self.lock_account(&mut account_locks, keys.writable, keys.readonly))
+            .map(|keys| self.lock_account(account_locks, keys.writable, keys.readonly))
             .collect()
     }

View File

@@ -216,6 +216,7 @@ struct GenerateIndexTimings {
     pub insertion_time_us: u64,
     pub min_bin_size: usize,
     pub max_bin_size: usize,
+    #[allow(dead_code)]
     pub total_items: usize,
     pub storage_size_accounts_map_us: u64,
     pub storage_size_storages_us: u64,
@@ -971,11 +972,13 @@ pub struct AccountsDb {
     // used by tests
     // holds this until we are dropped
+    #[allow(dead_code)]
     temp_accounts_hash_cache_path: Option<TempDir>,

     pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,

     /// Directory of paths this accounts_db needs to hold/remove
+    #[allow(dead_code)]
     pub(crate) temp_paths: Option<Vec<TempDir>>,

     /// Starting file size of appendvecs
@@ -1168,9 +1171,13 @@ impl PurgeStats {
 #[derive(Debug)]
 struct FlushStats {
+    #[allow(dead_code)]
     slot: Slot,
+    #[allow(dead_code)]
     num_flushed: usize,
+    #[allow(dead_code)]
     num_purged: usize,
+    #[allow(dead_code)]
     total_size: u64,
 }
@@ -3636,10 +3643,10 @@ impl AccountsDb {
             Self::page_align(size),
         ));

-        if store.append_vec_id() == CACHE_VIRTUAL_STORAGE_ID {
-            panic!("We've run out of storage ids!");
-        }
+        assert!(
+            store.append_vec_id() != CACHE_VIRTUAL_STORAGE_ID,
+            "We've run out of storage ids!"
+        );

         debug!(
             "creating store: {} slot: {} len: {} size: {} from: {} path: {:?}",
             store.append_vec_id(),
@@ -11734,11 +11741,10 @@ pub mod tests {
                 let slot_accounts = accounts_db.scan_account_storage(
                     *slot as Slot,
                     |loaded_account: LoadedAccount| {
-                        if is_cache_at_limit {
-                            panic!(
-                                "When cache is at limit, all roots should have been flushed to storage"
-                            );
-                        }
+                        assert!(
+                            !is_cache_at_limit,
+                            "When cache is at limit, all roots should have been flushed to storage"
+                        );
                         // All slots <= requested_flush_root should have been flushed, regardless
                         // of ongoing scans
                         assert!(*slot > requested_flush_root);

View File

@@ -680,7 +680,7 @@ impl AccountsHash {
         max_bin: usize,
     ) -> (Hash, u64, PreviousPass) {
         let (mut hashes, mut total_lamports) =
-            Self::de_dup_and_eliminate_zeros(data_sections_by_pubkey, &mut stats, max_bin);
+            Self::de_dup_and_eliminate_zeros(data_sections_by_pubkey, stats, max_bin);

         total_lamports += previous_state.lamports;

View File

@@ -732,7 +732,6 @@ type AccountMapsReadLock<'a, T> = RwLockReadGuard<'a, MapType<T>>;
 #[derive(Debug, Default)]
 pub struct ScanSlotTracker {
     is_removed: bool,
-    ref_count: u64,
 }

 impl ScanSlotTracker {

View File

@@ -685,6 +685,7 @@ pub(crate) struct BankFieldsToDeserialize {
     pub(crate) ns_per_slot: u128,
     pub(crate) genesis_creation_time: UnixTimestamp,
     pub(crate) slots_per_year: f64,
+    #[allow(dead_code)]
     pub(crate) unused: u64,
     pub(crate) slot: Slot,
     pub(crate) epoch: Epoch,
@@ -2902,9 +2903,11 @@ impl Bank {
         self.fee_calculator = self.fee_rate_governor.create_fee_calculator();

         for (pubkey, account) in genesis_config.accounts.iter() {
-            if self.get_account(pubkey).is_some() {
-                panic!("{} repeated in genesis config", pubkey);
-            }
+            assert!(
+                self.get_account(pubkey).is_none(),
+                "{} repeated in genesis config",
+                pubkey
+            );
             self.store_account(pubkey, &AccountSharedData::from(account.clone()));
             self.capitalization.fetch_add(account.lamports(), Relaxed);
         }
@@ -2913,9 +2916,11 @@ impl Bank {
         self.update_fees();

         for (pubkey, account) in genesis_config.rewards_pools.iter() {
-            if self.get_account(pubkey).is_some() {
-                panic!("{} repeated in genesis config", pubkey);
-            }
+            assert!(
+                self.get_account(pubkey).is_none(),
+                "{} repeated in genesis config",
+                pubkey
+            );
             self.store_account(pubkey, &AccountSharedData::from(account.clone()));
         }
@@ -3514,23 +3519,14 @@ impl Bank {
         sanitized_txs: &[SanitizedTransaction],
         lock_results: &[Result<()>],
         max_age: usize,
-        mut error_counters: &mut ErrorCounters,
+        error_counters: &mut ErrorCounters,
     ) -> Vec<TransactionCheckResult> {
-        let age_results = self.check_age(
-            sanitized_txs.iter(),
-            lock_results,
-            max_age,
-            &mut error_counters,
-        );
-        let cache_results =
-            self.check_status_cache(sanitized_txs, age_results, &mut error_counters);
+        let age_results =
+            self.check_age(sanitized_txs.iter(), lock_results, max_age, error_counters);
+        let cache_results = self.check_status_cache(sanitized_txs, age_results, error_counters);
         if self.upgrade_epoch() {
             // Reject all non-vote transactions
-            self.filter_by_vote_transactions(
-                sanitized_txs.iter(),
-                cache_results,
-                &mut error_counters,
-            )
+            self.filter_by_vote_transactions(sanitized_txs.iter(), cache_results, error_counters)
         } else {
             cache_results
         }

View File

@@ -75,9 +75,11 @@ pub(crate) struct DeserializableVersionedBank {
     pub(crate) epoch_schedule: EpochSchedule,
     pub(crate) inflation: Inflation,
     pub(crate) stakes: Stakes,
+    #[allow(dead_code)]
     pub(crate) unused_accounts: UnusedAccounts,
     pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
     pub(crate) is_delta: bool,
+    #[allow(dead_code)]
     pub(crate) message_processor: InstructionProcessor,
 }

View File

@@ -153,6 +153,7 @@ struct SnapshotRootPaths {
 /// Helper type to bundle up the results from `unarchive_snapshot()`
 #[derive(Debug)]
 struct UnarchivedSnapshot {
+    #[allow(dead_code)]
     unpack_dir: TempDir,
     unpacked_append_vec_map: UnpackedAppendVecMap,
     unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion,
@@ -464,7 +465,7 @@ pub fn deserialize_snapshot_data_file<T: Sized>(
     deserializer: impl FnOnce(&mut BufReader<File>) -> Result<T>,
 ) -> Result<T> {
     let wrapped_deserializer = move |streams: &mut SnapshotStreams<File>| -> Result<T> {
-        deserializer(&mut streams.full_snapshot_stream)
+        deserializer(streams.full_snapshot_stream)
     };

     let wrapped_data_file_path = SnapshotRootPaths {
@@ -1460,12 +1461,12 @@ fn rebuild_bank_from_snapshots(
             .map(|root_paths| root_paths.snapshot_path),
     };

-    let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |mut snapshot_streams| {
+    let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| {
         Ok(
             match incremental_snapshot_version.unwrap_or(full_snapshot_version) {
                 SnapshotVersion::V1_2_0 => bank_from_streams(
                     SerdeStyle::Newer,
-                    &mut snapshot_streams,
+                    snapshot_streams,
                     account_paths,
                     unpacked_append_vec_map,
                     genesis_config,

View File

@@ -5,7 +5,7 @@ use crate::message::Message;
 use crate::secp256k1_program;
 use log::*;

-#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug, AbiExample)]
+#[derive(Serialize, Deserialize, Default, PartialEq, Eq, Clone, Debug, AbiExample)]
 #[serde(rename_all = "camelCase")]
 pub struct FeeCalculator {
     // The current cost of a signature This amount may increase/decrease over time based on
@@ -13,14 +13,6 @@ pub struct FeeCalculator {
     pub lamports_per_signature: u64,
 }

-impl Default for FeeCalculator {
-    fn default() -> Self {
-        Self {
-            lamports_per_signature: 0,
-        }
-    }
-}
-
 impl FeeCalculator {
     pub fn new(lamports_per_signature: u64) -> Self {
         Self {

View File

@@ -51,9 +51,10 @@ pub trait SyscallStubs: Sync + Send {
     /// # Safety
     unsafe fn sol_memcpy(&self, dst: *mut u8, src: *const u8, n: usize) {
         // cannot be overlapping
-        if dst as usize + n > src as usize && src as usize > dst as usize {
-            panic!("memcpy does not support overlapping regions");
-        }
+        assert!(
+            !(dst as usize + n > src as usize && src as usize > dst as usize),
+            "memcpy does not support overlapping regions"
+        );
         std::ptr::copy_nonoverlapping(src, dst, n as usize);
     }
     /// # Safety
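The asserted condition is the same overlap test as before, only negated: with `src` strictly after `dst`, the regions `[dst, dst + n)` and `[src, src + n)` collide exactly when `dst + n > src`. A small sketch of that arithmetic on plain integers (assumed equivalent, since the real check casts the pointers to `usize` first):

```rust
// The copy source and destination, reduced to integer addresses.
fn regions_overlap(dst: usize, src: usize, n: usize) -> bool {
    // Mirrors the asserted condition: only the src-after-dst case is checked.
    dst + n > src && src > dst
}

fn main() {
    assert!(!regions_overlap(0, 16, 16)); // [0,16) and [16,32): adjacent, ok
    assert!(regions_overlap(0, 8, 16));   // [0,16) and [8,24): overlapping
    assert!(!regions_overlap(16, 0, 16)); // dst after src is not covered here
}
```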

View File

@@ -45,9 +45,7 @@ macro_rules! declare_sysvar_id(
             #[cfg(test)]
             #[test]
             fn test_sysvar_id() {
-                if !$crate::sysvar::is_sysvar_id(&id()) {
-                    panic!("sysvar::is_sysvar_id() doesn't know about {}", $name);
-                }
+                assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name);
             }
         )
 );
@@ -72,10 +70,7 @@ macro_rules! declare_deprecated_sysvar_id(
             #[cfg(test)]
             #[test]
             fn test_sysvar_id() {
-                #[allow(deprecated)]
-                if !$crate::sysvar::is_sysvar_id(&id()) {
-                    panic!("sysvar::is_sysvar_id() doesn't know about {}", $name);
-                }
+                assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name);
             }
         )
 );

View File

@@ -277,9 +277,7 @@ impl fmt::Display for GenesisConfig {
             self.accounts
                 .iter()
                 .map(|(pubkey, account)| {
-                    if account.lamports == 0 {
-                        panic!("{:?}", (pubkey, account));
-                    }
+                    assert!(account.lamports > 0, "{:?}", (pubkey, account));
                     account.lamports
                 })
                 .sum::<u64>()

View File

@@ -62,6 +62,7 @@ impl Precompile {
     where
         F: Fn(&Pubkey) -> bool,
     {
+        #![allow(clippy::redundant_closure)]
         self.feature
             .map_or(true, |ref feature_id| is_enabled(feature_id))
             && self.program_id == *program_id

View File

@@ -95,9 +95,7 @@ pub fn receiver(
     use_pinned_memory: bool,
 ) -> JoinHandle<()> {
     let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
-    if res.is_err() {
-        panic!("streamer::receiver set_read_timeout error");
-    }
+    assert!(!res.is_err(), "streamer::receiver set_read_timeout error");
     let exit = exit.clone();
     Builder::new()
         .name("solana-receiver".to_string())

View File

@@ -99,9 +99,9 @@ pub fn parse_token(
                 "destination": account_keys[instruction.accounts[1] as usize].to_string(),
                 "amount": amount.to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -120,9 +120,9 @@ pub fn parse_token(
                 "delegate": account_keys[instruction.accounts[1] as usize].to_string(),
                 "amount": amount.to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -139,9 +139,9 @@ pub fn parse_token(
             let mut value = json!({
                 "source": account_keys[instruction.accounts[0] as usize].to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 1,
                 account_keys,
                 &instruction.accounts,
@@ -170,9 +170,9 @@ pub fn parse_token(
                     COption::None => None,
                 },
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 1,
                 account_keys,
                 &instruction.accounts,
@@ -191,9 +191,9 @@ pub fn parse_token(
                 "account": account_keys[instruction.accounts[1] as usize].to_string(),
                 "amount": amount.to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -212,9 +212,9 @@ pub fn parse_token(
                 "mint": account_keys[instruction.accounts[1] as usize].to_string(),
                 "amount": amount.to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -232,9 +232,9 @@ pub fn parse_token(
                 "account": account_keys[instruction.accounts[0] as usize].to_string(),
                 "destination": account_keys[instruction.accounts[1] as usize].to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -252,9 +252,9 @@ pub fn parse_token(
                 "account": account_keys[instruction.accounts[0] as usize].to_string(),
                 "mint": account_keys[instruction.accounts[1] as usize].to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -272,9 +272,9 @@ pub fn parse_token(
                 "account": account_keys[instruction.accounts[0] as usize].to_string(),
                 "mint": account_keys[instruction.accounts[1] as usize].to_string(),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -294,9 +294,9 @@ pub fn parse_token(
                 "destination": account_keys[instruction.accounts[2] as usize].to_string(),
                 "tokenAmount": token_amount_to_ui_amount(amount, decimals),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 3,
                 account_keys,
                 &instruction.accounts,
@@ -316,9 +316,9 @@ pub fn parse_token(
                 "delegate": account_keys[instruction.accounts[2] as usize].to_string(),
                 "tokenAmount": token_amount_to_ui_amount(amount, decimals),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 3,
                 account_keys,
                 &instruction.accounts,
@@ -337,9 +337,9 @@ pub fn parse_token(
                 "account": account_keys[instruction.accounts[1] as usize].to_string(),
                 "tokenAmount": token_amount_to_ui_amount(amount, decimals),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,
@@ -358,9 +358,9 @@ pub fn parse_token(
                 "mint": account_keys[instruction.accounts[1] as usize].to_string(),
                 "tokenAmount": token_amount_to_ui_amount(amount, decimals),
             });
-            let mut map = value.as_object_mut().unwrap();
+            let map = value.as_object_mut().unwrap();
             parse_signers(
-                &mut map,
+                map,
                 2,
                 account_keys,
                 &instruction.accounts,

View File

@@ -51,7 +51,7 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option<u8> {
 pub fn collect_token_balances(
     bank: &Bank,
     batch: &TransactionBatch,
-    mut mint_decimals: &mut HashMap<Pubkey, u8>,
+    mint_decimals: &mut HashMap<Pubkey, u8>,
 ) -> TransactionTokenBalances {
     let mut balances: TransactionTokenBalances = vec![];
@@ -59,7 +59,7 @@ pub fn collect_token_balances(
         let has_token_program = transaction
             .message()
             .account_keys_iter()
-            .any(|p| is_token_program(p));
+            .any(is_token_program);

         let mut transaction_balances: Vec<TransactionTokenBalance> = vec![];
         if has_token_program {
@@ -69,7 +69,7 @@ pub fn collect_token_balances(
                 }
                 if let Some((mint, ui_token_amount)) =
-                    collect_token_balance_from_account(bank, account_id, &mut mint_decimals)
+                    collect_token_balance_from_account(bank, account_id, mint_decimals)
                 {
                     transaction_balances.push(TransactionTokenBalance {
                         account_index: index as u8,

View File

@@ -62,8 +62,7 @@ fn main() {
         let median: i64 = v["median"].to_string().parse().unwrap();
         let deviation: i64 = v["deviation"].to_string().parse().unwrap();

-        if upload_metrics {
-            panic!("TODO...");
+        assert!(!upload_metrics, "TODO");
             /*
             solana_metrics::datapoint_info!(
                 &v["name"].as_str().unwrap().trim_matches('\"'),
@@ -74,7 +73,7 @@ fn main() {
                 ("commit", git_commit_hash.trim().to_string(), String)
             );
             */
-        }
+
         let last_median =
             get_last_metrics(&"median".to_string(), &db, &name, branch).unwrap_or_default();
         let last_deviation = get_last_metrics(&"deviation".to_string(), &db, &name, branch)