From 6514096a675ba6962a7fe105353155e22421cfe3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?=
Date: Fri, 18 Jun 2021 15:34:46 +0200
Subject: [PATCH] chore: cargo +nightly clippy --fix -Z unstable-options

---
 account-decoder/src/lib.rs | 12 +-
 account-decoder/src/parse_config.rs | 6 +-
 accounts-cluster-bench/src/main.rs | 8 +-
 banking-bench/src/main.rs | 2 +-
 banks-client/src/lib.rs | 4 +-
 banks-server/src/banks_server.rs | 2 +-
 banks-server/src/send_transaction_service.rs | 4 +-
 bench-exchange/src/bench.rs | 18 +--
 bench-streamer/src/main.rs | 2 +-
 bench-tps/src/bench.rs | 10 +-
 bench-tps/src/main.rs | 6 +-
 clap-utils/src/keypair.rs | 8 +-
 cli-config/src/config.rs | 10 +-
 cli-output/src/cli_output.rs | 6 +-
 cli/src/cli.rs | 62 ++++-----
 cli/src/cluster_query.rs | 2 +-
 cli/src/inflation.rs | 2 +-
 cli/src/main.rs | 8 +-
 cli/src/nonce.rs | 16 +--
 cli/src/program.rs | 30 ++---
 cli/src/spend_utils.rs | 2 +-
 cli/src/stake.rs | 44 +++----
 cli/src/validator_info.rs | 10 +-
 cli/src/vote.rs | 14 +--
 cli/tests/program.rs | 42 +++----
 client/src/rpc_cache.rs | 2 +-
 client/src/rpc_client.rs | 12 +-
 client/src/thin_client.rs | 4 +-
 client/src/tpu_client.rs | 2 +-
 core/benches/banking_stage.rs | 2 +-
 core/benches/consensus.rs | 4 +-
 core/src/accounts_hash_verifier.rs | 2 +-
 core/src/banking_stage.rs | 42 +++----
 core/src/broadcast_stage.rs | 4 +-
 .../broadcast_duplicates_run.rs | 4 +-
 .../broadcast_stage/standard_broadcast_run.rs | 2 +-
 core/src/cluster_info_vote_listener.rs | 10 +-
 core/src/cluster_slot_state_verifier.rs | 2 +-
 core/src/commitment_service.rs | 6 +-
 core/src/consensus.rs | 28 ++---
 core/src/cost_model.rs | 4 +-
 core/src/cost_tracker.rs | 4 +-
 core/src/execute_cost_table.rs | 6 +-
 core/src/fetch_stage.rs | 10 +-
 core/src/heaviest_subtree_fork_choice.rs | 30 ++---
 core/src/ledger_cleanup_service.rs | 2 +-
 core/src/optimistic_confirmation_verifier.rs | 2 +-
 core/src/progress_map.rs | 2 +-
 core/src/repair_service.rs | 8 +-
 core/src/repair_weight.rs | 16 +--
 core/src/repair_weighted_traversal.rs | 2 +-
 core/src/replay_stage.rs | 74 +++++------
 core/src/serve_repair.rs | 10 +-
 core/src/serve_repair_service.rs | 2 +-
 core/src/shred_fetch_stage.rs | 8 +-
 core/src/tpu.rs | 12 +-
 core/src/tvu.rs | 12 +-
 .../unfrozen_gossip_verified_vote_hashes.rs | 2 +-
 core/src/validator.rs | 22 ++--
 core/src/window_service.rs | 6 +-
 core/tests/fork-selection.rs | 4 +-
 core/tests/snapshots.rs | 26 ++--
 dos/src/main.rs | 4 +-
 faucet/src/faucet.rs | 4 +-
 faucet/tests/local-faucet.rs | 2 +-
 frozen-abi/macro/src/lib.rs | 8 +-
 frozen-abi/src/abi_example.rs | 2 +-
 genesis-utils/src/lib.rs | 6 +-
 genesis/src/genesis_accounts.rs | 10 +-
 genesis/src/main.rs | 12 +-
 gossip/src/cluster_info.rs | 10 +-
 gossip/src/contact_info.rs | 12 +-
 gossip/src/crds_gossip.rs | 2 +-
 gossip/src/crds_gossip_push.rs | 2 +-
 gossip/src/crds_value.rs | 6 +-
 gossip/src/gossip_service.rs | 4 +-
 gossip/src/main.rs | 4 +-
 gossip/tests/crds_gossip.rs | 8 +-
 install/src/command.rs | 12 +-
 install/src/lib.rs | 20 +--
 keygen/src/keygen.rs | 16 +--
 ledger-tool/src/bigtable.rs | 2 +-
 ledger-tool/src/main.rs | 42 +++----
 ledger-tool/tests/basic.rs | 4 +-
 ledger/src/bank_forks_utils.rs | 12 +-
 ledger/src/blockstore.rs | 30 ++---
 ledger/src/blockstore_processor.rs | 20 +--
 ledger/src/entry.rs | 14 +--
 ledger/src/leader_schedule_utils.rs | 4 +-
 ledger/src/poh.rs | 22 ++--
 ledger/src/shred.rs | 4 +-
 ledger/src/sigverify_shreds.rs | 4 +-
 local-cluster/src/cluster_tests.rs | 16 +--
 local-cluster/src/local_cluster.rs | 10 +-
 local-cluster/tests/local_cluster.rs | 50 ++++----
 measure/src/measure.rs | 2 +-
 metrics/src/counter.rs | 2 +-
 perf/src/packet.rs | 2 +-
 perf/src/sigverify.rs | 8 +-
 poh/benches/poh_verify.rs | 4 +-
 poh/src/poh_recorder.rs | 4 +-
 program-test/src/lib.rs | 10 +-
 programs/bpf_loader/benches/serialization.rs | 8 +-
 programs/bpf_loader/build.rs | 2 +-
 programs/bpf_loader/src/lib.rs | 8 +-
 programs/bpf_loader/src/serialization.rs | 16 +--
 programs/bpf_loader/src/syscalls.rs | 8 +-
 programs/config/src/config_processor.rs | 20 +--
 programs/config/src/date_instruction.rs | 2 +-
 programs/exchange/src/exchange_processor.rs | 28 ++---
 programs/ownable/src/ownable_instruction.rs | 2 +-
 programs/ownable/src/ownable_processor.rs | 4 +-
 programs/stake/src/config.rs | 2 +-
 programs/stake/src/stake_instruction.rs | 6 +-
 programs/stake/src/stake_state.rs | 20 +--
 programs/vote/src/vote_state/mod.rs | 8 +-
 programs/vote/src/vote_transaction.rs | 2 +-
 rbpf-cli/src/main.rs | 4 +-
 remote-wallet/src/remote_wallet.rs | 4 +-
 rpc/src/parsed_token_accounts.rs | 6 +-
 rpc/src/rpc.rs | 118 +++++++++---------
 rpc/src/rpc_health.rs | 2 +-
 rpc/src/rpc_pubsub.rs | 16 +--
 rpc/src/rpc_service.rs | 2 +-
 rpc/src/rpc_subscriptions.rs | 24 ++--
 rpc/src/send_transaction_service.rs | 4 +-
 runtime/benches/accounts.rs | 2 +-
 runtime/benches/bank.rs | 10 +-
 runtime/src/accounts.rs | 10 +-
 runtime/src/accounts_background_service.rs | 2 +-
 runtime/src/accounts_db.rs | 94 +++++-------
 runtime/src/accounts_hash.rs | 2 +-
 runtime/src/accounts_index.rs | 36 +++---
 runtime/src/ancestors.rs | 2 +-
 runtime/src/append_vec.rs | 8 +-
 runtime/src/bank.rs | 84 ++++++-------
 runtime/src/bank_forks.rs | 2 +-
 runtime/src/epoch_stakes.rs | 2 +-
 runtime/src/genesis_utils.rs | 6 +-
 runtime/src/hardened_unpack.rs | 4 +-
 runtime/src/loader_utils.rs | 18 +--
 runtime/src/message_processor.rs | 14 +--
 runtime/src/native_loader.rs | 2 +-
 runtime/src/non_circulating_supply.rs | 2 +-
 runtime/src/secondary_index.rs | 8 +-
 runtime/src/serde_snapshot/future.rs | 2 +-
 runtime/src/serde_snapshot/tests.rs | 4 +-
 runtime/src/snapshot_utils.rs | 12 +-
 runtime/src/stakes.rs | 10 +-
 runtime/src/status_cache.rs | 2 +-
 runtime/src/system_instruction_processor.rs | 30 ++---
 runtime/tests/accounts.rs | 6 +-
 runtime/tests/stake.rs | 2 +-
 sdk/cargo-build-bpf/src/main.rs | 12 +-
 sdk/cargo-test-bpf/src/main.rs | 6 +-
 sdk/program/src/message.rs | 20 +--
 sdk/program/src/slot_hashes.rs | 4 +-
 sdk/program/src/stake/state.rs | 4 +-
 sdk/src/account.rs | 2 +-
 sdk/src/derivation_path.rs | 4 +-
 sdk/src/genesis_config.rs | 8 +-
 sdk/src/nonce_keyed_account.rs | 32 ++---
 sdk/src/secp256k1_instruction.rs | 6 +-
 sdk/src/signature.rs | 4 +-
 sdk/src/transaction.rs | 4 +-
 stake-accounts/src/args.rs | 6 +-
 stake-accounts/src/main.rs | 6 +-
 stake-accounts/src/stake_accounts.rs | 22 ++--
 storage-bigtable/src/bigtable.rs | 4 +-
 storage-bigtable/src/lib.rs | 2 +-
 tokens/src/commands.rs | 16 +--
 transaction-status/src/token_balances.rs | 12 +-
 upload-perf/src/upload-perf.rs | 6 +-
 validator/src/bin/solana-test-validator.rs | 2 +-
 validator/src/dashboard.rs | 2 +-
 validator/src/main.rs | 30 ++---
 watchtower/src/main.rs | 6 +-
 177 files changed, 1021 insertions(+), 1021 deletions(-)

diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs
index 7f1e7c40c7..904ae3fbd2 100644
--- a/account-decoder/src/lib.rs
+++ b/account-decoder/src/lib.rs
@@ -69,32 +69,32 @@ impl UiAccount {
     ) -> Self {
         let data = match encoding {
             UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
-
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), + bs58::encode(slice_data(account.data(), data_slice_config)).into_string(), ), UiAccountEncoding::Base58 => UiAccountData::Binary( - bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), + bs58::encode(slice_data(account.data(), data_slice_config)).into_string(), encoding, ), UiAccountEncoding::Base64 => UiAccountData::Binary( - base64::encode(slice_data(&account.data(), data_slice_config)), + base64::encode(slice_data(account.data(), data_slice_config)), encoding, ), UiAccountEncoding::Base64Zstd => { let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); match encoder - .write_all(slice_data(&account.data(), data_slice_config)) + .write_all(slice_data(account.data(), data_slice_config)) .and_then(|()| encoder.finish()) { Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding), Err(_) => UiAccountData::Binary( - base64::encode(slice_data(&account.data(), data_slice_config)), + base64::encode(slice_data(account.data(), data_slice_config)), UiAccountEncoding::Base64, ), } } UiAccountEncoding::JsonParsed => { if let Ok(parsed_data) = - parse_account_data(pubkey, &account.owner(), &account.data(), additional_data) + parse_account_data(pubkey, account.owner(), account.data(), additional_data) { UiAccountData::Json(parsed_data) } else { diff --git a/account-decoder/src/parse_config.rs b/account-decoder/src/parse_config.rs index c545c8c0d8..e4cdf2457e 100644 --- a/account-decoder/src/parse_config.rs +++ b/account-decoder/src/parse_config.rs @@ -37,7 +37,7 @@ fn parse_config_data(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option { let mut tries = 0; loop { @@ -431,7 +431,7 @@ fn run_accounts_bench( if !airdrop_lamports( &client, &faucet_addr, - &payer_keypairs[i], + payer_keypairs[i], lamports * 100_000, ) { warn!("failed airdrop, exiting"); @@ -487,14 +487,14 @@ fn run_accounts_bench( .into_par_iter() .map(|_| { let message = make_close_message( - &payer_keypairs[0], + payer_keypairs[0], &base_keypair, seed_tracker.max_closed.clone(), 1, min_balance, mint.is_some(), ); - let signers: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair]; + let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair]; Transaction::new(&signers, message, recent_blockhash.0) }) .collect(); diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index aeaceaceb6..7dff4bc411 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -195,7 +195,7 @@ fn main() { if !skip_sanity { //sanity check, make sure all the transactions can execute sequentially transactions.iter().for_each(|tx| { - let res = bank.process_transaction(&tx); + let res = bank.process_transaction(tx); assert!(res.is_ok(), "sanity test transactions error: {:?}", res); }); bank.clear_signatures(); diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index e1b85fa3cb..a4a6bf9b68 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -376,8 +376,8 @@ mod tests { let mint_pubkey = &genesis.mint_keypair.pubkey(); let bob_pubkey = solana_sdk::pubkey::new_rand(); - let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1); - let message = Message::new(&[instruction], Some(&mint_pubkey)); + let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1); + let message = Message::new(&[instruction], Some(mint_pubkey)); Runtime::new()?.block_on(async { let client_transport = start_local_server(bank_forks, 
block_commitment_cache).await; diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 6b8675d0e2..53b8d5f552 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -147,7 +147,7 @@ impl Banks for BanksServer { .read() .unwrap() .root_bank() - .get_blockhash_last_valid_slot(&blockhash) + .get_blockhash_last_valid_slot(blockhash) .unwrap(); let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); let info = diff --git a/banks-server/src/send_transaction_service.rs b/banks-server/src/send_transaction_service.rs index 54eb6b3f4d..a1a930e126 100644 --- a/banks-server/src/send_transaction_service.rs +++ b/banks-server/src/send_transaction_service.rs @@ -138,8 +138,8 @@ impl SendTransactionService { result.retried += 1; inc_new_counter_info!("send_transaction_service-retry", 1); Self::send_transaction( - &send_socket, - &tpu_address, + send_socket, + tpu_address, &transaction_info.wire_transaction, ); true diff --git a/bench-exchange/src/bench.rs b/bench-exchange/src/bench.rs index f975d1602f..cffb7605be 100644 --- a/bench-exchange/src/bench.rs +++ b/bench-exchange/src/bench.rs @@ -451,13 +451,13 @@ fn swapper( let to_swap_txs: Vec<_> = to_swap .par_iter() .map(|(signer, swap, profit)| { - let s: &Keypair = &signer; + let s: &Keypair = signer; let owner = &signer.pubkey(); let instruction = exchange_instruction::swap_request( owner, &swap.0.pubkey, &swap.1.pubkey, - &profit, + profit, ); let message = Message::new(&[instruction], Some(&s.pubkey())); Transaction::new(&[s], message, blockhash) @@ -600,7 +600,7 @@ fn trader( src, ), ]; - let message = Message::new(&instructions, Some(&owner_pubkey)); + let message = Message::new(&instructions, Some(owner_pubkey)); Transaction::new(&[owner.as_ref(), trade], message, blockhash) }) .collect(); @@ -739,7 +739,7 @@ pub fn fund_keys(client: &T, source: &Keypair, dests: &[Arc] let mut to_fund_txs: Vec<_> = chunk .par_iter() .map(|(k, m)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), &m); + let instructions = system_instruction::transfer_many(&k.pubkey(), m); let message = Message::new(&instructions, Some(&k.pubkey())); (k.clone(), Transaction::new_unsigned(message)) }) @@ -777,7 +777,7 @@ pub fn fund_keys(client: &T, source: &Keypair, dests: &[Arc] let mut waits = 0; loop { sleep(Duration::from_millis(200)); - to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount)); + to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount)); if to_fund_txs.is_empty() { break; } @@ -836,7 +836,7 @@ pub fn create_token_accounts( ); let request_ix = exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey()); - let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey)); + let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey)); ( (from_keypair, new_keypair), Transaction::new_unsigned(message), @@ -872,7 +872,7 @@ pub fn create_token_accounts( let mut waits = 0; while !to_create_txs.is_empty() { sleep(Duration::from_millis(200)); - to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx)); + to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx)); if to_create_txs.is_empty() { break; } @@ -958,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc>>, tot fn generate_keypairs(num: u64) -> Vec { let mut seed = [0_u8; 32]; - seed.copy_from_slice(&Keypair::new().pubkey().as_ref()); + seed.copy_from_slice(Keypair::new().pubkey().as_ref()); let mut rnd = GenKeys::new(seed); 
rnd.gen_n_keypairs(num) } @@ -989,7 +989,7 @@ pub fn airdrop_lamports( let (blockhash, _fee_calculator, _last_valid_slot) = client .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .expect("Failed to get blockhash"); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) { Ok(transaction) => { let signature = client.async_send_transaction(transaction).unwrap(); diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index e397599187..4e1f070eb0 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -18,7 +18,7 @@ fn producer(addr: &SocketAddr, exit: Arc) -> JoinHandle<()> { msgs.packets.resize(10, Packet::default()); for w in msgs.packets.iter_mut() { w.meta.size = PACKET_DATA_SIZE; - w.meta.set_addr(&addr); + w.meta.set_addr(addr); } let msgs = Arc::new(msgs); spawn(move || loop { diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 084b81ddec..a2c21ce7ef 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -544,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { // re-sign retained to_fund_txes with updated blockhash self.sign(blockhash); - self.send(&client); + self.send(client); // Sleep a few slots to allow transactions to process sleep(Duration::from_secs(1)); - self.verify(&client, to_lamports); + self.verify(client, to_lamports); // retry anything that seems to have dropped through cracks // again since these txs are all or nothing, they're fine to @@ -564,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund .par_iter() .map(|(k, t)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), &t); + let instructions = system_instruction::transfer_many(&k.pubkey(), t); let message = Message::new(&instructions, Some(&k.pubkey())); (*k, Transaction::new_unsigned(message)) }) @@ -617,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { return None; } - let verified = if verify_funding_transfer(&client, &tx, to_lamports) { + let verified = if verify_funding_transfer(&client, tx, to_lamports) { verified_txs.fetch_add(1, Ordering::Relaxed); Some(k.pubkey()) } else { @@ -733,7 +733,7 @@ pub fn airdrop_lamports( ); let (blockhash, _fee_calculator) = get_recent_blockhash(client); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { Ok(transaction) => { let mut tries = 0; loop { diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index abb9b3a7eb..ad2ad5b8f9 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -39,7 +39,7 @@ fn main() { let keypair_count = *tx_count * keypair_multiplier; if *write_to_client_file { info!("Generating {} keypairs", keypair_count); - let (keypairs, _) = generate_keypairs(&id, keypair_count as u64); + let (keypairs, _) = generate_keypairs(id, keypair_count as u64); let num_accounts = keypairs.len() as u64; let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature; @@ -68,7 +68,7 @@ fn main() { } info!("Connecting to the cluster"); - let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { + let nodes = discover_cluster(entrypoint_addr, *num_nodes).unwrap_or_else(|err| { eprintln!("Failed to 
discover {} nodes: {:?}", num_nodes, err); exit(1); }); @@ -135,7 +135,7 @@ fn main() { generate_and_fund_keypairs( client.clone(), Some(*faucet_addr), - &id, + id, keypair_count, *num_lamports_per_account, ) diff --git a/clap-utils/src/keypair.rs b/clap-utils/src/keypair.rs index 38dc7b0a2c..f63227043d 100644 --- a/clap-utils/src/keypair.rs +++ b/clap-utils/src/keypair.rs @@ -506,7 +506,7 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { /// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes pub fn prompt_passphrase(prompt: &str) -> Result> { - let passphrase = prompt_password_stderr(&prompt)?; + let passphrase = prompt_password_stderr(prompt)?; if !passphrase.is_empty() { let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?; if confirmed != passphrase { @@ -586,9 +586,9 @@ pub fn keypair_from_seed_phrase( let keypair = if skip_validation { let passphrase = prompt_passphrase(&passphrase_prompt)?; if legacy { - keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? + keypair_from_seed_phrase_and_passphrase(seed_phrase, &passphrase)? } else { - let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase); + let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, &passphrase); keypair_from_seed_and_derivation_path(&seed, derivation_path)? } } else { @@ -616,7 +616,7 @@ pub fn keypair_from_seed_phrase( if legacy { keypair_from_seed(seed.as_bytes())? } else { - keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)? + keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path)? } }; diff --git a/cli-config/src/config.rs b/cli-config/src/config.rs index f98af53f4a..d9706ef929 100644 --- a/cli-config/src/config.rs +++ b/cli-config/src/config.rs @@ -107,24 +107,24 @@ mod test { #[test] fn compute_websocket_url() { assert_eq!( - Config::compute_websocket_url(&"http://api.devnet.solana.com"), + Config::compute_websocket_url("http://api.devnet.solana.com"), "ws://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://api.devnet.solana.com"), + Config::compute_websocket_url("https://api.devnet.solana.com"), "wss://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"http://example.com:8899"), + Config::compute_websocket_url("http://example.com:8899"), "ws://example.com:8900/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://example.com:1234"), + Config::compute_websocket_url("https://example.com:1234"), "wss://example.com:1235/".to_string() ); - assert_eq!(Config::compute_websocket_url(&"garbage"), String::new()); + assert_eq!(Config::compute_websocket_url("garbage"), String::new()); } } diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index a1605b587a..9d94cfab2d 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -1287,7 +1287,7 @@ impl fmt::Display for CliValidatorInfo { writeln_name_value( f, &format!(" {}:", to_title_case(key)), - &value.as_str().unwrap_or("?"), + value.as_str().unwrap_or("?"), )?; } Ok(()) @@ -1768,7 +1768,7 @@ impl fmt::Display for CliTokenAccount { writeln_name_value( f, "Close authority:", - &account.close_authority.as_ref().unwrap_or(&String::new()), + account.close_authority.as_ref().unwrap_or(&String::new()), )?; Ok(()) } @@ -2006,7 +2006,7 @@ pub fn return_signers_with_config( } pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly { - let object: 
Value = serde_json::from_str(&reply).unwrap(); + let object: Value = serde_json::from_str(reply).unwrap(); let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap(); let blockhash = blockhash_str.parse::().unwrap(); let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new(); diff --git a/cli/src/cli.rs b/cli/src/cli.rs index a306d8c63e..f4f4772fde 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1000,7 +1000,7 @@ fn process_airdrop( let result = request_and_confirm_airdrop(rpc_client, config, &pubkey, lamports); if let Ok(signature) = result { - let signature_cli_message = log_instruction_custom_error::(result, &config)?; + let signature_cli_message = log_instruction_custom_error::(result, config)?; println!("{}", signature_cli_message); let current_balance = rpc_client.get_balance(&pubkey)?; @@ -1013,7 +1013,7 @@ fn process_airdrop( Ok(build_balance_message(current_balance, false, true)) } } else { - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1098,7 +1098,7 @@ fn process_confirm( #[allow(clippy::unnecessary_wraps)] fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult { - let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&transaction); + let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction); let decode_transaction = CliTransaction { decoded_transaction: transaction.clone(), transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json), @@ -1269,7 +1269,7 @@ fn process_transfer( } else { rpc_client.send_and_confirm_transaction_with_spinner(&tx) }; - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1324,7 +1324,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { from_pubkey, seed, program_id, - } => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id), + } => process_create_address_with_seed(config, from_pubkey.as_ref(), seed, program_id), CliCommand::Fees { ref blockhash } => process_fees(&rpc_client, config, blockhash.as_ref()), CliCommand::Feature(feature_subcommand) => { process_feature_subcommand(&rpc_client, config, feature_subcommand) @@ -1347,8 +1347,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::LeaderSchedule { epoch } => { process_leader_schedule(&rpc_client, config, *epoch) } - CliCommand::LiveSlots => process_live_slots(&config), - CliCommand::Logs { filter } => process_logs(&config, filter), + CliCommand::LiveSlots => process_live_slots(config), + CliCommand::Logs { filter } => process_logs(config, filter), CliCommand::Ping { lamports, interval, @@ -1453,7 +1453,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { ), // Get the current nonce CliCommand::GetNonce(nonce_account_pubkey) => { - process_get_nonce(&rpc_client, config, &nonce_account_pubkey) + process_get_nonce(&rpc_client, config, nonce_account_pubkey) } // Get a new nonce CliCommand::NewNonce { @@ -1474,7 +1474,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_nonce_account( &rpc_client, config, - &nonce_account_pubkey, + nonce_account_pubkey, *use_lamports_unit, ), // Withdraw lamports from a nonce account @@ -1487,10 +1487,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_withdraw_from_nonce_account( &rpc_client, config, - &nonce_account, + nonce_account, *nonce_authority, memo.as_ref(), - 
&destination_account_pubkey, + destination_account_pubkey, *lamports, ), @@ -1564,7 +1564,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_deactivate_stake_account( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1590,8 +1590,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_delegate_stake( &rpc_client, config, - &stake_account_pubkey, - &vote_account_pubkey, + stake_account_pubkey, + vote_account_pubkey, *stake_authority, *force, *sign_only, @@ -1618,7 +1618,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_split_stake( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1645,8 +1645,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_merge_stake( &rpc_client, config, - &stake_account_pubkey, - &source_stake_account_pubkey, + stake_account_pubkey, + source_stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1663,7 +1663,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_stake_account( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *use_lamports_unit, *with_rewards, ), @@ -1686,7 +1686,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_stake_authorize( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, new_authorizations, *custodian, *sign_only, @@ -1712,7 +1712,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_stake_set_lockup( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, &mut lockup, *custodian, *sign_only, @@ -1740,8 +1740,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_withdraw_stake( &rpc_client, config, - &stake_account_pubkey, - &destination_account_pubkey, + stake_account_pubkey, + destination_account_pubkey, *amount, *withdraw_authority, *custodian, @@ -1769,7 +1769,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_set_validator_info( &rpc_client, config, - &validator_info, + validator_info, *force_keybase, *info_pubkey, ), @@ -1803,7 +1803,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_vote_account( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *use_lamports_unit, *with_rewards, ), @@ -1830,8 +1830,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_authorize( &rpc_client, config, - &vote_account_pubkey, - &new_authorized_pubkey, + vote_account_pubkey, + new_authorized_pubkey, *vote_authorize, memo.as_ref(), ), @@ -1843,7 +1843,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_update_validator( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *new_identity_account, *withdraw_authority, memo.as_ref(), @@ -1856,7 +1856,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_update_commission( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *commission, *withdraw_authority, memo.as_ref(), @@ -1872,7 +1872,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::Balance { pubkey, use_lamports_unit, - } => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit), + } => process_balance(&rpc_client, config, pubkey, *use_lamports_unit), // 
Confirm the last client transaction by signature CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature), CliCommand::DecodeTransaction(transaction) => { @@ -1892,8 +1892,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_account( &rpc_client, config, - &pubkey, - &output_file, + pubkey, + output_file, *use_lamports_unit, ), CliCommand::Transfer { diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 236ce9c2b3..9a7fff174b 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -122,7 +122,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("our-localhost") .takes_value(false) .value_name("PORT") - .default_value(&DEFAULT_RPC_PORT_STR) + .default_value(DEFAULT_RPC_PORT_STR) .validator(is_port) .help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"), ) diff --git a/cli/src/inflation.rs b/cli/src/inflation.rs index 11d3fbfb52..8ec8233db0 100644 --- a/cli/src/inflation.rs +++ b/cli/src/inflation.rs @@ -102,7 +102,7 @@ fn process_rewards( rewards_epoch: Option, ) -> ProcessResult { let rewards = rpc_client - .get_inflation_reward(&addresses, rewards_epoch) + .get_inflation_reward(addresses, rewards_epoch) .map_err(|err| { if let Some(epoch) = rewards_epoch { format!("Rewards not available for epoch {}", epoch) diff --git a/cli/src/main.rs b/cli/src/main.rs index 164a684dc9..732f9dbc2e 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -184,7 +184,7 @@ pub fn parse_args<'a>( let CliCommandInfo { command, mut signers, - } = parse_command(&matches, &default_signer, &mut wallet_manager)?; + } = parse_command(matches, &default_signer, &mut wallet_manager)?; if signers.is_empty() { if let Ok(signer_info) = @@ -257,7 +257,7 @@ fn main() -> Result<(), Box> { .global(true) .help("Configuration file to use"); if let Some(ref config_file) = *CONFIG_FILE { - arg.default_value(&config_file) + arg.default_value(config_file) } else { arg } @@ -411,10 +411,10 @@ fn main() -> Result<(), Box> { } fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { - if parse_settings(&matches)? { + if parse_settings(matches)? 
{ let mut wallet_manager = None; - let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?; + let (mut config, signers) = parse_args(matches, &mut wallet_manager)?; config.signers = signers.iter().map(|s| s.as_ref()).collect(); let result = process_command(&config)?; println!("{}", result); diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 8e4b625fe8..50d951b5a0 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -364,7 +364,7 @@ pub fn process_authorize_nonce_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_create_nonce_account( @@ -449,7 +449,7 @@ pub fn process_create_nonce_account( let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_get_nonce( @@ -474,10 +474,10 @@ pub fn process_new_nonce( ) -> ProcessResult { check_unique_pubkeys( (&config.signers[0].pubkey(), "cli keypair".to_string()), - (&nonce_account, "nonce_account_pubkey".to_string()), + (nonce_account, "nonce_account_pubkey".to_string()), )?; - if let Err(err) = rpc_client.get_account(&nonce_account) { + if let Err(err) = rpc_client.get_account(nonce_account) { return Err(CliError::BadParameter(format!( "Unable to advance nonce account {}. error: {}", nonce_account, err @@ -487,7 +487,7 @@ pub fn process_new_nonce( let nonce_authority = config.signers[nonce_authority]; let ixs = vec![advance_nonce_account( - &nonce_account, + nonce_account, &nonce_authority.pubkey(), )] .with_memo(memo); @@ -503,7 +503,7 @@ pub fn process_new_nonce( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_show_nonce_account( @@ -522,7 +522,7 @@ pub fn process_show_nonce_account( use_lamports_unit, ..CliNonceAccount::default() }; - if let Some(ref data) = data { + if let Some(data) = data { nonce_account.nonce = Some(data.blockhash.to_string()); nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature); nonce_account.authority = Some(data.authority.to_string()); @@ -566,7 +566,7 @@ pub fn process_withdraw_from_nonce_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } #[cfg(test)] diff --git a/cli/src/program.rs b/cli/src/program.rs index fc0411422c..51201028a4 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -770,7 +770,7 @@ fn process_program_deploy( }; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; - let default_program_keypair = get_default_program_keypair(&program_location); + let default_program_keypair = get_default_program_keypair(program_location); let (program_signer, program_pubkey) = if let Some(i) = program_signer_index { (Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(program_pubkey) = program_pubkey { @@ -846,7 +846,7 @@ fn process_program_deploy( }; let (program_data, program_len) = if let Some(program_location) = program_location { - let program_data = read_and_verify_elf(&program_location)?; + let 
program_data = read_and_verify_elf(program_location)?; let program_len = program_data.len(); (program_data, program_len) } else if buffer_provided { @@ -1262,7 +1262,7 @@ fn process_dump( UpgradeableLoaderState::programdata_data_offset().unwrap_or(0); let program_data = &programdata_account.data[offset..]; let mut f = File::create(output_location)?; - f.write_all(&program_data)?; + f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err( @@ -1282,7 +1282,7 @@ fn process_dump( let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0); let program_data = &account.data[offset..]; let mut f = File::create(output_location)?; - f.write_all(&program_data)?; + f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err(format!( @@ -1313,8 +1313,8 @@ fn close( let mut tx = Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::close( - &account_pubkey, - &recipient_pubkey, + account_pubkey, + recipient_pubkey, &authority_signer.pubkey(), )], Some(&config.signers[0].pubkey()), @@ -1423,7 +1423,7 @@ fn process_close( if close( rpc_client, config, - &address, + address, &recipient_pubkey, authority_signer, ) @@ -1524,7 +1524,7 @@ fn do_process_program_write_and_deploy( .value { complete_partial_program_init( - &loader_id, + loader_id, &config.signers[0].pubkey(), buffer_pubkey, &account, @@ -1554,7 +1554,7 @@ fn do_process_program_write_and_deploy( buffer_pubkey, minimum_balance, buffer_data_len as u64, - &loader_id, + loader_id, )], minimum_balance, ) @@ -1582,7 +1582,7 @@ fn do_process_program_write_and_deploy( } else { loader_instruction::write( buffer_pubkey, - &loader_id, + loader_id, (i * DATA_CHUNK_SIZE) as u32, chunk.to_vec(), ) @@ -1626,7 +1626,7 @@ fn do_process_program_write_and_deploy( ) } else { Message::new( - &[loader_instruction::finalize(buffer_pubkey, &loader_id)], + &[loader_instruction::finalize(buffer_pubkey, loader_id)], Some(&config.signers[0].pubkey()), ) }; @@ -1752,8 +1752,8 @@ fn do_process_program_upgrade( // Create and add final message let final_message = Message::new( &[bpf_loader_upgradeable::upgrade( - &program_id, - &buffer_pubkey, + program_id, + buffer_pubkey, &upgrade_authority.pubkey(), &config.signers[0].pubkey(), )], @@ -1821,7 +1821,7 @@ fn complete_partial_program_init( account_data_len as u64, )); if account.owner != *loader_id { - instructions.push(system_instruction::assign(elf_pubkey, &loader_id)); + instructions.push(system_instruction::assign(elf_pubkey, loader_id)); } } if account.lamports < minimum_balance { @@ -1893,7 +1893,7 @@ fn send_deploy_messages( initial_transaction.try_sign(&[payer_signer], blockhash)?; } let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) .map_err(|err| format!("Account allocation failed: {}", err))?; } else { return Err("Buffer account not created yet, must provide a key pair".into()); diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index 95431ccad2..df785e457b 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -92,7 +92,7 @@ where Ok((message, spend)) } else { let from_balance = rpc_client - .get_balance_with_commitment(&from_pubkey, commitment)? + .get_balance_with_commitment(from_pubkey, commitment)? 
.value; let (message, SpendAndFee { spend, fee }) = resolve_spend_message( amount, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 42d21fea41..682f41b465 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -972,7 +972,7 @@ pub fn process_create_stake_account( ) -> ProcessResult { let stake_account = config.signers[stake_account]; let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &stake::program::id())? + Pubkey::create_with_seed(&stake_account.pubkey(), seed, &stake::program::id())? } else { stake_account.pubkey() }; @@ -1085,7 +1085,7 @@ pub fn process_create_stake_account( } else { tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1172,7 +1172,7 @@ pub fn process_stake_authorize( } else { rpc_client.send_and_confirm_transaction_with_spinner(&tx) }; - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1196,7 +1196,7 @@ pub fn process_deactivate_stake_account( let stake_authority = config.signers[stake_authority]; let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())? + Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())? } else { *stake_account_pubkey }; @@ -1248,7 +1248,7 @@ pub fn process_deactivate_stake_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1274,7 +1274,7 @@ pub fn process_withdraw_stake( let custodian = custodian.map(|index| config.signers[index]); let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())? + Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())? } else { *stake_account_pubkey }; @@ -1347,7 +1347,7 @@ pub fn process_withdraw_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1382,10 +1382,10 @@ pub fn process_split_stake( } check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), )?; check_unique_pubkeys( - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), ( &split_stake_account.pubkey(), "split_stake_account".to_string(), @@ -1395,7 +1395,7 @@ pub fn process_split_stake( let stake_authority = config.signers[stake_authority]; let split_stake_account_address = if let Some(seed) = split_stake_account_seed { - Pubkey::create_with_seed(&split_stake_account.pubkey(), &seed, &stake::program::id())? + Pubkey::create_with_seed(&split_stake_account.pubkey(), seed, &stake::program::id())? 
} else { split_stake_account.pubkey() }; @@ -1433,7 +1433,7 @@ pub fn process_split_stake( let ixs = if let Some(seed) = split_stake_account_seed { stake_instruction::split_with_seed( - &stake_account_pubkey, + stake_account_pubkey, &stake_authority.pubkey(), lamports, &split_stake_account_address, @@ -1443,7 +1443,7 @@ pub fn process_split_stake( .with_memo(memo) } else { stake_instruction::split( - &stake_account_pubkey, + stake_account_pubkey, &stake_authority.pubkey(), lamports, &split_stake_account_address, @@ -1492,7 +1492,7 @@ pub fn process_split_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1515,19 +1515,19 @@ pub fn process_merge_stake( check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), )?; check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), ( - &source_stake_account_pubkey, + source_stake_account_pubkey, "source_stake_account".to_string(), ), )?; check_unique_pubkeys( - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), ( - &source_stake_account_pubkey, + source_stake_account_pubkey, "source_stake_account".to_string(), ), )?; @@ -1552,8 +1552,8 @@ pub fn process_merge_stake( blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let ixs = stake_instruction::merge( - &stake_account_pubkey, - &source_stake_account_pubkey, + stake_account_pubkey, + source_stake_account_pubkey, &stake_authority.pubkey(), ) .with_memo(memo); @@ -1603,7 +1603,7 @@ pub fn process_merge_stake( config.commitment, config.send_transaction_config, ); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1674,7 +1674,7 @@ pub fn process_stake_set_lockup( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -2076,7 +2076,7 @@ pub fn process_delegate_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index 5479c0cb87..6ac5f239b9 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -119,7 +119,7 @@ fn parse_validator_info( let key_list: ConfigKeys = deserialize(&account.data)?; if !key_list.keys.is_empty() { let (validator_pubkey, _) = key_list.keys[1]; - let validator_info_string: String = deserialize(&get_config_data(&account.data)?)?; + let validator_info_string: String = deserialize(get_config_data(&account.data)?)?; let validator_info: Map<_, _> = serde_json::from_str(&validator_info_string)?; Ok((validator_pubkey, validator_info)) } else { @@ -246,7 +246,7 @@ pub fn process_set_validator_info( ) -> ProcessResult { // Validate keybase username if let Some(string) = validator_info.get("keybaseUsername") { - let result = verify_keybase(&config.signers[0].pubkey(), &string); + let result = verify_keybase(&config.signers[0].pubkey(), string); if result.is_err() { if force_keybase { println!("--force supplied, ignoring: {:?}", result); @@ -272,7 +272,7 @@ pub fn 
process_set_validator_info( }, ) .find(|(pubkey, account)| { - let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap(); + let (validator_pubkey, _) = parse_validator_info(pubkey, account).unwrap(); validator_pubkey == config.signers[0].pubkey() }); @@ -393,7 +393,7 @@ pub fn process_get_validator_info( } for (validator_info_pubkey, validator_info_account) in validator_info.iter() { let (validator_pubkey, validator_info) = - parse_validator_info(&validator_info_pubkey, &validator_info_account)?; + parse_validator_info(validator_info_pubkey, validator_info_account)?; validator_info_list.push(CliValidatorInfo { identity_pubkey: validator_pubkey.to_string(), info_pubkey: validator_info_pubkey.to_string(), @@ -451,7 +451,7 @@ mod tests { "name": "Alice", "keybaseUsername": "alice_keybase", }); - assert_eq!(parse_args(&matches), expected); + assert_eq!(parse_args(matches), expected); } #[test] diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 8ef78a5003..0b45efdaec 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -468,7 +468,7 @@ pub fn process_create_vote_account( let vote_account = config.signers[vote_account]; let vote_account_pubkey = vote_account.pubkey(); let vote_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())? + Pubkey::create_with_seed(&vote_account_pubkey, seed, &solana_vote_program::id())? } else { vote_account_pubkey }; @@ -549,7 +549,7 @@ pub fn process_create_vote_account( let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_vote_authorize( @@ -592,7 +592,7 @@ pub fn process_vote_authorize( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_vote_update_validator( @@ -629,7 +629,7 @@ pub fn process_vote_update_validator( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_vote_update_commission( @@ -660,7 +660,7 @@ pub fn process_vote_update_commission( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } fn get_vote_account( @@ -763,7 +763,7 @@ pub fn process_withdraw_from_vote_account( let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let withdraw_authority = config.signers[withdraw_authority]; - let current_balance = rpc_client.get_balance(&vote_account_pubkey)?; + let current_balance = rpc_client.get_balance(vote_account_pubkey)?; let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?; let lamports = match withdraw_amount { @@ -798,7 +798,7 @@ pub fn process_withdraw_from_vote_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } #[cfg(test)] diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 5a83d0ea24..adf96fb72e 100644 --- a/cli/tests/program.rs +++ 
b/cli/tests/program.rs @@ -68,7 +68,7 @@ fn test_cli_program_deploy_non_upgradeable() { .unwrap() .as_str() .unwrap(); - let program_id = Pubkey::from_str(&program_id_str).unwrap(); + let program_id = Pubkey::from_str(program_id_str).unwrap(); let account0 = rpc_client.get_account(&program_id).unwrap(); assert_eq!(account0.lamports, minimum_balance_for_rent_exemption); assert_eq!(account0.owner, bpf_loader::id()); @@ -198,7 +198,7 @@ fn test_cli_program_deploy_no_authority() { .unwrap() .as_str() .unwrap(); - let program_id = Pubkey::from_str(&program_id_str).unwrap(); + let program_id = Pubkey::from_str(program_id_str).unwrap(); // Attempt to upgrade the program config.signers = vec![&keypair, &upgrade_authority]; @@ -284,7 +284,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap(); assert_eq!( program_keypair.pubkey(), - Pubkey::from_str(&program_pubkey_str).unwrap() + Pubkey::from_str(program_pubkey_str).unwrap() ); let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); @@ -328,7 +328,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap() .as_str() .unwrap(); - let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap(); + let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); @@ -397,7 +397,7 @@ fn test_cli_program_deploy_with_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&new_upgrade_authority_str).unwrap(), + Pubkey::from_str(new_upgrade_authority_str).unwrap(), new_upgrade_authority.pubkey() ); @@ -452,7 +452,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap(); assert_eq!( new_upgrade_authority.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Set no authority @@ -510,7 +510,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap() .as_str() .unwrap(); - let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap(); + let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap(); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -606,7 +606,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); let buffer_account = rpc_client.get_account(&new_buffer_pubkey).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); @@ -641,7 +641,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&buffer_pubkey_str).unwrap() + Pubkey::from_str(buffer_pubkey_str).unwrap() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer); @@ -675,7 +675,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( keypair.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Specify buffer authority @@ -700,7 +700,7 @@ fn test_cli_program_write_buffer() { .unwrap(); 
assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&buffer_pubkey_str).unwrap() + Pubkey::from_str(buffer_pubkey_str).unwrap() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); @@ -735,7 +735,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); let buffer_account = rpc_client.get_account(&buffer_pubkey).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); @@ -768,7 +768,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Close buffer @@ -806,7 +806,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); // Close buffers and deposit default keypair let pre_lamports = rpc_client.get_account(&keypair.pubkey()).unwrap().lamports; @@ -901,7 +901,7 @@ fn test_cli_program_set_buffer_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&new_buffer_authority_str).unwrap(), + Pubkey::from_str(new_buffer_authority_str).unwrap(), new_buffer_authority.pubkey() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -928,7 +928,7 @@ fn test_cli_program_set_buffer_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&buffer_authority_str).unwrap(), + Pubkey::from_str(buffer_authority_str).unwrap(), buffer_keypair.pubkey() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1101,7 +1101,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&address_str).unwrap() + Pubkey::from_str(address_str).unwrap() ); let authority_str = json .as_object() @@ -1112,7 +1112,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_str).unwrap() + Pubkey::from_str(authority_str).unwrap() ); let data_len = json .as_object() @@ -1161,7 +1161,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( program_keypair.pubkey(), - Pubkey::from_str(&address_str).unwrap() + Pubkey::from_str(address_str).unwrap() ); let programdata_address_str = json .as_object() @@ -1176,7 +1176,7 @@ fn test_cli_program_show() { ); assert_eq!( programdata_pubkey, - Pubkey::from_str(&programdata_address_str).unwrap() + Pubkey::from_str(programdata_address_str).unwrap() ); let authority_str = json .as_object() @@ -1187,7 +1187,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_str).unwrap() + Pubkey::from_str(authority_str).unwrap() ); let deployed_slot = json .as_object() diff --git a/client/src/rpc_cache.rs b/client/src/rpc_cache.rs index 38dbba5825..4207d3ce36 100644 --- a/client/src/rpc_cache.rs +++ b/client/src/rpc_cache.rs @@ -31,7 +31,7 @@ impl LargestAccountsCache { &self, filter: &Option, ) -> Option<(u64, Vec)> { - self.cache.get(&filter).and_then(|value| { + self.cache.get(filter).and_then(|value| { if let Ok(elapsed) = value.cached_time.elapsed() { if elapsed < Duration::from_secs(self.duration) { return Some((value.slot, 
value.accounts.clone())); diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index 0b3f11608d..e4fb5ffc6f 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -1627,7 +1627,7 @@ impl RpcClient { ) -> ClientResult { let now = Instant::now(); loop { - match self.get_balance_with_commitment(&pubkey, commitment_config) { + match self.get_balance_with_commitment(pubkey, commitment_config) { Ok(bal) => { return Ok(bal.value); } @@ -1696,7 +1696,7 @@ impl RpcClient { let now = Instant::now(); loop { if let Ok(Some(_)) = - self.get_signature_status_with_commitment(&signature, commitment_config) + self.get_signature_status_with_commitment(signature, commitment_config) { break; } @@ -1853,11 +1853,11 @@ impl RpcClient { let (signature, status) = loop { // Get recent commitment in order to count confirmations for successful transactions let status = self - .get_signature_status_with_commitment(&signature, CommitmentConfig::processed())?; + .get_signature_status_with_commitment(signature, CommitmentConfig::processed())?; if status.is_none() { if self .get_fee_calculator_for_blockhash_with_commitment( - &recent_blockhash, + recent_blockhash, CommitmentConfig::processed(), )? .value @@ -1891,7 +1891,7 @@ impl RpcClient { // Return when specified commitment is reached // Failed transactions have already been eliminated, `is_some` check is sufficient if self - .get_signature_status_with_commitment(&signature, commitment)? + .get_signature_status_with_commitment(signature, commitment)? .is_some() { progress_bar.set_message("Transaction confirmed"); @@ -1907,7 +1907,7 @@ impl RpcClient { )); sleep(Duration::from_millis(500)); confirmations = self - .get_num_blocks_since_signature_confirmation(&signature) + .get_num_blocks_since_signature_confirmation(signature) .unwrap_or(confirmations); if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 { return Err( diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index 8b3d083084..3988e8e5d0 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -451,7 +451,7 @@ impl SyncClient for ThinClient { ) -> TransportResult>> { let status = self .rpc_client() - .get_signature_status(&signature) + .get_signature_status(signature) .map_err(|err| { io::Error::new( io::ErrorKind::Other, @@ -468,7 +468,7 @@ impl SyncClient for ThinClient { ) -> TransportResult>> { let status = self .rpc_client() - .get_signature_status_with_commitment(&signature, commitment_config) + .get_signature_status_with_commitment(signature, commitment_config) .map_err(|err| { io::Error::new( io::ErrorKind::Other, diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index ae264f9875..01c902af12 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -121,7 +121,7 @@ struct LeaderTpuCache { impl LeaderTpuCache { fn new(rpc_client: &RpcClient, first_slot: Slot) -> Self { let leaders = Self::fetch_slot_leaders(rpc_client, first_slot).unwrap_or_default(); - let leader_tpu_map = Self::fetch_cluster_tpu_sockets(&rpc_client).unwrap_or_default(); + let leader_tpu_map = Self::fetch_cluster_tpu_sockets(rpc_client).unwrap_or_default(); Self { first_slot, leaders, diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 09118d3151..4de561a9ef 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -187,7 +187,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { }); //sanity check, make sure all the transactions can execute sequentially 
transactions.iter().for_each(|tx| { - let res = bank.process_transaction(&tx); + let res = bank.process_transaction(tx); assert!(res.is_ok(), "sanity test transactions"); }); bank.clear_signatures(); diff --git a/core/benches/consensus.rs b/core/benches/consensus.rs index 64035f4c3a..280ee08c13 100644 --- a/core/benches/consensus.rs +++ b/core/benches/consensus.rs @@ -24,10 +24,10 @@ fn bench_save_tower(bench: &mut Bencher) { let heaviest_bank = BankForks::new(Bank::default()).working_bank(); let tower = Tower::new( &node_keypair.pubkey(), - &vote_account_pubkey, + vote_account_pubkey, 0, &heaviest_bank, - &path, + path, ); bench.iter(move || { diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 07f115ac30..58a3c8331e 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -148,7 +148,7 @@ impl AccountsHashVerifier { for (slot, hash) in hashes.iter() { slot_to_hash.insert(*slot, *hash); } - if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) { + if Self::should_halt(cluster_info, trusted_validators, &mut slot_to_hash) { exit.store(true, Ordering::Relaxed); } } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 4cd8ecc011..63c7c5f355 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -257,7 +257,7 @@ impl BankingStage { Self::num_threads(), transaction_status_sender, gossip_vote_sender, - &cost_model, + cost_model, &cost_tracker, ) } @@ -393,9 +393,9 @@ impl BankingStage { // We've hit the end of this slot, no need to perform more processing, // just filter the remaining packets for the invalid (e.g. too old) ones let new_unprocessed_indexes = Self::filter_unprocessed_packets( - &bank, - &msgs, - &original_unprocessed_indexes, + bank, + msgs, + original_unprocessed_indexes, my_pubkey, *next_leader, cost_model, @@ -413,8 +413,8 @@ impl BankingStage { Self::process_packets_transactions( &bank, &bank_creation_time, - &recorder, - &msgs, + recorder, + msgs, original_unprocessed_indexes.to_owned(), transaction_status_sender.clone(), gossip_vote_sender, @@ -449,7 +449,7 @@ impl BankingStage { // `original_unprocessed_indexes` must have remaining packets to process // if not yet processed. 
assert!(Self::packet_has_more_unprocessed_transactions( - &original_unprocessed_indexes + original_unprocessed_indexes )); true } @@ -652,7 +652,7 @@ impl BankingStage { let decision = Self::process_buffered_packets( &my_pubkey, &socket, - &poh_recorder, + poh_recorder, cluster_info, &mut buffered_packets, enable_forwarding, @@ -684,8 +684,8 @@ impl BankingStage { match Self::process_packets( &my_pubkey, - &verified_receiver, - &poh_recorder, + verified_receiver, + poh_recorder, recv_start, recv_timeout, id, @@ -797,7 +797,7 @@ impl BankingStage { let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new(); let pre_token_balances = if transaction_status_sender.is_some() { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -857,7 +857,7 @@ impl BankingStage { if let Some(transaction_status_sender) = transaction_status_sender { let txs = batch.transactions_iter().cloned().collect(); let post_balances = bank.collect_balances(batch); - let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); + let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals); transaction_status_sender.send_transaction_status_batch( bank.clone(), txs, @@ -1170,7 +1170,7 @@ impl BankingStage { // applying cost of processed transactions to shared cost_tracker transactions.iter().enumerate().for_each(|(index, tx)| { if !unprocessed_tx_indexes.iter().any(|&i| i == index) { - let tx_cost = cost_model.read().unwrap().calculate_cost(&tx.transaction()); + let tx_cost = cost_model.read().unwrap().calculate_cost(tx.transaction()); let mut guard = cost_tracker.lock().unwrap(); let _result = guard.try_add(tx_cost); drop(guard); @@ -1229,7 +1229,7 @@ impl BankingStage { let (transactions, transaction_to_packet_indexes, retry_packet_indexes) = Self::transactions_from_packets( msgs, - &transaction_indexes, + transaction_indexes, bank.secp256k1_program_enabled(), cost_model, cost_tracker, @@ -1368,7 +1368,7 @@ impl BankingStage { &bank, &msgs, &packet_indexes, - &my_pubkey, + my_pubkey, next_leader, cost_model, cost_tracker, @@ -2579,7 +2579,7 @@ mod tests { Receiver<WorkingBankEntry>, JoinHandle<()>, ) { - Blockstore::destroy(&ledger_path).unwrap(); + Blockstore::destroy(ledger_path).unwrap(); let genesis_config_info = create_slow_genesis_config(10_000); let GenesisConfigInfo { genesis_config, mint_keypair, ..
} = &genesis_config_info; let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); - let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); + Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); + let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config)); let exit = Arc::new(AtomicBool::default()); let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), @@ -2609,9 +2609,9 @@ mod tests { let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let transactions = vec![ - system_transaction::transfer(&mint_keypair, &pubkey0, 1, genesis_config.hash()), - system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()), - system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()), ]; let poh_simulator = simulate_poh(record_receiver, &poh_recorder); diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 9a8028f660..77ed6cfa9b 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -408,7 +408,7 @@ pub fn broadcast_shreds( let packets: Vec<_> = shreds .iter() .map(|shred| { - let broadcast_index = weighted_best(&peers_and_stakes, shred.seed()); + let broadcast_index = weighted_best(peers_and_stakes, shred.seed()); (&shred.payload, &peers[broadcast_index].tvu) }) @@ -429,7 +429,7 @@ pub fn broadcast_shreds( send_mmsg_time.stop(); transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us(); - let num_live_peers = num_live_peers(&peers); + let num_live_peers = num_live_peers(peers); update_peer_stats( num_live_peers, broadcast_len as i64 + 1, diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs index 674d8d06bf..d9d738267e 100644 --- a/core/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -212,9 +212,9 @@ impl BroadcastRun for BroadcastDuplicatesRun { .collect(); stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| { if r_stake == l_stake { - l_key.cmp(&r_key) + l_key.cmp(r_key) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }); diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 6908d5dd1b..8b9cf78e27 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -161,7 +161,7 @@ impl StandardBroadcastRun { ) -> Result<()> { let (bsend, brecv) = channel(); let (ssend, srecv) = channel(); - self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?; + self.process_receive_results(blockstore, &ssend, &bsend, receive_results)?; let srecv = Arc::new(Mutex::new(srecv)); let brecv = Arc::new(Mutex::new(brecv)); //data diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 587e5f5903..3f07732f70 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -110,7 +110,7 @@ impl VoteTracker { epoch_schedule: *root_bank.epoch_schedule(), ..VoteTracker::default() }; - vote_tracker.progress_with_new_root_bank(&root_bank); + 
vote_tracker.progress_with_new_root_bank(root_bank); assert_eq!( *vote_tracker.leader_schedule_epoch.read().unwrap(), root_bank.get_leader_schedule_epoch(root_bank.slot()) @@ -603,7 +603,7 @@ impl ClusterInfoVoteListener { if slot == last_vote_slot { let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes()); let stake = vote_accounts - .get(&vote_pubkey) + .get(vote_pubkey) .map(|(stake, _)| *stake) .unwrap_or_default(); let total_stake = epoch_stakes.total_stake(); @@ -692,7 +692,7 @@ impl ClusterInfoVoteListener { // voters trying to make votes for slots earlier than the epoch for // which they are authorized let actual_authorized_voter = - vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot); + vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot); if actual_authorized_voter.is_none() { return false; @@ -700,7 +700,7 @@ impl ClusterInfoVoteListener { // Voting without the correct authorized pubkey, dump the vote if !VoteTracker::vote_contains_authorized_voter( - &gossip_tx, + gossip_tx, &actual_authorized_voter.unwrap(), ) { return false; @@ -738,7 +738,7 @@ impl ClusterInfoVoteListener { Self::track_new_votes_and_notify_confirmations( vote, &vote_pubkey, - &vote_tracker, + vote_tracker, root_bank, subscriptions, verified_vote_sender, diff --git a/core/src/cluster_slot_state_verifier.rs b/core/src/cluster_slot_state_verifier.rs index 9a91823a95..2ad5090ce5 100644 --- a/core/src/cluster_slot_state_verifier.rs +++ b/core/src/cluster_slot_state_verifier.rs @@ -192,7 +192,7 @@ fn get_cluster_duplicate_confirmed_hash<'a>( slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash ); } - Some(&local_frozen_hash) + Some(local_frozen_hash) } (Some(local_frozen_hash), None) => Some(local_frozen_hash), _ => gossip_duplicate_confirmed_hash, diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 02cb4732c6..fe10848b75 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -352,15 +352,15 @@ mod tests { if *a <= root { let mut expected = BlockCommitment::default(); expected.increase_rooted_stake(lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } else if i <= 4 { let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(2, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } else if i <= 6 { let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(1, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } } assert_eq!(rooted_stake[0], (root, lamports)); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index ab627a4b69..43d1c837ae 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -164,7 +164,7 @@ impl Tower { bank: &Bank, path: &Path, ) -> Self { - let path = Self::get_filename(&path, node_pubkey); + let path = Self::get_filename(path, node_pubkey); let tmp_path = Self::get_tmp_filename(&path); let mut tower = Self { node_pubkey: *node_pubkey, @@ -205,8 +205,8 @@ impl Tower { crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice( root_bank.deref(), bank_forks.frozen_banks().values().cloned().collect(), - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, ); let root = root_bank.slot(); @@ -219,11 +219,11 @@ impl Tower { .clone(); Self::new( - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, root, 
&heaviest_bank, - &ledger_path, + ledger_path, ) } @@ -736,7 +736,7 @@ impl Tower { // finding any lockout intervals in the `lockout_intervals` tree // for this bank that contain `last_vote`. let lockout_intervals = &progress - .get(&candidate_slot) + .get(candidate_slot) .unwrap() .fork_stats .lockout_intervals; @@ -1328,7 +1328,7 @@ pub fn reconcile_blockstore_roots_with_tower( if last_blockstore_root < tower_root { // Ensure tower_root itself to exist and be marked as rooted in the blockstore // in addition to its ancestors. - let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore) + let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore) .take_while(|current| match current.cmp(&last_blockstore_root) { Ordering::Greater => true, Ordering::Equal => false, @@ -1490,7 +1490,7 @@ pub mod test { tower: &mut Tower, ) -> Vec<HeaviestForkFailures> { // Try to simulate the vote - let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap(); + let my_keypairs = self.validator_keypairs.get(my_pubkey).unwrap(); let my_vote_pubkey = my_keypairs.vote_keypair.pubkey(); let ancestors = self.bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = self @@ -1503,7 +1503,7 @@ pub mod test { .collect(); let _ = ReplayStage::compute_bank_stats( - &my_pubkey, + my_pubkey, &ancestors, &mut frozen_banks, tower, @@ -1582,9 +1582,9 @@ pub mod test { .filter_map(|slot| { let mut fork_tip_parent = tr(slot - 1); fork_tip_parent.push_front(tr(slot)); - self.fill_bank_forks(fork_tip_parent, &cluster_votes); + self.fill_bank_forks(fork_tip_parent, cluster_votes); if votes_to_simulate.contains(&slot) { - Some((slot, self.simulate_vote(slot, &my_pubkey, tower))) + Some((slot, self.simulate_vote(slot, my_pubkey, tower))) } else { None } @@ -1627,7 +1627,7 @@ pub mod test { fork_tip_parent.push_front(tr(start_slot + i)); self.fill_bank_forks(fork_tip_parent, cluster_votes); if self - .simulate_vote(i + start_slot, &my_pubkey, tower) + .simulate_vote(i + start_slot, my_pubkey, tower) .is_empty() { cluster_votes @@ -2850,7 +2850,7 @@ pub mod test { tower.save(&identity_keypair).unwrap(); modify_serialized(&tower.path); - let loaded = Tower::restore(&dir.path(), &identity_keypair.pubkey()); + let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey()); (tower, loaded) } diff --git a/core/src/cost_model.rs b/core/src/cost_model.rs index ed81fc27ed..6a12005cb0 100644 --- a/core/src/cost_model.rs +++ b/core/src/cost_model.rs @@ -82,7 +82,7 @@ impl CostModel { &non_signed_writable_accounts, &non_signed_readonly_accounts, ), - execution_cost: self.find_transaction_cost(&transaction), + execution_cost: self.find_transaction_cost(transaction), }; cost.writable_accounts.extend(&signed_writable_accounts); cost.writable_accounts.extend(&non_signed_writable_accounts); @@ -109,7 +109,7 @@ impl CostModel { } fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 { - match self.instruction_execution_cost_table.get_cost(&program_key) { + match self.instruction_execution_cost_table.get_cost(program_key) { Some(cost) => *cost, None => { let default_value = self.instruction_execution_cost_table.get_mode(); diff --git a/core/src/cost_tracker.rs b/core/src/cost_tracker.rs index 064d56f6c9..df544ba702 100644 --- a/core/src/cost_tracker.rs +++ b/core/src/cost_tracker.rs @@ -55,7 +55,7 @@ impl CostTracker { // check each account against account_cost_limit, for account_key in keys.iter() { - match self.cost_by_writable_accounts.get(&account_key) { + match
self.cost_by_writable_accounts.get(account_key) { Some(chained_cost) => { if chained_cost + cost > self.account_cost_limit { return Err("would exceed account cost limit"); @@ -143,7 +143,7 @@ mod tests { ) -> (Transaction, Vec<Pubkey>, u64) { let keypair = Keypair::new(); let simple_transaction = - system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, *start_hash); + system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash); (simple_transaction, vec![mint_keypair.pubkey()], 5) } diff --git a/core/src/execute_cost_table.rs b/core/src/execute_cost_table.rs index 5ef0ede4cf..47cb1c81dc 100644 --- a/core/src/execute_cost_table.rs +++ b/core/src/execute_cost_table.rs @@ -67,7 +67,7 @@ impl ExecuteCostTable { .map(|(key, _)| key) .expect("cannot find mode from cost table"); - *self.table.get(&key).unwrap() + *self.table.get(key).unwrap() } } @@ -75,11 +75,11 @@ impl ExecuteCostTable { // client is advised to call `get_average()` or `get_mode()` to // assign a 'default' value for new program. pub fn get_cost(&self, key: &Pubkey) -> Option<&u64> { - self.table.get(&key) + self.table.get(key) } pub fn upsert(&mut self, key: &Pubkey, value: &u64) { - let need_to_add = self.table.get(&key).is_none(); + let need_to_add = self.table.get(key).is_none(); let current_size = self.get_count(); if current_size == self.capacity && need_to_add { self.prune_to(&((current_size as f64 * PRUNE_RATIO) as usize)); diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 48f44d60ec..523ad2a92b 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -34,7 +34,7 @@ impl FetchStage { tpu_forwards_sockets, exit, &sender, - &poh_recorder, + poh_recorder, coalesce_ms, ), receiver, @@ -54,8 +54,8 @@ impl FetchStage { tx_sockets, tpu_forwards_sockets, exit, - &sender, - &poh_recorder, + sender, + poh_recorder, coalesce_ms, ) } @@ -108,7 +108,7 @@ impl FetchStage { let tpu_threads = sockets.into_iter().map(|socket| { streamer::receiver( socket, - &exit, + exit, sender.clone(), recycler.clone(), "fetch_stage", @@ -121,7 +121,7 @@ impl FetchStage { let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| { streamer::receiver( socket, - &exit, + exit, forward_sender.clone(), recycler.clone(), "fetch_forward_stage", diff --git a/core/src/heaviest_subtree_fork_choice.rs b/core/src/heaviest_subtree_fork_choice.rs index 335e5a9bd4..7adaa12365 100644 --- a/core/src/heaviest_subtree_fork_choice.rs +++ b/core/src/heaviest_subtree_fork_choice.rs @@ -457,7 +457,7 @@ impl HeaviestSubtreeForkChoice { pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option<bool> { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| fork_info.is_duplicate_confirmed()) } @@ -472,7 +472,7 @@ impl HeaviestSubtreeForkChoice { /// Returns false if the node or any of its ancestors have been marked as duplicate pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| fork_info.is_candidate()) } @@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice { for child_key in &fork_info.children { let child_fork_info = self .fork_infos - .get(&child_key) + .get(child_key) .expect("Child must exist in fork_info map"); let child_stake_voted_subtree = child_fork_info.stake_voted_subtree; is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed; @@ -770,7 +770,7 @@ impl HeaviestSubtreeForkChoice { let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0); let stake_update =
epoch_stakes .get(&epoch) - .map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey)) + .map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey)) .unwrap_or(0); update_operations @@ -896,7 +896,7 @@ impl TreeDiff for HeaviestSubtreeForkChoice { fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| &fork_info.children[..]) } } @@ -1497,7 +1497,7 @@ mod test { .chain(std::iter::once(&duplicate_leaves_descended_from_4[1])) { assert!(heaviest_subtree_fork_choice - .children(&duplicate_leaf) + .children(duplicate_leaf) .unwrap() .is_empty(),); } @@ -3116,11 +3116,11 @@ mod test { let slot = slot_hash_key.0; if slot <= duplicate_confirmed_slot { assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); } else { assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); } assert!(heaviest_subtree_fork_choice @@ -3139,7 +3139,7 @@ mod test { // 1) Be duplicate confirmed // 2) Have no invalid ancestors assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3149,7 +3149,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should have an invalid ancestor == `invalid_descendant_slot` assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert_eq!( heaviest_subtree_fork_choice @@ -3162,7 +3162,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should not have an invalid ancestor assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3186,7 +3186,7 @@ mod test { // 1) Be duplicate confirmed // 2) Have no invalid ancestors assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3196,7 +3196,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should have an invalid ancestor == `invalid_descendant_slot` assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert_eq!( heaviest_subtree_fork_choice @@ -3209,7 +3209,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should not have an invalid ancestor assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3223,7 +3223,7 @@ mod test { heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key); for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index 8b1a1fc203..195601e873 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/core/src/ledger_cleanup_service.rs @@ -187,7 +187,7 @@ impl 
LedgerCleanupService { *last_purge_slot = root; let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) = - Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds); + Self::find_slots_to_clean(blockstore, root, max_ledger_shreds); if slots_to_clean { let purge_complete = Arc::new(AtomicBool::new(false)); diff --git a/core/src/optimistic_confirmation_verifier.rs b/core/src/optimistic_confirmation_verifier.rs index 2f27bc2b78..c5445df46d 100644 --- a/core/src/optimistic_confirmation_verifier.rs +++ b/core/src/optimistic_confirmation_verifier.rs @@ -36,7 +36,7 @@ impl OptimisticConfirmationVerifier { .into_iter() .filter(|(optimistic_slot, optimistic_hash)| { (*optimistic_slot == root && *optimistic_hash != root_bank.hash()) - || (!root_ancestors.contains_key(&optimistic_slot) && + || (!root_ancestors.contains_key(optimistic_slot) && // In this second part of the `and`, we account for the possibility that // there was some other root `rootX` set in BankForks where: // diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index e929054f4a..16a11bd37c 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -292,7 +292,7 @@ impl PropagatedStats { pub fn add_node_pubkey(&mut self, node_pubkey: &Pubkey, bank: &Bank) { if !self.propagated_node_ids.contains(node_pubkey) { let node_vote_accounts = bank - .epoch_vote_accounts_for_node_id(&node_pubkey) + .epoch_vote_accounts_for_node_id(node_pubkey) .map(|v| &v.vote_accounts); if let Some(node_vote_accounts) = node_vote_accounts { diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index bbfaf6d9f4..fb251de9f9 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -229,7 +229,7 @@ impl RepairService { add_votes_elapsed = Measure::start("add_votes"); repair_weight.add_votes( - &blockstore, + blockstore, slot_to_vote_pubkeys.into_iter(), root_bank.epoch_stakes_map(), root_bank.epoch_schedule(), @@ -277,7 +277,7 @@ impl RepairService { let mut outstanding_requests = outstanding_requests.write().unwrap(); repairs.into_iter().for_each(|repair_request| { if let Ok((to, req)) = serve_repair.repair_request( - &cluster_slots, + cluster_slots, repair_request, &mut cache, &mut repair_stats, @@ -493,7 +493,7 @@ impl RepairService { repair_validators, ); if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr { - let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot); + let repairs = Self::generate_duplicate_repairs_for_slot(blockstore, *slot); if let Some(repairs) = repairs { let mut outstanding_requests = outstanding_requests.write().unwrap(); @@ -535,7 +535,7 @@ impl RepairService { nonce: Nonce, ) -> Result<()> { let req = - serve_repair.map_repair_request(&repair_type, repair_pubkey, repair_stats, nonce)?; + serve_repair.map_repair_request(repair_type, repair_pubkey, repair_stats, nonce)?; repair_socket.send_to(&req, to)?; Ok(()) } diff --git a/core/src/repair_weight.rs b/core/src/repair_weight.rs index 26cce442e1..fe080518a5 100644 --- a/core/src/repair_weight.rs +++ b/core/src/repair_weight.rs @@ -495,7 +495,7 @@ impl RepairWeight { for ((slot, _), _) in all_slots { *self .slot_to_tree - .get_mut(&slot) + .get_mut(slot) .expect("Nodes in tree must exist in `self.slot_to_tree`") = root2; } } @@ -521,9 +521,9 @@ impl RepairWeight { fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) { slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| { if stake_voted == stake_voted_ { - slot.cmp(&slot_) + 
slot.cmp(slot_) } else { - stake_voted.cmp(&stake_voted_).reverse() + stake_voted.cmp(stake_voted_).reverse() } }); } @@ -757,7 +757,7 @@ mod test { ); for slot in &[8, 10, 11] { - assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 8); + assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 8); } for slot in 0..=1 { assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0); @@ -772,7 +772,7 @@ mod test { ); for slot in &[8, 10, 11] { - assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0); + assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 0); } assert_eq!(repair_weight.trees.len(), 1); assert!(repair_weight.trees.contains_key(&0)); @@ -1088,10 +1088,10 @@ mod test { let purged_slots = vec![0, 1, 2, 4, 8, 10]; let mut expected_unrooted_len = 0; for purged_slot in &purged_slots { - assert!(!repair_weight.slot_to_tree.contains_key(&purged_slot)); - assert!(!repair_weight.trees.contains_key(&purged_slot)); + assert!(!repair_weight.slot_to_tree.contains_key(purged_slot)); + assert!(!repair_weight.trees.contains_key(purged_slot)); if *purged_slot > 3 { - assert!(repair_weight.unrooted_slots.contains(&purged_slot)); + assert!(repair_weight.unrooted_slots.contains(purged_slot)); expected_unrooted_len += 1; } } diff --git a/core/src/repair_weighted_traversal.rs b/core/src/repair_weighted_traversal.rs index 534ef4841d..8b6cd0ceb4 100644 --- a/core/src/repair_weighted_traversal.rs +++ b/core/src/repair_weighted_traversal.rs @@ -101,7 +101,7 @@ pub fn get_best_repair_shreds<'a>( let new_repairs = RepairService::generate_repairs_for_slot( blockstore, slot, - &slot_meta, + slot_meta, max_repairs - repairs.len(), ); repairs.extend(new_repairs); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 158505ffa6..c3ff51f1b4 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -563,7 +563,7 @@ impl ReplayStage { } Self::handle_votable_bank( - &vote_bank, + vote_bank, &poh_recorder, switch_fork_decision, &bank_forks, @@ -757,8 +757,8 @@ impl ReplayStage { Self::initialize_progress_and_fork_choice( &root_bank, frozen_banks, - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, ) } @@ -779,8 +779,8 @@ impl ReplayStage { bank.slot(), ForkProgress::new_from_bank( bank, - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, prev_leader_slot, 0, 0, @@ -875,7 +875,7 @@ impl ReplayStage { .expect("must exist based on earlier check") { descendants - .get_mut(&a) + .get_mut(a) .expect("If exists in ancestor map must exist in descendants map") .retain(|d| *d != slot && !slot_descendants.contains(d)); } @@ -885,9 +885,9 @@ impl ReplayStage { // Purge all the descendants of this slot from both maps for descendant in slot_descendants { - ancestors.remove(&descendant).expect("must exist"); + ancestors.remove(descendant).expect("must exist"); descendants - .remove(&descendant) + .remove(descendant) .expect("must exist based on earlier check"); } descendants @@ -1345,7 +1345,7 @@ impl ReplayStage { ); Self::handle_new_root( new_root, - &bank_forks, + bank_forks, progress, accounts_background_request_sender, highest_confirmed_root, @@ -1451,7 +1451,7 @@ impl ReplayStage { let vote_ix = switch_fork_decision .to_vote_instruction( vote, - &vote_account_pubkey, + vote_account_pubkey, &authorized_voter_keypair.pubkey(), ) .expect("Switch threshold failure should not lead to voting"); @@ -1603,9 +1603,9 @@ impl ReplayStage { leader_schedule_cache: &LeaderScheduleCache, ) { let next_leader_slot = leader_schedule_cache.next_leader_slot( - &my_pubkey, 
+ my_pubkey, bank.slot(), - &bank, + bank, Some(blockstore), GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, ); @@ -1683,7 +1683,7 @@ impl ReplayStage { let bank_progress = &mut progress.entry(bank.slot()).or_insert_with(|| { ForkProgress::new_from_bank( &bank, - &my_pubkey, + my_pubkey, vote_account, prev_leader_slot, num_blocks_on_fork, @@ -1694,13 +1694,13 @@ impl ReplayStage { let root_slot = bank_forks.read().unwrap().root(); let replay_result = Self::replay_blockstore_into_bank( &bank, - &blockstore, + blockstore, bank_progress, transaction_status_sender, replay_vote_sender, verify_recyclers, ); - Self::update_cost_model(&cost_model, &bank_progress.replay_stats.execute_timings); + Self::update_cost_model(cost_model, &bank_progress.replay_stats.execute_timings); debug!( "after replayed into bank, updated cost model instruction cost table, current values: {:?}", cost_model.read().unwrap().get_instruction_cost_table() @@ -1779,7 +1779,7 @@ impl ReplayStage { ); } } - Self::record_rewards(&bank, &rewards_recorder_sender); + Self::record_rewards(&bank, rewards_recorder_sender); } else { trace!( "bank {} not completed tick_height: {}, max_tick_height: {}", @@ -1823,14 +1823,14 @@ impl ReplayStage { my_vote_pubkey, bank_slot, bank.vote_accounts().into_iter(), - &ancestors, + ancestors, |slot| progress.get_hash(slot), latest_validator_votes_for_frozen_banks, ); // Notify any listeners of the votes found in this newly computed // bank heaviest_subtree_fork_choice.compute_bank_stats( - &bank, + bank, tower, latest_validator_votes_for_frozen_banks, ); @@ -1899,7 +1899,7 @@ impl ReplayStage { let mut cost_model_mutable = cost_model.write().unwrap(); for (program_id, stats) in &execute_timings.details.per_program_timings { let cost = stats.0 / stats.1 as u64; - match cost_model_mutable.upsert_instruction_cost(&program_id, &cost) { + match cost_model_mutable.upsert_instruction_cost(program_id, &cost) { Ok(c) => { debug!( "after replayed into bank, instruction {:?} has averaged cost {}", @@ -2013,9 +2013,9 @@ impl ReplayStage { let selected_fork = { let switch_fork_decision = tower.check_switch_threshold( heaviest_bank.slot(), - &ancestors, - &descendants, - &progress, + ancestors, + descendants, + progress, heaviest_bank.total_epoch_stake(), heaviest_bank .epoch_vote_accounts(heaviest_bank.epoch()) @@ -2261,7 +2261,7 @@ impl ReplayStage { .contains(vote_pubkey); leader_propagated_stats.add_vote_pubkey( *vote_pubkey, - leader_bank.epoch_vote_account_stake(&vote_pubkey), + leader_bank.epoch_vote_account_stake(vote_pubkey), ); !exists }); @@ -2733,7 +2733,7 @@ mod tests { &bank1, bank1.collector_id(), validator_node_to_vote_keys - .get(&bank1.collector_id()) + .get(bank1.collector_id()) .unwrap(), Some(0), 0, @@ -2990,7 +2990,7 @@ mod tests { &bad_hash, hashes_per_tick.saturating_sub(1), vec![system_transaction::transfer( - &genesis_keypair, + genesis_keypair, &keypair2.pubkey(), 2, blockhash, @@ -3108,7 +3108,7 @@ mod tests { entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash); let last_entry_hash = entries.last().unwrap().hash; let tx = - system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash); + system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash); let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]); entries.push(trailing_entry); entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0) @@ -3188,7 +3188,7 @@ mod tests { &mut bank0_progress, None, &replay_vote_sender, - &&VerifyRecyclers::default(), + 
&VerifyRecyclers::default(), ); let rpc_subscriptions = Arc::new(RpcSubscriptions::new( @@ -3228,12 +3228,12 @@ mod tests { #[test] fn test_replay_commitment_cache() { fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) { - let mut leader_vote_account = bank.get_account(&pubkey).unwrap(); + let mut leader_vote_account = bank.get_account(pubkey).unwrap(); let mut vote_state = VoteState::from(&leader_vote_account).unwrap(); vote_state.process_slot_vote_unchecked(vote_slot); let versioned = VoteStateVersions::new_current(vote_state); VoteState::to(&versioned, &mut leader_vote_account).unwrap(); - bank.store_account(&pubkey, &leader_vote_account); + bank.store_account(pubkey, &leader_vote_account); } let leader_pubkey = solana_sdk::pubkey::new_rand(); @@ -3773,7 +3773,7 @@ mod tests { success_index: usize, ) { let stake = 10_000; - let (bank_forks, _, _) = initialize_state(&all_keypairs, stake); + let (bank_forks, _, _) = initialize_state(all_keypairs, stake); let root_bank = bank_forks.root_bank(); let mut propagated_stats = PropagatedStats { total_epoch_stake: stake * all_keypairs.len() as u64, @@ -4407,7 +4407,7 @@ mod tests { )); assert!(check_map_eq( &descendants, - &bank_forks.read().unwrap().descendants() + bank_forks.read().unwrap().descendants() )); // Try to purge the root @@ -4546,7 +4546,7 @@ mod tests { // Record the vote for 4 tower.record_bank_vote( - &bank_forks.read().unwrap().get(4).unwrap(), + bank_forks.read().unwrap().get(4).unwrap(), &Pubkey::default(), ); @@ -4746,7 +4746,7 @@ mod tests { &cluster_info, refresh_bank, &poh_recorder, - Tower::last_voted_slot_in_bank(&refresh_bank, &my_vote_pubkey).unwrap(), + Tower::last_voted_slot_in_bank(refresh_bank, &my_vote_pubkey).unwrap(), &my_vote_pubkey, &my_vote_keypair, &mut voted_signatures, @@ -5010,12 +5010,12 @@ mod tests { progress, &VoteTracker::default(), &ClusterSlots::default(), - &bank_forks, + bank_forks, heaviest_subtree_fork_choice, latest_validator_votes_for_frozen_banks, ); let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice - .select_forks(&frozen_banks, &tower, &progress, &ancestors, bank_forks); + .select_forks(&frozen_banks, tower, progress, ancestors, bank_forks); assert!(heaviest_bank_on_same_fork.is_none()); let SelectVoteAndResetForkResult { vote_bank, @@ -5024,8 +5024,8 @@ mod tests { } = ReplayStage::select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), - &ancestors, - &descendants, + ancestors, + descendants, progress, tower, latest_validator_votes_for_frozen_banks, diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index feb5191ba7..b6beebec98 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -171,7 +171,7 @@ impl ServeRepair { Self::run_window_request( recycler, from, - &from_addr, + from_addr, blockstore, &me.read().unwrap().my_info, *slot, @@ -186,7 +186,7 @@ impl ServeRepair { ( Self::run_highest_window_request( recycler, - &from_addr, + from_addr, blockstore, *slot, *highest_index, @@ -200,7 +200,7 @@ impl ServeRepair { ( Self::run_orphan( recycler, - &from_addr, + from_addr, blockstore, *slot, MAX_ORPHAN_REPAIR_RESPONSES, @@ -256,7 +256,7 @@ impl ServeRepair { let mut time = Measure::start("repair::handle_packets"); for reqs in reqs_v { - Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats); + Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats); } time.stop(); if total_packets >= *max_packets { @@ -411,7 +411,7 @@ impl ServeRepair { let
(repair_peers, weighted_index) = match cache.entry(slot) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { - let repair_peers = self.repair_peers(&repair_validators, slot); + let repair_peers = self.repair_peers(repair_validators, slot); if repair_peers.is_empty() { return Err(Error::from(ClusterInfoError::NoPeers)); } diff --git a/core/src/serve_repair_service.rs b/core/src/serve_repair_service.rs index dae275a1e1..f5b4cdadfc 100644 --- a/core/src/serve_repair_service.rs +++ b/core/src/serve_repair_service.rs @@ -28,7 +28,7 @@ impl ServeRepairService { ); let t_receiver = streamer::receiver( serve_repair_socket.clone(), - &exit, + exit, request_sender, Recycler::default(), "serve_repair_receiver", diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 2c9a9961a9..50a5316074 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -145,7 +145,7 @@ impl ShredFetchStage { .map(|s| { streamer::receiver( s, - &exit, + exit, packet_sender.clone(), recycler.clone(), "packet_modifier", @@ -174,7 +174,7 @@ impl ShredFetchStage { let (mut tvu_threads, tvu_filter) = Self::packet_modifier( sockets, - &exit, + exit, sender.clone(), recycler.clone(), bank_forks.clone(), @@ -184,7 +184,7 @@ impl ShredFetchStage { let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier( forward_sockets, - &exit, + exit, sender.clone(), recycler.clone(), bank_forks.clone(), @@ -194,7 +194,7 @@ impl ShredFetchStage { let (repair_receiver, repair_handler) = Self::packet_modifier( vec![repair_socket], - &exit, + exit, sender.clone(), recycler, bank_forks, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index c4f1bd5fd0..73fb624cbf 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -76,9 +76,9 @@ impl Tpu { let fetch_stage = FetchStage::new_with_sender( transactions_sockets, tpu_forwards_sockets, - &exit, + exit, &packet_sender, - &poh_recorder, + poh_recorder, tpu_coalesce_ms, ); let (verified_sender, verified_receiver) = unbounded(); @@ -90,10 +90,10 @@ impl Tpu { let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded(); let cluster_info_vote_listener = ClusterInfoVoteListener::new( - &exit, + exit, cluster_info.clone(), verified_vote_packets_sender, - &poh_recorder, + poh_recorder, vote_tracker, bank_forks, subscriptions.clone(), @@ -106,7 +106,7 @@ impl Tpu { ); let banking_stage = BankingStage::new( - &cluster_info, + cluster_info, poh_recorder, verified_receiver, verified_vote_packets_receiver, @@ -120,7 +120,7 @@ impl Tpu { cluster_info.clone(), entry_receiver, retransmit_slots_receiver, - &exit, + exit, blockstore, shred_version, ); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 7467c3ba22..4a785e84cf 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -152,7 +152,7 @@ impl Tvu { repair_socket.clone(), &fetch_sender, Some(bank_forks.clone()), - &exit, + exit, ); let (verified_sender, verified_receiver) = unbounded(); @@ -172,11 +172,11 @@ impl Tvu { bank_forks.clone(), leader_schedule_cache, blockstore.clone(), - &cluster_info, + cluster_info, Arc::new(retransmit_sockets), repair_socket, verified_receiver, - &exit, + exit, cluster_slots_update_receiver, *bank_forks.read().unwrap().working_bank().epoch_schedule(), cfg, @@ -211,7 +211,7 @@ impl Tvu { accounts_hash_receiver, pending_snapshot_package, exit, - &cluster_info, + cluster_info, tvu_config.trusted_validators.clone(), tvu_config.halt_on_trusted_validators_accounts_hash_mismatch, tvu_config.accounts_hash_fault_injection_slots, @@ -300,7 +300,7 @@ 
impl Tvu { ledger_cleanup_slot_receiver, blockstore.clone(), max_ledger_shreds, - &exit, + exit, compaction_interval, max_compaction_jitter, ) @@ -308,7 +308,7 @@ impl Tvu { let accounts_background_service = AccountsBackgroundService::new( bank_forks.clone(), - &exit, + exit, accounts_background_request_handler, tvu_config.accounts_db_caching_enabled, tvu_config.test_hash_calculation, diff --git a/core/src/unfrozen_gossip_verified_vote_hashes.rs b/core/src/unfrozen_gossip_verified_vote_hashes.rs index 4640e01e72..30d944754c 100644 --- a/core/src/unfrozen_gossip_verified_vote_hashes.rs +++ b/core/src/unfrozen_gossip_verified_vote_hashes.rs @@ -116,7 +116,7 @@ mod tests { if *unfrozen_vote_slot >= frozen_vote_slot { let vote_hashes_map = unfrozen_gossip_verified_vote_hashes .votes_per_slot - .get(&unfrozen_vote_slot) + .get(unfrozen_vote_slot) .unwrap(); assert_eq!(vote_hashes_map.len(), num_duplicate_hashes); for pubkey_votes in vote_hashes_map.values() { diff --git a/core/src/validator.rs b/core/src/validator.rs index ada7c9200e..be58e16efe 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -977,7 +977,7 @@ fn post_process_restored_tower( }) .unwrap_or_else(|err| { let voting_has_been_active = - active_vote_account_exists_in_bank(&bank_forks.working_bank(), &vote_account); + active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account); if !err.is_file_missing() { datapoint_error!( "tower_error", @@ -1010,10 +1010,10 @@ fn post_process_restored_tower( } Tower::new_from_bankforks( - &bank_forks, + bank_forks, tower_path, - &validator_identity, - &vote_account, + validator_identity, + vote_account, ) }) } @@ -1081,9 +1081,9 @@ fn new_banks_from_ledger( let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path); - let restored_tower = Tower::restore(tower_path, &validator_identity); + let restored_tower = Tower::restore(tower_path, validator_identity); if let Ok(tower) = &restored_tower { - reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| { + reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| { error!("Failed to reconcile blockstore with tower: {:?}", err); abort() }); @@ -1185,7 +1185,7 @@ fn new_banks_from_ledger( None, &snapshot_config.snapshot_package_output_path, snapshot_config.archive_format, - Some(&bank_forks.root_bank().get_thread_pool()), + Some(bank_forks.root_bank().get_thread_pool()), snapshot_config.maximum_snapshots_to_retain, ) .unwrap_or_else(|err| { @@ -1197,9 +1197,9 @@ fn new_banks_from_ledger( let tower = post_process_restored_tower( restored_tower, - &validator_identity, - &vote_account, - &config, + validator_identity, + vote_account, + config, tower_path, &bank_forks, ); @@ -1404,7 +1404,7 @@ fn wait_for_supermajority( ); } - let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0); + let gossip_stake_percent = get_stake_percent_in_gossip(bank, cluster_info, i % 10 == 0); if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT { break; diff --git a/core/src/window_service.rs b/core/src/window_service.rs index eac0b4c55b..5fbe0861ef 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -134,7 +134,7 @@ fn verify_repair( .map(|repair_meta| { outstanding_requests.register_response( repair_meta.nonce, - &shred, + shred, solana_sdk::timing::timestamp(), ) }) @@ -153,7 +153,7 @@ fn prune_shreds_invalid_repair( let mut outstanding_requests = outstanding_requests.write().unwrap(); shreds.retain(|shred| { let 
should_keep = ( - verify_repair(&mut outstanding_requests, &shred, &repair_infos[i]), + verify_repair(&mut outstanding_requests, shred, &repair_infos[i]), i += 1, ) .0; @@ -630,7 +630,7 @@ mod test { keypair: &Arc<Keypair>, ) -> Vec<Shred> { let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap(); - shredder.entries_to_shreds(&entries, true, 0).0 + shredder.entries_to_shreds(entries, true, 0).0 } #[test] diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index 23396a10d9..cfeda12228 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -188,7 +188,7 @@ impl Tower { .delayed_votes .iter() .enumerate() - .map(|(i, v)| (*scores.get(&v).unwrap_or(&0), v.time, i)) + .map(|(i, v)| (*scores.get(v).unwrap_or(&0), v.time, i)) .collect(); // highest score, latest vote first best.sort_unstable(); @@ -542,7 +542,7 @@ fn test_with_partitions( let mut scores: HashMap<Vote, usize> = HashMap::new(); towers.iter().for_each(|n| { n.delayed_votes.iter().for_each(|v| { - *scores.entry(v.clone()).or_insert(0) += n.score(&v, &fork_tree); + *scores.entry(v.clone()).or_insert(0) += n.score(v, &fork_tree); }) }); for tower in towers.iter_mut() { diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 04cb77426a..0a3ed2f343 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -149,7 +149,7 @@ mod tests { let check_hash_calculation = false; let (deserialized_bank, _timing) = snapshot_utils::bank_from_archive( - &account_paths, + account_paths, &[], &old_bank_forks .snapshot_config @@ -216,7 +216,7 @@ mod tests { }; for slot in 0..last_slot { let mut bank = Bank::new_from_parent(&bank_forks[slot], &Pubkey::default(), slot + 1); - f(&mut bank, &mint_keypair); + f(&mut bank, mint_keypair); let bank = bank_forks.insert(bank); // Set root to make sure we don't end up with too many account storage entries // and to allow snapshotting of bank and the purging logic on status_cache to @@ -250,7 +250,7 @@ mod tests { .unwrap(); let snapshot_package = snapshot_utils::process_accounts_package_pre( snapshot_package, - Some(&last_bank.get_thread_pool()), + Some(last_bank.get_thread_pool()), ); snapshot_utils::archive_snapshot_package( &snapshot_package, @@ -277,12 +277,12 @@ mod tests { |bank, mint_keypair| { let key1 = Keypair::new().pubkey(); let tx = - system_transaction::transfer(&mint_keypair, &key1, 1, bank.last_blockhash()); + system_transaction::transfer(mint_keypair, &key1, 1, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); let key2 = Keypair::new().pubkey(); let tx = - system_transaction::transfer(&mint_keypair, &key2, 0, bank.last_blockhash()); + system_transaction::transfer(mint_keypair, &key2, 0, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); bank.freeze(); @@ -294,7 +294,7 @@ mod tests { fn goto_end_of_slot(bank: &mut Bank) { let mut tick_hash = bank.last_blockhash(); loop { - tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]); + tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); bank.register_tick(&tick_hash); if tick_hash == bank.last_blockhash() { bank.freeze(); @@ -349,7 +349,7 @@ mod tests { ); let slot = bank.slot(); let key1 = Keypair::new().pubkey(); - let tx = system_transaction::transfer(&mint_keypair, &key1, 1, genesis_config.hash()); + let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); assert_eq!(bank.process_transaction(&tx), Ok(())); bank.squash(); let accounts_hash = bank.update_accounts_hash(); @@ -368,9 +368,9 @@
snapshot_utils::snapshot_bank( &bank, vec![], - &package_sender, - &snapshot_path, - &snapshot_package_output_path, + package_sender, + snapshot_path, + snapshot_package_output_path, snapshot_config.snapshot_version, &snapshot_config.archive_format, None, @@ -428,7 +428,7 @@ mod tests { // Purge all the outdated snapshots, including the ones needed to generate the package // currently sitting in the channel - snapshot_utils::purge_old_snapshots(&snapshot_path); + snapshot_utils::purge_old_snapshots(snapshot_path); assert!(snapshot_utils::get_snapshot_paths(&snapshots_dir) .into_iter() .map(|path| path.slot) @@ -575,14 +575,14 @@ mod tests { (MAX_CACHE_ENTRIES * 2 + 1) as u64, |bank, mint_keypair| { let tx = system_transaction::transfer( - &mint_keypair, + mint_keypair, &key1, 1, bank.parent().unwrap().last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( - &mint_keypair, + mint_keypair, &key2, 1, bank.parent().unwrap().last_blockhash(), diff --git a/dos/src/main.rs b/dos/src/main.rs index 891f9c9fa3..191131fd8a 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -96,14 +96,14 @@ fn run_dos( let res = rpc_client .as_ref() .unwrap() - .get_account(&Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap()); + .get_account(&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap()); if res.is_err() { error_count += 1; } } "get_program_accounts" => { let res = rpc_client.as_ref().unwrap().get_program_accounts( - &Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap(), + &Pubkey::from_str(data_input.as_ref().unwrap()).unwrap(), ); if res.is_err() { error_count += 1; diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs index c2831ee627..b253a65435 100644 --- a/faucet/src/faucet.rs +++ b/faucet/src/faucet.rs @@ -654,7 +654,7 @@ mod tests { #[test] fn test_process_faucet_request() { let to = solana_sdk::pubkey::new_rand(); - let blockhash = Hash::new(&to.as_ref()); + let blockhash = Hash::new(to.as_ref()); let lamports = 50; let req = FaucetRequest::GetAirdrop { lamports, @@ -679,6 +679,6 @@ mod tests { assert_eq!(expected_vec_with_length, response_vec); let bad_bytes = "bad bytes".as_bytes(); - assert!(faucet.process_faucet_request(&bad_bytes, ip).is_err()); + assert!(faucet.process_faucet_request(bad_bytes, ip).is_err()); } } diff --git a/faucet/tests/local-faucet.rs b/faucet/tests/local-faucet.rs index 8412552743..8629c68ac2 100644 --- a/faucet/tests/local-faucet.rs +++ b/faucet/tests/local-faucet.rs @@ -12,7 +12,7 @@ fn test_local_faucet() { let keypair = Keypair::new(); let to = solana_sdk::pubkey::new_rand(); let lamports = 50; - let blockhash = Hash::new(&to.as_ref()); + let blockhash = Hash::new(to.as_ref()); let create_instruction = system_instruction::transfer(&keypair.pubkey(), &to, lamports); let message = Message::new(&[create_instruction], Some(&keypair.pubkey())); let expected_tx = Transaction::new(&[&keypair], message, blockhash); diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs index bd285a826c..ee0a4fdf2b 100644 --- a/frozen-abi/macro/src/lib.rs +++ b/frozen-abi/macro/src/lib.rs @@ -224,7 +224,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { if filter_serde_attrs(&variant.attrs) { continue; }; - let sample_variant = quote_sample_variant(&type_name, &ty_generics, &variant); + let sample_variant = quote_sample_variant(type_name, &ty_generics, variant); variant_count = if let Some(variant_count) = variant_count.checked_add(1) { variant_count } else { @@ -319,7 +319,7 @@ fn 
test_mod_name(type_name: &Ident) -> Ident { #[cfg(RUSTC_WITH_SPECIALIZATION)] fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream { let type_name = &input.ident; - let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest); + let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); let result = quote! { #input #test @@ -330,7 +330,7 @@ fn frozen_abi_struct_type(input: ItemStruct, expected_digest: &str) -> TokenStream { let type_name = &input.ident; - let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest); + let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); let result = quote! { #input #test @@ -387,7 +387,7 @@ fn quote_sample_variant( #[cfg(RUSTC_WITH_SPECIALIZATION)] fn frozen_abi_enum_type(input: ItemEnum, expected_digest: &str) -> TokenStream { let type_name = &input.ident; - let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest); + let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); let result = quote! { #input #test diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index ebb74e31ca..b9bb57b364 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -468,7 +468,7 @@ impl<T: AbiEnumVisitor> AbiEnumVisitor for &T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { info!("AbiEnumVisitor for (&default): {}", type_name::<T>()); // Don't call self.visit_for_abi(...) to avoid the infinite recursion! - T::visit_for_abi(&self, digester) + T::visit_for_abi(self, digester) } } diff --git a/genesis-utils/src/lib.rs b/genesis-utils/src/lib.rs index 1efb26aedd..513da409ad 100644 --- a/genesis-utils/src/lib.rs +++ b/genesis-utils/src/lib.rs @@ -28,7 +28,7 @@ fn load_local_genesis( ledger_path: &std::path::Path, expected_genesis_hash: Option<Hash>, ) -> Result<GenesisConfig, String> { - let existing_genesis = GenesisConfig::load(&ledger_path) + let existing_genesis = GenesisConfig::load(ledger_path) .map_err(|err| format!("Failed to load genesis config: {}", err))?; check_genesis_hash(&existing_genesis, expected_genesis_hash)?; @@ -54,12 +54,12 @@ pub fn download_then_check_genesis_hash( { unpack_genesis_archive( &tmp_genesis_package, - &ledger_path, + ledger_path, max_genesis_archive_unpacked_size, ) .map_err(|err| format!("Failed to unpack downloaded genesis config: {}", err))?; - let downloaded_genesis = GenesisConfig::load(&ledger_path) + let downloaded_genesis = GenesisConfig::load(ledger_path) .map_err(|err| format!("Failed to load downloaded genesis config: {}", err))?; check_genesis_hash(&downloaded_genesis, expected_genesis_hash)?; diff --git a/genesis/src/genesis_accounts.rs b/genesis/src/genesis_accounts.rs index 61abf74d27..7bf3504402 100644 --- a/genesis/src/genesis_accounts.rs +++ b/genesis/src/genesis_accounts.rs @@ -231,20 +231,20 @@ pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lampo issued_lamports += add_stakes( genesis_config, - &CREATOR_STAKER_INFOS, + CREATOR_STAKER_INFOS, &UNLOCKS_HALF_AT_9_MONTHS, ) + add_stakes( genesis_config, - &SERVICE_STAKER_INFOS, + SERVICE_STAKER_INFOS, &UNLOCKS_ALL_AT_9_MONTHS, ) + add_stakes( genesis_config, - &FOUNDATION_STAKER_INFOS, + FOUNDATION_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO, - ) + add_stakes(genesis_config, &GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO) + ) + add_stakes(genesis_config,
GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO) + add_stakes( genesis_config, - &COMMUNITY_STAKER_INFOS, + COMMUNITY_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO, ); diff --git a/genesis/src/main.rs b/genesis/src/main.rs index cb4ed366c7..2fb7498337 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -534,9 +534,9 @@ fn main() -> Result<(), Box<dyn error::Error>> { ); let vote_account = vote_state::create_account_with_authorized( - &identity_pubkey, - &identity_pubkey, - &identity_pubkey, + identity_pubkey, + identity_pubkey, + identity_pubkey, commission, VoteState::get_rent_exempt_reserve(&rent).max(1), ); @@ -546,8 +546,8 @@ fn main() -> Result<(), Box<dyn error::Error>> { stake_state::create_account( bootstrap_stake_authorized_pubkey .as_ref() - .unwrap_or(&identity_pubkey), - &vote_pubkey, + .unwrap_or(identity_pubkey), + vote_pubkey, &vote_account, &rent, bootstrap_validator_stake_lamports, @@ -782,7 +782,7 @@ mod tests { let pubkey = &pubkey_str.parse().unwrap(); assert_eq!( b64_account.balance, - genesis_config.accounts[&pubkey].lamports, + genesis_config.accounts[pubkey].lamports, ); } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 636c3f9396..79687b7e22 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -265,7 +265,7 @@ impl PruneData { destination: Pubkey::new_unique(), wallclock, }; - prune_data.sign(&self_keypair); + prune_data.sign(self_keypair); prune_data } } @@ -1325,7 +1325,7 @@ impl ClusterInfo { if r_stake == l_stake { peers[*r_info].id.cmp(&peers[*l_info].id) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }) .collect(); @@ -1638,7 +1638,7 @@ impl ClusterInfo { generate_pull_requests: bool, require_stake_for_gossip: bool, ) -> Vec<(SocketAddr, Protocol)> { - self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, &stakes); + self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, stakes); // This will flush local pending push messages before generating // pull-request bloom filters, preventing pull responses to return the // same values back to the node itself.
Note that packets will arrive @@ -1649,7 +1649,7 @@ impl ClusterInfo { .add_relaxed(out.len() as u64); if generate_pull_requests { let (pings, pull_requests) = - self.new_pull_requests(&thread_pool, gossip_validators, stakes); + self.new_pull_requests(thread_pool, gossip_validators, stakes); self.stats .packets_sent_pull_requests_count .add_relaxed(pull_requests.len() as u64); @@ -2193,7 +2193,7 @@ impl ClusterInfo { if !responses.is_empty() { let timeouts = { let gossip = self.gossip.read().unwrap(); - gossip.make_timeouts(&stakes, epoch_duration) + gossip.make_timeouts(stakes, epoch_duration) }; for (from, data) in responses { self.handle_pull_response(&from, data, &timeouts); diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 6471c30dac..4485e8e1cc 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -143,14 +143,14 @@ impl ContactInfo { } let tpu = *bind_addr; - let gossip = next_port(&bind_addr, 1); - let tvu = next_port(&bind_addr, 2); - let tpu_forwards = next_port(&bind_addr, 3); - let tvu_forwards = next_port(&bind_addr, 4); - let repair = next_port(&bind_addr, 5); + let gossip = next_port(bind_addr, 1); + let tvu = next_port(bind_addr, 2); + let tpu_forwards = next_port(bind_addr, 3); + let tvu_forwards = next_port(bind_addr, 4); + let repair = next_port(bind_addr, 5); let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT); let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT); - let serve_repair = next_port(&bind_addr, 6); + let serve_repair = next_port(bind_addr, 6); Self { id: *pubkey, gossip, diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 18c7991f3b..8fd8b9423e 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -325,7 +325,7 @@ impl CrdsGossip { assert!(timeouts.contains_key(&Pubkey::default())); rv = self .pull - .purge_active(thread_pool, &mut self.crds, now, &timeouts); + .purge_active(thread_pool, &mut self.crds, now, timeouts); } self.crds .trim_purged(now.saturating_sub(5 * self.pull.crds_timeout)); diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 8daa532d9b..38319995d1 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -277,7 +277,7 @@ impl CrdsGossipPush { let (weights, peers): (Vec<_>, Vec<_>) = self .push_options( crds, - &self_id, + self_id, self_shred_version, stakes, gossip_validators, diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 02c1b623b6..b725579e96 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -71,7 +71,7 @@ impl Signable for CrdsValue { fn verify(&self) -> bool { self.get_signature() - .verify(&self.pubkey().as_ref(), self.signable_data().borrow()) + .verify(self.pubkey().as_ref(), self.signable_data().borrow()) } } @@ -853,9 +853,9 @@ mod test { wrong_keypair: &Keypair, ) { assert!(!value.verify()); - value.sign(&correct_keypair); + value.sign(correct_keypair); assert!(value.verify()); - value.sign(&wrong_keypair); + value.sign(wrong_keypair); assert!(!value.verify()); serialize_deserialize_value(value, correct_keypair); } diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 589ee3758e..e06f2577cf 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -49,7 +49,7 @@ impl GossipService { ); let t_receiver = streamer::receiver( gossip_socket.clone(), - &exit, + exit, request_sender, Recycler::default(), "gossip_receiver", @@ -319,7 +319,7 @@ fn 
make_gossip_node( gossip_socket, None, should_check_duplicate_instance, - &exit, + exit, ); (gossip_service, ip_echo, cluster_info) } diff --git a/gossip/src/main.rs b/gossip/src/main.rs index 20f667ef24..6eeeef1a19 100644 --- a/gossip/src/main.rs +++ b/gossip/src/main.rs @@ -225,7 +225,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> { .value_of("node_pubkey") .map(|pubkey_str| pubkey_str.parse::().unwrap()); let shred_version = value_t_or_exit!(matches, "shred_version", u16); - let identity_keypair = keypair_of(&matches, "identity").map(Arc::new); + let identity_keypair = keypair_of(matches, "identity").map(Arc::new); let entrypoint_addr = parse_entrypoint(matches); @@ -270,7 +270,7 @@ fn parse_entrypoint(matches: &ArgMatches) -> Option { fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> { let any = matches.is_present("any"); let all = matches.is_present("all"); - let entrypoint_addr = parse_entrypoint(&matches); + let entrypoint_addr = parse_entrypoint(matches); let timeout = value_t_or_exit!(matches, "timeout", u64); let shred_version = value_t_or_exit!(matches, "shred_version", u16); let (_all_peers, validators) = discover( diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index da4c30191b..0baa28da5c 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -240,7 +240,7 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network { fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network) { let num = network.len(); - let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, num * 2, 0.9); + let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, num * 2, 0.9); trace!( "network_simulator_pull_{}: converged: {} total_bytes: {}", num, @@ -253,7 +253,7 @@ fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network) fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_convergance: f64) { let num = network.len(); // run for a small amount of time - let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, 10, 1.0); + let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, 10, 1.0); trace!("network_simulator_push_{}: converged: {}", num, converged); // make sure there is someone in the active set let network_values: Vec = network.values().cloned().collect(); @@ -292,7 +292,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver bytes_tx ); // pull for a bit - let (converged, bytes_tx) = network_run_pull(&thread_pool, network, start, end, 1.0); + let (converged, bytes_tx) = network_run_pull(thread_pool, network, start, end, 1.0); total_bytes += bytes_tx; trace!( "network_simulator_push_{}: converged: {} bytes: {} total_bytes: {}", @@ -466,7 +466,7 @@ fn network_run_pull( .lock() .unwrap() .new_pull_request( - &thread_pool, + thread_pool, from.keypair.deref(), now, None, diff --git a/install/src/command.rs b/install/src/command.rs index 26d8b6960c..68be9b5064 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -548,7 +548,7 @@ pub fn init( init_or_update(config_file, true, false)?; let path_modified = if !no_modify_path { - add_to_path(&config.active_release_bin_dir().to_str().unwrap()) + add_to_path(config.active_release_bin_dir().to_str().unwrap()) } else { false }; @@ -613,10 +613,10 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(), return Ok(()); } - println_name_value("Configuration:", &config_file); + 
println_name_value("Configuration:", config_file); println_name_value( "Active release directory:", - &config.active_release_dir().to_str().unwrap_or("?"), + config.active_release_dir().to_str().unwrap_or("?"), ); fn print_release_version(config: &Config) { @@ -633,14 +633,14 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(), if let Some(explicit_release) = &config.explicit_release { match explicit_release { ExplicitRelease::Semver(release_semver) => { - println_name_value(&format!("{}Release version:", BULLET), &release_semver); + println_name_value(&format!("{}Release version:", BULLET), release_semver); println_name_value( &format!("{}Release URL:", BULLET), &github_release_download_url(release_semver), ); } ExplicitRelease::Channel(release_channel) => { - println_name_value(&format!("{}Release channel:", BULLET), &release_channel); + println_name_value(&format!("{}Release channel:", BULLET), release_channel); println_name_value( &format!("{}Release URL:", BULLET), &release_channel_download_url(release_channel), @@ -659,7 +659,7 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(), Some(ref update_manifest) => { println_name_value("Installed version:", ""); print_release_version(&config); - print_update_manifest(&update_manifest); + print_update_manifest(update_manifest); } None => { println_name_value("Installed version:", "None"); diff --git a/install/src/lib.rs b/install/src/lib.rs index 188cdfcd06..f79bdfcf19 100644 --- a/install/src/lib.rs +++ b/install/src/lib.rs @@ -18,7 +18,7 @@ mod stop_process; mod update_manifest; pub fn is_semver(semver: &str) -> Result<(), String> { - match semver::Version::parse(&semver) { + match semver::Version::parse(semver) { Ok(_) => Ok(()), Err(err) => Err(format!("{:?}", err)), } @@ -60,10 +60,10 @@ pub fn explicit_release_of( fn handle_init(matches: &ArgMatches<'_>, config_file: &str) -> Result<(), String> { let json_rpc_url = matches.value_of("json_rpc_url").unwrap(); - let update_manifest_pubkey = pubkey_of(&matches, "update_manifest_pubkey"); + let update_manifest_pubkey = pubkey_of(matches, "update_manifest_pubkey"); let data_dir = matches.value_of("data_dir").unwrap(); let no_modify_path = matches.is_present("no_modify_path"); - let explicit_release = explicit_release_of(&matches, "explicit_release"); + let explicit_release = explicit_release_of(matches, "explicit_release"); if update_manifest_pubkey.is_none() && explicit_release.is_none() { Err(format!( @@ -98,7 +98,7 @@ pub fn main() -> Result<(), String> { .global(true) .help("Configuration file to use"); match *defaults::CONFIG_FILE { - Some(ref config_file) => arg.default_value(&config_file), + Some(ref config_file) => arg.default_value(config_file), None => arg.required(true), } }) @@ -115,7 +115,7 @@ pub fn main() -> Result<(), String> { .required(true) .help("Directory to store install data"); match *defaults::DATA_DIR { - Some(ref data_dir) => arg.default_value(&data_dir), + Some(ref data_dir) => arg.default_value(data_dir), None => arg, } }) @@ -181,7 +181,7 @@ pub fn main() -> Result<(), String> { .required(true) .help("Keypair file of the account that funds the deployment"); match *defaults::USER_KEYPAIR { - Some(ref config_file) => arg.default_value(&config_file), + Some(ref config_file) => arg.default_value(config_file), None => arg, } }) @@ -242,7 +242,7 @@ pub fn main() -> Result<(), String> { let config_file = matches.value_of("config_file").unwrap(); match matches.subcommand() { - ("init", Some(matches)) => 
handle_init(&matches, &config_file), + ("init", Some(matches)) => handle_init(matches, config_file), ("info", Some(matches)) => { let local_info_only = matches.is_present("local_info_only"); let eval = matches.is_present("eval"); @@ -290,7 +290,7 @@ pub fn main_init() -> Result<(), String> { .takes_value(true) .help("Configuration file to use"); match *defaults::CONFIG_FILE { - Some(ref config_file) => arg.default_value(&config_file), + Some(ref config_file) => arg.default_value(config_file), None => arg.required(true), } }) @@ -303,7 +303,7 @@ pub fn main_init() -> Result<(), String> { .required(true) .help("Directory to store install data"); match *defaults::DATA_DIR { - Some(ref data_dir) => arg.default_value(&data_dir), + Some(ref data_dir) => arg.default_value(data_dir), None => arg, } }) @@ -342,5 +342,5 @@ pub fn main_init() -> Result<(), String> { .get_matches(); let config_file = matches.value_of("config_file").unwrap(); - handle_init(&matches, &config_file) + handle_init(&matches, config_file) } diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index a3feaedf93..c7c962a58a 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -153,9 +153,9 @@ fn output_keypair( ) -> Result<(), Box> { if outfile == STDOUT_OUTFILE_TOKEN { let mut stdout = std::io::stdout(); - write_keypair(&keypair, &mut stdout)?; + write_keypair(keypair, &mut stdout)?; } else { - write_keypair_file(&keypair, outfile)?; + write_keypair_file(keypair, outfile)?; println!("Wrote {} keypair to {}", source, outfile); } Ok(()) @@ -342,7 +342,7 @@ fn main() -> Result<(), Box> { .global(true) .help("Configuration file to use"); if let Some(ref config_file) = *CONFIG_FILE { - arg.default_value(&config_file) + arg.default_value(config_file) } else { arg } @@ -539,7 +539,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { if matches.is_present("outfile") { let outfile = matches.value_of("outfile").unwrap(); - check_for_overwrite(&outfile, &matches); + check_for_overwrite(outfile, matches); write_pubkey_file(outfile, pubkey)?; } else { println!("{}", pubkey); @@ -558,7 +558,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { match outfile { Some(STDOUT_OUTFILE_TOKEN) => (), - Some(outfile) => check_for_overwrite(&outfile, &matches), + Some(outfile) => check_for_overwrite(outfile, matches), None => (), } @@ -577,7 +577,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { let keypair = keypair_from_seed(seed.as_bytes())?; if let Some(outfile) = outfile { - output_keypair(&keypair, &outfile, "new") + output_keypair(&keypair, outfile, "new") .map_err(|err| format!("Unable to write {}: {}", outfile, err))?; } @@ -600,7 +600,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { }; if outfile != STDOUT_OUTFILE_TOKEN { - check_for_overwrite(&outfile, &matches); + check_for_overwrite(outfile, matches); } let keypair_name = "recover"; @@ -610,7 +610,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)? 
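// NOTE: in the keygen hunks above, `outfile` is already a `&str` and `matches`
// already a `&ArgMatches`, so the dropped `&` removed a second layer of
// reference. Sketch of the same shape (hypothetical names, std-only):
fn overwrite_check_demo() {
    // Stand-in for a CLI overwrite check; prints instead of prompting.
    fn check_demo(path: &str, force: &bool) {
        println!("would check {} (force = {})", path, force);
    }
    let outfile: &str = "key.json";
    let force_flag: &bool = &false;
    // Before: `check_demo(&outfile, &force_flag)`; after clippy's fix:
    check_demo(outfile, force_flag);
}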
}; - output_keypair(&keypair, &outfile, "recovered")?; + output_keypair(&keypair, outfile, "recovered")?; } ("grind", Some(matches)) => { let ignore_case = matches.is_present("ignore_case"); diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 38c3874043..289cc76684 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -405,7 +405,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata"); let force_reupload = arg_matches.is_present("force_reupload"); let blockstore = - crate::open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); + crate::open_blockstore(ledger_path, AccessType::TryPrimaryThenSecondary, None); runtime.block_on(upload( blockstore, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 395ce3292b..8a99672bb4 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -136,7 +136,7 @@ fn output_entry( .map(|transaction_status| transaction_status.into()); solana_cli_output::display::println_transaction( - &transaction, + transaction, &transaction_status, " ", None, @@ -455,7 +455,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String { let mut lowest_total_stake = 0; for (node_pubkey, (last_vote_slot, vote_state, stake, total_stake)) in &last_votes { all_votes.entry(*node_pubkey).and_modify(|validator_votes| { - validator_votes.remove(&last_vote_slot); + validator_votes.remove(last_vote_slot); }); dot.push(format!( @@ -475,7 +475,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String { dot.push(format!( r#" "last vote {}" -> "{}" [style=dashed,label="latest vote"];"#, node_pubkey, - if styled_slots.contains(&last_vote_slot) { + if styled_slots.contains(last_vote_slot) { last_vote_slot.to_string() } else { if *last_vote_slot < lowest_last_vote_slot { @@ -522,7 +522,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String { r#" "{} vote {}" -> "{}" [style=dotted,label="vote"];"#, node_pubkey, vote_slot, - if styled_slots.contains(&vote_slot) { + if styled_slots.contains(vote_slot) { vote_slot.to_string() } else { "...".to_string() @@ -714,8 +714,8 @@ fn load_bank_forks( }; bank_forks_utils::load( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, None, snapshot_config.as_ref(), @@ -747,7 +747,7 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> transactions += entry.transactions.len(); for transaction in &entry.transactions { programs += transaction.message().instructions.len(); - let tx_cost = cost_model.calculate_cost(&transaction); + let tx_cost = cost_model.calculate_cost(transaction); if cost_tracker.try_add(tx_cost).is_err() { println!( "Slot: {}, CostModel rejected transaction {:?}, stats {:?}!", @@ -887,7 +887,7 @@ fn main() { .long("maximum-snapshots-to-retain") .value_name("NUMBER") .takes_value(true) - .default_value(&default_max_snapshot_to_retain) + .default_value(default_max_snapshot_to_retain) .help("Maximum number of snapshots to hold on to during snapshot purge"); let rent = Rent::default(); @@ -1927,14 +1927,14 @@ fn main() { let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); - let faucet_pubkey = pubkey_of(&arg_matches, "faucet_pubkey"); + let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); let faucet_lamports = value_t!(arg_matches, 
"faucet_lamports", u64).unwrap_or(0); let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); let bootstrap_stake_authorized_pubkey = - pubkey_of(&arg_matches, "bootstrap_stake_authorized_pubkey"); + pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); let bootstrap_validator_lamports = value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); let bootstrap_validator_stake_lamports = @@ -1948,9 +1948,9 @@ fn main() { ); exit(1); } - let bootstrap_validator_pubkeys = pubkeys_of(&arg_matches, "bootstrap_validator"); + let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator"); let accounts_to_remove = - pubkeys_of(&arg_matches, "accounts_to_remove").unwrap_or_default(); + pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); let snapshot_version = arg_matches .value_of("snapshot_version") @@ -2105,9 +2105,9 @@ fn main() { ); let vote_account = vote_state::create_account_with_authorized( - &identity_pubkey, - &identity_pubkey, - &identity_pubkey, + identity_pubkey, + identity_pubkey, + identity_pubkey, 100, VoteState::get_rent_exempt_reserve(&rent).max(1), ); @@ -2117,8 +2117,8 @@ fn main() { &stake_state::create_account( bootstrap_stake_authorized_pubkey .as_ref() - .unwrap_or(&identity_pubkey), - &vote_pubkey, + .unwrap_or(identity_pubkey), + vote_pubkey, &vote_account, &rent, bootstrap_validator_stake_lamports, @@ -2544,7 +2544,7 @@ fn main() { } }; let warped_bank = Bank::new_from_parent_with_tracer( - &base_bank, + base_bank, base_bank.collector_id(), next_epoch, tracer, @@ -2561,7 +2561,7 @@ fn main() { println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot()); println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch()); - assert_capitalization(&base_bank); + assert_capitalization(base_bank); assert_capitalization(&warped_bank); let interest_per_epoch = ((warped_bank.capitalization() as f64) / (base_bank.capitalization() as f64) @@ -2589,7 +2589,7 @@ fn main() { pubkey, account, base_bank - .get_account(&pubkey) + .get_account(pubkey) .map(|a| a.lamports()) .unwrap_or_default(), ) @@ -2788,7 +2788,7 @@ fn main() { ); } - assert_capitalization(&bank); + assert_capitalization(bank); println!("Inflation: {:?}", bank.inflation()); println!("RentCollector: {:?}", bank.rent_collector()); println!("Capitalization: {}", Sol(bank.capitalization())); diff --git a/ledger-tool/tests/basic.rs b/ledger-tool/tests/basic.rs index c9ccf9ae69..4cda481e6e 100644 --- a/ledger-tool/tests/basic.rs +++ b/ledger-tool/tests/basic.rs @@ -39,11 +39,11 @@ fn nominal() { let ledger_path = ledger_path.to_str().unwrap(); // Basic validation - let output = run_ledger_tool(&["-l", &ledger_path, "verify"]); + let output = run_ledger_tool(&["-l", ledger_path, "verify"]); assert!(output.status.success()); // Print everything - let output = run_ledger_tool(&["-l", &ledger_path, "print", "-vvv"]); + let output = run_ledger_tool(&["-l", ledger_path, "print", "-vvv"]); assert!(output.status.success()); assert_eq!(count_newlines(&output.stdout), ticks + meta_lines); } diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index bc416f8795..9a8d5321c2 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -58,8 +58,8 @@ pub fn load( ) { return load_from_snapshot( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, shrink_paths, snapshot_config, @@ -79,8 +79,8 @@ pub fn load( } load_from_genesis( - 
&genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, process_options, cache_block_meta_sender, @@ -97,8 +97,8 @@ fn load_from_genesis( info!("Processing ledger from genesis"); to_loadresult( blockstore_processor::process_blockstore( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, process_options, cache_block_meta_sender, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 34ed286685..e4bbef6c33 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -705,7 +705,7 @@ impl Blockstore { for (&(slot, set_index), erasure_meta) in erasure_metas.iter() { let index_meta_entry = index_working_set.get_mut(&slot).expect("Index"); let index = &mut index_meta_entry.index; - match erasure_meta.status(&index) { + match erasure_meta.status(index) { ErasureMetaStatus::CanRecover => { Self::recover_shreds( index, @@ -838,7 +838,7 @@ impl Blockstore { let mut num_recovered_exists = 0; if let Some(leader_schedule_cache) = leader_schedule { let recovered_data = Self::try_shred_recovery( - &db, + db, &erasure_metas, &mut index_working_set, &mut just_inserted_data_shreds, @@ -1135,14 +1135,14 @@ impl Blockstore { let maybe_shred = self.get_coding_shred(slot, coding_index); if let Ok(Some(shred_data)) = maybe_shred { let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); - if Self::erasure_mismatch(&potential_shred, &shred) { + if Self::erasure_mismatch(&potential_shred, shred) { conflicting_shred = Some(potential_shred.payload); } break; } else if let Some(potential_shred) = just_received_coding_shreds.get(&(slot, coding_index)) { - if Self::erasure_mismatch(&potential_shred, &shred) { + if Self::erasure_mismatch(potential_shred, shred) { conflicting_shred = Some(potential_shred.payload.clone()); } break; @@ -1183,7 +1183,7 @@ impl Blockstore { let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); if !is_trusted { - if Self::is_data_shred_present(&shred, slot_meta, &index_meta.data()) { + if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) { handle_duplicate(shred); return Err(InsertDataShredError::Exists); } @@ -1474,7 +1474,7 @@ impl Blockstore { index as u32, new_consumed, shred.reference_tick(), - &data_index, + data_index, ); if slot_meta.is_full() { datapoint_info!( @@ -1689,7 +1689,7 @@ impl Blockstore { } break; } - let (current_slot, index) = C::index(&db_iterator.key().expect("Expect a valid key")); + let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key")); let current_index = { if current_slot > slot { @@ -1702,7 +1702,7 @@ impl Blockstore { let upper_index = cmp::min(current_index, end_index); // the tick that will be used to figure out the timeout for this hole let reference_tick = u64::from(Shred::reference_tick_from_data( - &db_iterator.value().expect("couldn't read value"), + db_iterator.value().expect("couldn't read value"), )); if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS { @@ -2437,7 +2437,7 @@ impl Blockstore { address_signatures.extend( signatures .into_iter() - .filter(|(_, signature)| !excluded_signatures.contains(&signature)), + .filter(|(_, signature)| !excluded_signatures.contains(signature)), ) } else { address_signatures.append(&mut signatures); @@ -2520,7 +2520,7 @@ impl Blockstore { next_primary_index_iter_timer.stop(); let mut address_signatures: Vec<(Slot, Signature)> = address_signatures .into_iter() - .filter(|(_, signature)| !until_excluded_signatures.contains(&signature)) + 
.filter(|(_, signature)| !until_excluded_signatures.contains(signature)) .collect(); address_signatures.truncate(limit); @@ -2993,7 +2993,7 @@ impl Blockstore { } pub fn scan_and_fix_roots(&self, exit: &Arc) -> Result<()> { - let ancestor_iterator = AncestorIterator::new(self.last_root(), &self) + let ancestor_iterator = AncestorIterator::new(self.last_root(), self) .take_while(|&slot| slot >= self.lowest_cleanup_slot()); let mut find_missing_roots = Measure::start("find_missing_roots"); @@ -3278,8 +3278,8 @@ fn commit_slot_meta_working_set( } // Check if the working copy of the metadata has changed if Some(meta) != meta_backup.as_ref() { - should_signal = should_signal || slot_has_updates(meta, &meta_backup); - write_batch.put::(*slot, &meta)?; + should_signal = should_signal || slot_has_updates(meta, meta_backup); + write_batch.put::(*slot, meta)?; } } @@ -3430,7 +3430,7 @@ fn handle_chaining_for_slot( traverse_children_mut( db, slot, - &meta, + meta, working_set, new_chained_slots, slot_function, @@ -3520,7 +3520,7 @@ pub fn create_new_ledger( access_type: AccessType, ) -> Result { Blockstore::destroy(ledger_path)?; - genesis_config.write(&ledger_path)?; + genesis_config.write(ledger_path)?; // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger. let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?; diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 0e98022ff7..2fb35aecc1 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -112,7 +112,7 @@ fn execute_batch( let mut mint_decimals: HashMap = HashMap::new(); let pre_token_balances = if record_token_balances { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -139,7 +139,7 @@ fn execute_batch( if let Some(transaction_status_sender) = transaction_status_sender { let txs = batch.transactions_iter().cloned().collect(); let post_token_balances = if record_token_balances { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -327,7 +327,7 @@ fn process_entries_with_callback( timings, )?; for hash in tick_hashes { - bank.register_tick(&hash); + bank.register_tick(hash); } Ok(()) } @@ -396,7 +396,7 @@ pub fn process_blockstore( // Setup bank for slot 0 let bank0 = Bank::new_with_paths( - &genesis_config, + genesis_config, account_paths, &opts.frozen_accounts, opts.debug_keys.clone(), @@ -896,9 +896,9 @@ fn process_next_slots( // handles any partials if next_meta.is_full() { let next_bank = Arc::new(Bank::new_from_parent( - &bank, + bank, &leader_schedule_cache - .slot_leader_at(*next_slot, Some(&bank)) + .slot_leader_at(*next_slot, Some(bank)) .unwrap(), *next_slot, )); @@ -1048,7 +1048,7 @@ fn load_frozen_forks( *root = new_root_bank.slot(); last_root = new_root_bank.slot(); - leader_schedule_cache.set_root(&new_root_bank); + leader_schedule_cache.set_root(new_root_bank); new_root_bank.squash(); if last_free.elapsed() > Duration::from_secs(10) { @@ -3093,7 +3093,7 @@ pub mod tests { account_paths: Vec, ) -> EpochSchedule { let bank = Bank::new_with_paths( - &genesis_config, + genesis_config, account_paths, &[], None, @@ -3274,7 +3274,7 @@ pub mod tests { slot_leader_keypair: &Arc, ) { // Add votes to `last_slot` so that `root` will be confirmed - let vote_entry = next_entry(&parent_blockhash, 1, vec![vote_tx]); + let 
vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]); let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash); entries.insert(0, vote_entry); blockstore @@ -3285,7 +3285,7 @@ pub mod tests { ticks_per_slot, Some(parent_slot), true, - &slot_leader_keypair, + slot_leader_keypair, entries, 0, ) diff --git a/ledger/src/entry.rs b/ledger/src/entry.rs index 8e7ea398ae..2c45bf9c9b 100644 --- a/ledger/src/entry.rs +++ b/ledger/src/entry.rs @@ -682,7 +682,7 @@ impl EntrySlice for [Entry] { } pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec) -> Entry { - let entry = Entry::new(&start, num_hashes, transactions); + let entry = Entry::new(start, num_hashes, transactions); *start = entry.hash; entry } @@ -737,7 +737,7 @@ mod tests { #[test] fn test_entry_verify() { let zero = Hash::default(); - let one = hash(&zero.as_ref()); + let one = hash(zero.as_ref()); assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step @@ -826,7 +826,7 @@ mod tests { fn test_verify_slice1() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); + let one = hash(zero.as_ref()); assert!(vec![][..].verify(&zero)); // base case assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1 assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad @@ -841,8 +841,8 @@ mod tests { fn test_verify_slice_with_hashes1() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); assert!(vec![][..].verify(&one)); // base case assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1 assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad @@ -861,8 +861,8 @@ mod tests { fn test_verify_slice_with_hashes_and_transactions() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); let alice_keypair = Keypair::new(); let bob_keypair = Keypair::new(); let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one); diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs index 47df872428..ea21a79b73 100644 --- a/ledger/src/leader_schedule_utils.rs +++ b/ledger/src/leader_schedule_utils.rs @@ -63,9 +63,9 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) { // Note: Use unstable sort, because we dedup right after to remove the equal elements. 
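// NOTE: a compact, runnable illustration of the sort-then-dedup idiom the
// comment above describes, with plain integers standing in for `Pubkey` so it
// compiles without any Solana types. `sort_unstable_by` is fine here exactly
// because elements that compare equal are removed by the `dedup` that follows,
// so their relative order cannot be observed:
fn sort_stakes_demo() {
    let mut stakes: Vec<(u64, u64)> = vec![(1, 10), (2, 5), (1, 10), (3, 5)];
    // Descending by stake, ties broken by id, mirroring the comparator below.
    stakes.sort_unstable_by(|(l_id, l_stake), (r_id, r_stake)| {
        if r_stake == l_stake {
            r_id.cmp(l_id)
        } else {
            r_stake.cmp(l_stake)
        }
    });
    stakes.dedup(); // the duplicate (1, 10) entry collapses to one
    assert_eq!(stakes, vec![(1, 10), (3, 5), (2, 5)]);
}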
stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| { if r_stake == l_stake { - r_pubkey.cmp(&l_pubkey) + r_pubkey.cmp(l_pubkey) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }); diff --git a/ledger/src/poh.rs b/ledger/src/poh.rs index 0ade8d7a75..23521c9759 100644 --- a/ledger/src/poh.rs +++ b/ledger/src/poh.rs @@ -63,7 +63,7 @@ impl Poh { let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes); for _ in 0..num_hashes { - self.hash = hash(&self.hash.as_ref()); + self.hash = hash(self.hash.as_ref()); } self.num_hashes += num_hashes; self.remaining_hashes -= num_hashes; @@ -77,7 +77,7 @@ impl Poh { return None; // Caller needs to `tick()` first } - self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]); + self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]); let num_hashes = self.num_hashes + 1; self.num_hashes = 0; self.remaining_hashes -= 1; @@ -89,7 +89,7 @@ impl Poh { } pub fn tick(&mut self) -> Option { - self.hash = hash(&self.hash.as_ref()); + self.hash = hash(self.hash.as_ref()); self.num_hashes += 1; self.remaining_hashes -= 1; @@ -115,7 +115,7 @@ pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 { let mut v = Hash::default(); let start = Instant::now(); for _ in 0..hashes_sample_size { - v = hash(&v.as_ref()); + v = hash(v.as_ref()); } start.elapsed().as_nanos() as u64 } @@ -139,11 +139,11 @@ mod tests { assert_ne!(entry.num_hashes, 0); for _ in 1..entry.num_hashes { - current_hash = hash(¤t_hash.as_ref()); + current_hash = hash(current_hash.as_ref()); } current_hash = match mixin { - Some(mixin) => hashv(&[¤t_hash.as_ref(), &mixin.as_ref()]), - None => hash(¤t_hash.as_ref()), + Some(mixin) => hashv(&[current_hash.as_ref(), mixin.as_ref()]), + None => hash(current_hash.as_ref()), }; if current_hash != entry.hash { return false; @@ -192,9 +192,9 @@ mod tests { #[test] fn test_poh_verify() { let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); - let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); + let one_with_zero = hashv(&[zero.as_ref(), zero.as_ref()]); let mut poh = Poh::new(zero, None); assert!(verify( @@ -262,7 +262,7 @@ mod tests { ( PohEntry { num_hashes: 1, - hash: hash(&one_with_zero.as_ref()), + hash: hash(one_with_zero.as_ref()), }, None ) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index f1d6ff92c9..8549562eba 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -840,7 +840,7 @@ impl Shredder { first_index: usize, slot: Slot, ) -> std::result::Result, reed_solomon_erasure::Error> { - Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?; + Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?; let mut recovered_data = vec![]; let fec_set_size = num_data + num_coding; @@ -933,7 +933,7 @@ impl Shredder { pub fn deshred(shreds: &[Shred]) -> std::result::Result, reed_solomon_erasure::Error> { use reed_solomon_erasure::Error::TooFewDataShards; const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER; - Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?; + Self::verify_consistent_shred_payload_sizes("deshred()", shreds)?; let index = shreds.first().ok_or(TooFewDataShards)?.index(); let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i); let data_complete = { diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 4f6511078a..42ae66d83d 100644 --- 
a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -312,7 +312,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) { ); let signature = keypair.sign_message(&packet.data[msg_start..msg_end]); trace!("signature {:?}", signature); - packet.data[0..sig_end].copy_from_slice(&signature.as_ref()); + packet.data[0..sig_end].copy_from_slice(signature.as_ref()); } pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) { @@ -364,7 +364,7 @@ pub fn sign_shreds_gpu( let mut elems = Vec::new(); let offset: usize = pinned_keypair.len(); - let num_keypair_packets = vec_size_in_packets(&pinned_keypair); + let num_keypair_packets = vec_size_in_packets(pinned_keypair); let mut num_packets = num_keypair_packets; //should be zero diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index dfb95a42a4..f37759b007 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -63,10 +63,10 @@ pub fn spend_and_verify_all_nodes( .get_recent_blockhash_with_commitment(CommitmentConfig::confirmed()) .unwrap(); let mut transaction = - system_transaction::transfer(&funding_keypair, &random_keypair.pubkey(), 1, blockhash); + system_transaction::transfer(funding_keypair, &random_keypair.pubkey(), 1, blockhash); let confs = VOTE_THRESHOLD_DEPTH + 1; let sig = client - .retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 10, confs) + .retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs) .unwrap(); for validator in &cluster_nodes { if ignore_nodes.contains(&validator.id) { @@ -114,14 +114,14 @@ pub fn send_many_transactions( let transfer_amount = thread_rng().gen_range(1, max_tokens_per_transfer); let mut transaction = system_transaction::transfer( - &funding_keypair, + funding_keypair, &random_keypair.pubkey(), transfer_amount, blockhash, ); client - .retry_transfer(&funding_keypair, &mut transaction, 5) + .retry_transfer(funding_keypair, &mut transaction, 5) .unwrap(); expected_balances.insert(random_keypair.pubkey(), transfer_amount); @@ -236,7 +236,7 @@ pub fn kill_entry_and_spend_and_verify_rest( .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); let mut transaction = system_transaction::transfer( - &funding_keypair, + funding_keypair, &random_keypair.pubkey(), 1, blockhash, @@ -245,7 +245,7 @@ pub fn kill_entry_and_spend_and_verify_rest( let confs = VOTE_THRESHOLD_DEPTH + 1; let sig = { let sig = client.retry_transfer_until_confirmed( - &funding_keypair, + funding_keypair, &mut transaction, 5, confs, @@ -260,7 +260,7 @@ pub fn kill_entry_and_spend_and_verify_rest( } }; info!("poll_all_nodes_for_signature()"); - match poll_all_nodes_for_signature(&entry_point_info, &cluster_nodes, &sig, confs) { + match poll_all_nodes_for_signature(entry_point_info, &cluster_nodes, &sig, confs) { Err(e) => { info!("poll_all_nodes_for_signature() failed {:?}", e); result = Err(e); @@ -377,7 +377,7 @@ fn poll_all_nodes_for_signature( continue; } let client = create_client(validator.client_facing_addr(), VALIDATOR_PORT_RANGE); - client.poll_for_signature_confirmation(&sig, confs)?; + client.poll_for_signature_confirmation(sig, confs)?; } Ok(()) diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index d455daefbb..c842d5bfe1 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -449,7 +449,7 @@ impl LocalCluster { .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); let mut 
tx = - system_transaction::transfer(&source_keypair, dest_pubkey, lamports, blockhash); + system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash); info!( "executing transfer of {} from {} to {}", lamports, @@ -457,7 +457,7 @@ impl LocalCluster { *dest_pubkey ); client - .retry_transfer(&source_keypair, &mut tx, 10) + .retry_transfer(source_keypair, &mut tx, 10) .expect("client transfer"); client .wait_for_balance_with_commitment( @@ -512,7 +512,7 @@ impl LocalCluster { .0, ); client - .retry_transfer(&from_account, &mut transaction, 10) + .retry_transfer(from_account, &mut transaction, 10) .expect("fund vote"); client .wait_for_balance_with_commitment( @@ -616,7 +616,7 @@ impl Cluster for LocalCluster { } fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo { - let mut node = self.validators.remove(&pubkey).unwrap(); + let mut node = self.validators.remove(pubkey).unwrap(); // Shut down the validator let mut validator = node.validator.take().expect("Validator must be running"); @@ -631,7 +631,7 @@ impl Cluster for LocalCluster { cluster_validator_info: &mut ClusterValidatorInfo, ) -> (Node, Option) { // Update the stored ContactInfo for this node - let node = Node::new_localhost_with_pubkey(&pubkey); + let node = Node::new_localhost_with_pubkey(pubkey); cluster_validator_info.info.contact_info = node.info.clone(); cluster_validator_info.config.rpc_addrs = Some((node.info.rpc, node.info.rpc_pubsub)); diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 7c9da39da3..48389d06cd 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -425,7 +425,7 @@ fn run_cluster_partition( fn test_cluster_partition_1_2() { let empty = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &[vec![1], vec![1, 1]], @@ -445,7 +445,7 @@ fn test_cluster_partition_1_2() { fn test_cluster_partition_1_1() { let empty = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &[vec![1], vec![1]], @@ -465,7 +465,7 @@ fn test_cluster_partition_1_1() { fn test_cluster_partition_1_1_1() { let empty = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &[vec![1], vec![1], vec![1]], @@ -525,7 +525,7 @@ fn test_kill_heaviest_partition() { let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { info!("Killing validator with id: {}", validator_to_kill); cluster.exit_node(&validator_to_kill); - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &partitions, @@ -594,7 +594,7 @@ fn run_kill_partition_switch_threshold( .iter() .map(|validator_to_kill| { info!("Killing validator with id: {}", validator_to_kill); - cluster.exit_node(&validator_to_kill) + cluster.exit_node(validator_to_kill) }) .collect(); on_partition_start( @@ -622,7 +622,7 @@ fn find_latest_replayed_slot_from_ledger( mut latest_slot: Slot, ) -> (Slot, HashSet) { loop { - let mut blockstore = 
open_blockstore(&ledger_path); + let mut blockstore = open_blockstore(ledger_path); // This is kind of a hack because we can't query for new frozen blocks over RPC // since the validator is not voting. let new_latest_slots: Vec = blockstore @@ -644,7 +644,7 @@ fn find_latest_replayed_slot_from_ledger( break; } else { sleep(Duration::from_millis(50)); - blockstore = open_blockstore(&ledger_path); + blockstore = open_blockstore(ledger_path); } } // Check the slot has been replayed @@ -666,7 +666,7 @@ fn find_latest_replayed_slot_from_ledger( ); } else { sleep(Duration::from_millis(50)); - blockstore = open_blockstore(&ledger_path); + blockstore = open_blockstore(ledger_path); } } } else { @@ -870,7 +870,7 @@ fn test_switch_threshold_uses_gossip_votes() { 0, crds_value::Vote::new(node_keypair.pubkey(), vote_tx, timestamp()), ), - &node_keypair, + node_keypair, )], context .dead_validator_info @@ -962,7 +962,7 @@ fn test_kill_partition_switch_threshold_no_progress() { |_: &mut LocalCluster, _: &[Pubkey], _: Vec, _: &mut ()| {}; let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_no_new_roots(400, &"PARTITION_TEST"); + cluster.check_no_new_roots(400, "PARTITION_TEST"); }; // This kills `max_failures_stake`, so no progress should be made @@ -1015,7 +1015,7 @@ fn test_kill_partition_switch_threshold_progress() { |_: &mut LocalCluster, _: &[Pubkey], _: Vec, _: &mut ()| {}; let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_kill_partition_switch_threshold( &[&[(failures_stake as usize, 16)]], @@ -1246,7 +1246,7 @@ fn test_fork_choice_refresh_old_votes() { // for lockouts built during partition to resolve and gives validators an opportunity // to try and switch forks) let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_kill_partition_switch_threshold( @@ -1320,7 +1320,7 @@ fn test_forwarding() { .unwrap(); // Confirm that transactions were forwarded to and processed by the leader. 
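// NOTE: many hunks in this test file drop a borrow on a string literal:
// `&"PARTITION_TEST"` has type `&&str` and only type-checks via deref
// coercion. Minimal sketch (hypothetical function, not the real cluster API):
fn literal_borrow_demo() {
    fn check_demo(test_name: &str) {
        println!("checking roots for {}", test_name);
    }
    // Before: `check_demo(&"PARTITION_TEST")`; after the fix:
    check_demo("PARTITION_TEST");
}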
- cluster_tests::send_many_transactions(&validator_info, &cluster.funding_keypair, 10, 20); + cluster_tests::send_many_transactions(validator_info, &cluster.funding_keypair, 10, 20); } #[test] @@ -1532,7 +1532,7 @@ fn test_frozen_account_from_snapshot() { trace!("Waiting for snapshot at {:?}", snapshot_package_output_path); let (archive_filename, _archive_snapshot_hash) = - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); trace!("Found snapshot: {:?}", archive_filename); @@ -1668,7 +1668,7 @@ fn test_snapshot_download() { trace!("Waiting for snapshot"); let (archive_filename, archive_snapshot_hash) = - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); trace!("found: {:?}", archive_filename); let validator_archive_path = snapshot_utils::get_snapshot_archive_path( @@ -1743,7 +1743,7 @@ fn test_snapshot_restart_tower() { .snapshot_package_output_path; let (archive_filename, archive_snapshot_hash) = - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); // Copy archive to validator's snapshot output directory let validator_archive_path = snapshot_utils::get_snapshot_archive_path( @@ -1765,7 +1765,7 @@ fn test_snapshot_restart_tower() { // validator's ContactInfo let restarted_node_info = cluster.get_contact_info(&validator_id).unwrap(); cluster_tests::spend_and_verify_all_nodes( - &restarted_node_info, + restarted_node_info, &cluster.funding_keypair, 1, HashSet::new(), @@ -1926,7 +1926,7 @@ fn test_snapshots_restart_validity() { expected_balances.extend(new_balances); - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); // Create new account paths since validator exit is not guaranteed to cleanup RPC threads, // which may delete the old accounts on exit at any point @@ -2019,7 +2019,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) { let cluster = LocalCluster::new(&mut cluster_config); // Check for new roots - cluster.check_for_new_roots(16, &"test_faulty_node"); + cluster.check_for_new_roots(16, "test_faulty_node"); } #[test] @@ -2365,7 +2365,7 @@ fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) { } fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option { - let tower = Tower::restore(&ledger_path, &node_pubkey); + let tower = Tower::restore(ledger_path, node_pubkey); if let Err(tower_err) = tower { if tower_err.is_file_missing() { return None; @@ -2374,7 +2374,7 @@ fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option { } } // actually saved tower must have at least one vote. 
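// NOTE: `ledger_path` and `node_pubkey` reach the tower helpers below as
// `&Path` and `&Pubkey`, so forwarding them needs no fresh borrow.
// Self-contained sketch with a hypothetical loader standing in for
// `Tower::restore`:
use std::path::Path;

fn restore_demo(ledger_path: &Path, node_id: &u64) -> Option<String> {
    fn load_demo(path: &Path, id: &u64) -> Option<String> {
        Some(format!("{}:{}", path.display(), id))
    }
    // Before: `load_demo(&ledger_path, &node_id)`; both are already references.
    load_demo(ledger_path, node_id)
}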
- Tower::restore(&ledger_path, &node_pubkey).ok() + Tower::restore(ledger_path, node_pubkey).ok() } fn last_vote_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> { @@ -2386,7 +2386,7 @@ fn root_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option { } fn remove_tower(ledger_path: &Path, node_pubkey: &Pubkey) { - fs::remove_file(Tower::get_filename(&ledger_path, &node_pubkey)).unwrap(); + fs::remove_file(Tower::get_filename(ledger_path, node_pubkey)).unwrap(); } // A bit convoluted test case; but this roughly follows this test theoretical scenario: @@ -2847,7 +2847,7 @@ fn test_hard_fork_invalidates_tower() { cluster .lock() .unwrap() - .check_for_new_roots(16, &"hard fork"); + .check_for_new_roots(16, "hard fork"); } #[test] @@ -2906,7 +2906,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(20, &"run_test_load_program_accounts_partition"); + cluster.check_for_new_roots(20, "run_test_load_program_accounts_partition"); exit.store(true, Ordering::Relaxed); t_update.join().unwrap(); t_scan.join().unwrap(); @@ -3097,7 +3097,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) { scan_client_sender.send(scan_client).unwrap(); // Wait for some roots to pass - cluster.check_for_new_roots(40, &"run_test_load_program_accounts"); + cluster.check_for_new_roots(40, "run_test_load_program_accounts"); // Exit and ensure no violations of consistency were found exit.store(true, Ordering::Relaxed); diff --git a/measure/src/measure.rs b/measure/src/measure.rs index 3b34260523..26f32b097c 100644 --- a/measure/src/measure.rs +++ b/measure/src/measure.rs @@ -216,7 +216,7 @@ mod tests { { let some_struct = SomeStruct { x: 42 }; let (result, _measure) = Measure::this( - |(obj, x)| SomeStruct::add_to(&obj, x), + |(obj, x)| SomeStruct::add_to(obj, x), (&some_struct, 4), "test", ); diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs index 1bbc0dd778..29b7ecc526 100644 --- a/metrics/src/counter.rs +++ b/metrics/src/counter.rs @@ -222,7 +222,7 @@ mod tests { INIT_HOOK.call_once(|| { ENV_LOCK = Some(RwLock::new(())); }); - &ENV_LOCK.as_ref().unwrap() + ENV_LOCK.as_ref().unwrap() } } diff --git a/perf/src/packet.rs b/perf/src/packet.rs index bdd2052c7e..e73a5ad6b7 100644 --- a/perf/src/packet.rs +++ b/perf/src/packet.rs @@ -66,7 +66,7 @@ impl Packets { pub fn set_addr(&mut self, addr: &SocketAddr) { for m in self.packets.iter_mut() { - m.meta.set_addr(&addr); + m.meta.set_addr(addr); } } diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index a965107192..2b5975a6bf 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -519,11 +519,11 @@ mod tests { let packet_offsets = sigverify::get_packet_offsets(&packet, 0); assert_eq!( - memfind(&tx_bytes, &tx.signatures[0].as_ref()), + memfind(&tx_bytes, tx.signatures[0].as_ref()), Some(SIG_OFFSET) ); assert_eq!( - memfind(&tx_bytes, &tx.message().account_keys[0].as_ref()), + memfind(&tx_bytes, tx.message().account_keys[0].as_ref()), Some(packet_offsets.pubkey_start as usize) ); assert_eq!( @@ -531,7 +531,7 @@ mod tests { Some(packet_offsets.msg_start as usize) ); assert_eq!( - memfind(&tx_bytes, &tx.signatures[0].as_ref()), + memfind(&tx_bytes, tx.signatures[0].as_ref()), Some(packet_offsets.sig_start as usize) ); assert_eq!(packet_offsets.sig_len, 1); @@ -667,7 +667,7 @@ mod tests { let 
tx_bytes = serialize(&tx0).unwrap(); assert!(tx_bytes.len() <= PACKET_DATA_SIZE); assert_eq!( - memfind(&tx_bytes, &tx0.signatures[0].as_ref()), + memfind(&tx_bytes, tx0.signatures[0].as_ref()), Some(SIG_OFFSET) ); let tx1 = deserialize(&tx_bytes).unwrap(); diff --git a/poh/benches/poh_verify.rs b/poh/benches/poh_verify.rs index e917a5ebea..b0ae0b7aae 100644 --- a/poh/benches/poh_verify.rs +++ b/poh/benches/poh_verify.rs @@ -18,7 +18,7 @@ const NUM_ENTRIES: usize = 800; fn bench_poh_verify_ticks(bencher: &mut Bencher) { solana_logger::setup(); let zero = Hash::default(); - let start_hash = hash(&zero.as_ref()); + let start_hash = hash(zero.as_ref()); let mut cur_hash = start_hash; let mut ticks: Vec = Vec::with_capacity(NUM_ENTRIES); @@ -34,7 +34,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) { #[bench] fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) { let zero = Hash::default(); - let start_hash = hash(&zero.as_ref()); + let start_hash = hash(zero.as_ref()); let mut cur_hash = start_hash; let keypair1 = Keypair::new(); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 2fd475999c..2a17c68d41 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -754,11 +754,11 @@ pub fn create_test_recorder( bank.ticks_per_slot(), &Pubkey::default(), blockstore, - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &Arc::new(LeaderScheduleCache::new_from_bank(bank)), &poh_config, exit.clone(), ); - poh_recorder.set_bank(&bank); + poh_recorder.set_bank(bank); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_service = PohService::new( diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index c9e0328abd..6e7b9e33e7 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -311,7 +311,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { { let mut program_signer = false; for seeds in signers_seeds.iter() { - let signer = Pubkey::create_program_address(&seeds, &caller).unwrap(); + let signer = Pubkey::create_program_address(seeds, &caller).unwrap(); if instruction_account.pubkey == signer { program_signer = true; break; @@ -324,7 +324,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { } } - invoke_context.record_instruction(&instruction); + invoke_context.record_instruction(instruction); solana_runtime::message_processor::MessageProcessor::process_cross_program_instruction( &message, @@ -769,7 +769,7 @@ impl ProgramTest { // Add commonly-used SPL programs as a convenience to the user for (program_id, account) in programs::spl_programs(&Rent::default()).iter() { - bank.store_account(program_id, &account); + bank.store_account(program_id, account); } // User-supplied additional builtins @@ -782,10 +782,10 @@ impl ProgramTest { } for (address, account) in self.accounts.iter() { - if bank.get_account(&address).is_some() { + if bank.get_account(address).is_some() { info!("Overriding account at {}", address); } - bank.store_account(&address, &account); + bank.store_account(address, account); } bank.set_capitalization(); if let Some(max_units) = self.bpf_compute_max_units { diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index e99c953b49..472c795305 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -107,9 +107,9 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, 
false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -128,9 +128,9 @@ fn bench_serialize_aligned(bencher: &mut Bencher) { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); diff --git a/programs/bpf_loader/build.rs b/programs/bpf_loader/build.rs index 38d5e942ae..2c665cd93f 100644 --- a/programs/bpf_loader/build.rs +++ b/programs/bpf_loader/build.rs @@ -28,7 +28,7 @@ fn main() { }; let mut out = BufWriter::new(file); let sysc_re = Regex::new(r#"register_syscall_by_name\([[:space:]]*b"([^"]+)","#).unwrap(); - for caps in sysc_re.captures_iter(&text) { + for caps in sysc_re.captures_iter(text) { writeln!(out, "{}", caps[1].to_string()).unwrap(); } } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index e74ffcc81b..a363691898 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -128,7 +128,7 @@ fn write_program_data( ); return Err(InstructionError::AccountDataTooSmall); } - data[program_data_offset..program_data_offset + len].copy_from_slice(&bytes); + data[program_data_offset..program_data_offset + len].copy_from_slice(bytes); Ok(()) } @@ -369,7 +369,7 @@ fn process_loader_upgradeable_instruction( // Create ProgramData account let (derived_address, bump_seed) = - Pubkey::find_program_address(&[program.unsigned_key().as_ref()], &program_id); + Pubkey::find_program_address(&[program.unsigned_key().as_ref()], program_id); if derived_address != *programdata.unsigned_key() { ic_logger_msg!(logger, "ProgramData address is not derived"); return Err(InstructionError::InvalidArgument); @@ -759,7 +759,7 @@ impl Executor for BpfExecutor { let mut serialize_time = Measure::start("serialize"); let keyed_accounts = invoke_context.get_keyed_accounts()?; let mut parameter_bytes = - serialize_parameters(loader_id, program_id, keyed_accounts, &instruction_data)?; + serialize_parameters(loader_id, program_id, keyed_accounts, instruction_data)?; serialize_time.stop(); let mut create_vm_time = Measure::start("create_vm"); let mut execute_time; @@ -2228,7 +2228,7 @@ mod tests { .unwrap(); buffer_account.borrow_mut().data_as_mut_slice() [UpgradeableLoaderState::buffer_data_offset().unwrap()..] - .copy_from_slice(&elf_new); + .copy_from_slice(elf_new); let programdata_account = AccountSharedData::new_ref( min_programdata_balance, UpgradeableLoaderState::programdata_len(elf_orig.len().max(elf_new.len())).unwrap(), diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index f9529c017d..4f842abebb 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -104,7 +104,7 @@ pub fn serialize_parameters_unaligned( .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? 
as u64) .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&keyed_account.try_account_ref()?.data()) + v.write_all(keyed_account.try_account_ref()?.data()) .map_err(|_| InstructionError::InvalidArgument)?; v.write_all(keyed_account.owner()?.as_ref()) .map_err(|_| InstructionError::InvalidArgument)?; @@ -223,7 +223,7 @@ pub fn serialize_parameters_aligned( .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? as u64) .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&keyed_account.try_account_ref()?.data()) + v.write_all(keyed_account.try_account_ref()?.data()) .map_err(|_| InstructionError::InvalidArgument)?; v.resize( MAX_PERMITTED_DATA_INCREASE @@ -382,9 +382,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -439,9 +439,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -487,9 +487,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i < accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 0c42306760..ce5b11b127 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -597,7 +597,7 @@ impl<'a> SyscallObject for SyscallPanic<'a> { memory_mapping, file, len, - &self.loader_id, + self.loader_id, self.enforce_aligned_host_addrs, &mut |string: &str| Err(SyscallError::Panic(string.to_string(), line, column).into()), ); @@ -628,7 +628,7 @@ impl<'a> SyscallObject for SyscallLog<'a> { memory_mapping, addr, len, - &self.loader_id, + self.loader_id, self.enforce_aligned_host_addrs, &mut |string: &str| { stable_log::program_log(&self.logger, string); @@ -2051,7 +2051,7 @@ where let mut accounts = Vec::with_capacity(account_keys.len()); let mut refs = Vec::with_capacity(account_keys.len()); for (i, ref account_key) in account_keys.iter().enumerate() { - let account = invoke_context.get_account(&account_key).ok_or_else(|| { + let account = invoke_context.get_account(account_key).ok_or_else(|| { ic_msg!( invoke_context, "Instruction references an unknown account {}", @@ -2215,7 +2215,7 @@ fn call<'a>( let instruction = syscall.translate_instruction( instruction_addr, - &memory_mapping, + memory_mapping, enforce_aligned_host_addrs, )?; let signers = syscall.translate_signers( diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index 09743d38c3..a7ac27313a 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -30,7 +30,7 @@ pub fn process_instruction( return Err(InstructionError::InvalidAccountOwner); } - deserialize(&config_account.data()).map_err(|err| { + deserialize(config_account.data()).map_err(|err| { ic_msg!( invoke_context, "Unable to deserialize config account: {}", @@ -130,7 +130,7 @@ pub fn process_instruction( config_keyed_account 
.try_account_ref_mut()? .data_as_mut_slice()[..data.len()] - .copy_from_slice(&data); + .copy_from_slice(data); Ok(()) } @@ -216,7 +216,7 @@ mod tests { let (_, config_account) = create_config_account(keys); assert_eq!( Some(MyConfig::default()), - deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() + deserialize(get_config_data(config_account.borrow().data()).unwrap()).ok() ); } @@ -241,7 +241,7 @@ mod tests { ); assert_eq!( Some(my_config), - deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() + deserialize(get_config_data(config_account.borrow().data()).unwrap()).ok() ); } @@ -321,11 +321,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); + let meta_data: ConfigKeys = deserialize(config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( Some(my_config), - deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() + deserialize(get_config_data(config_account.borrow().data()).unwrap()).ok() ); } @@ -454,11 +454,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); + let meta_data: ConfigKeys = deserialize(config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, - MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) + MyConfig::deserialize(get_config_data(config_account.borrow().data()).unwrap()) .unwrap() ); @@ -646,11 +646,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); + let meta_data: ConfigKeys = deserialize(config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, - MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) + MyConfig::deserialize(get_config_data(config_account.borrow().data()).unwrap()) .unwrap() ); diff --git a/programs/config/src/date_instruction.rs b/programs/config/src/date_instruction.rs index c4f732644a..7bc4ae4145 100644 --- a/programs/config/src/date_instruction.rs +++ b/programs/config/src/date_instruction.rs @@ -54,5 +54,5 @@ pub fn create_account( /// transaction containing this instruction. 
pub fn store(date_pubkey: &Pubkey, date: Date) -> Instruction { let date_config = DateConfig::new(date); - config_instruction::store(&date_pubkey, true, vec![], &date_config) + config_instruction::store(date_pubkey, true, vec![], &date_config) } diff --git a/programs/exchange/src/exchange_processor.rs b/programs/exchange/src/exchange_processor.rs index f2a36630b5..e16a06773b 100644 --- a/programs/exchange/src/exchange_processor.rs +++ b/programs/exchange/src/exchange_processor.rs @@ -193,11 +193,11 @@ impl ExchangeProcessor { error!("Not enough accounts"); return Err(InstructionError::InvalidArgument); } - Self::is_account_unallocated(&keyed_accounts[NEW_ACCOUNT_INDEX].try_account_ref()?.data())?; + Self::is_account_unallocated(keyed_accounts[NEW_ACCOUNT_INDEX].try_account_ref()?.data())?; Self::serialize( &ExchangeState::Account( TokenAccountInfo::default() - .owner(&keyed_accounts[OWNER_INDEX].unsigned_key()) + .owner(keyed_accounts[OWNER_INDEX].unsigned_key()) .tokens(100_000, 100_000, 100_000, 100_000), ), &mut keyed_accounts[NEW_ACCOUNT_INDEX] @@ -221,13 +221,13 @@ impl ExchangeProcessor { } let mut to_account = - Self::deserialize_account(&keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref()?.data())?; + Self::deserialize_account(keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref()?.data())?; if &faucet::id() == keyed_accounts[FROM_ACCOUNT_INDEX].unsigned_key() { to_account.tokens[token] += tokens; } else { let state: ExchangeState = - bincode::deserialize(&keyed_accounts[FROM_ACCOUNT_INDEX].try_account_ref()?.data()) + bincode::deserialize(keyed_accounts[FROM_ACCOUNT_INDEX].try_account_ref()?.data()) .map_err(Self::map_to_invalid_arg)?; match state { ExchangeState::Account(mut from_account) => { @@ -309,10 +309,10 @@ impl ExchangeProcessor { return Err(InstructionError::InvalidArgument); } - Self::is_account_unallocated(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; + Self::is_account_unallocated(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; let mut account = Self::deserialize_account( - &keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data(), + keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data(), )?; if &account.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { @@ -368,7 +368,7 @@ impl ExchangeProcessor { } let order = - Self::deserialize_order(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; + Self::deserialize_order(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; if &order.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { error!("Signer does not own order"); @@ -404,11 +404,11 @@ impl ExchangeProcessor { } let mut to_order = - Self::deserialize_order(&keyed_accounts[TO_ORDER_INDEX].try_account_ref()?.data())?; + Self::deserialize_order(keyed_accounts[TO_ORDER_INDEX].try_account_ref()?.data())?; let mut from_order = - Self::deserialize_order(&keyed_accounts[FROM_ORDER_INDEX].try_account_ref()?.data())?; + Self::deserialize_order(keyed_accounts[FROM_ORDER_INDEX].try_account_ref()?.data())?; let mut profit_account = Self::deserialize_account( - &keyed_accounts[PROFIT_ACCOUNT_INDEX] + keyed_accounts[PROFIT_ACCOUNT_INDEX] .try_account_ref()? 
.data(), )?; @@ -639,7 +639,7 @@ mod test { } fn create_token_account(client: &BankClient, owner: &Keypair) -> Pubkey { - let new = create_account(&client, &owner); + let new = create_account(client, owner); let instruction = exchange_instruction::account_request(&owner.pubkey(), &new); client .send_and_confirm_instruction(owner, instruction) @@ -670,9 +670,9 @@ mod test { trade_tokens: u64, price: u64, ) -> (Pubkey, Pubkey) { - let trade = create_account(&client, &owner); - let src = create_token_account(&client, &owner); - transfer(&client, &owner, &src, from_token, src_tokens); + let trade = create_account(client, owner); + let src = create_token_account(client, owner); + transfer(client, owner, &src, from_token, src_tokens); let instruction = exchange_instruction::trade_request( &owner.pubkey(), diff --git a/programs/ownable/src/ownable_instruction.rs b/programs/ownable/src/ownable_instruction.rs index 2e4cb1c5b0..d9c7dab4e2 100644 --- a/programs/ownable/src/ownable_instruction.rs +++ b/programs/ownable/src/ownable_instruction.rs @@ -33,7 +33,7 @@ pub fn create_account( let space = std::mem::size_of::<Pubkey>() as u64; vec![ system_instruction::create_account( - &payer_pubkey, + payer_pubkey, account_pubkey, lamports, space, diff --git a/programs/ownable/src/ownable_processor.rs b/programs/ownable/src/ownable_processor.rs index d9532922ca..b6696614cf 100644 --- a/programs/ownable/src/ownable_processor.rs +++ b/programs/ownable/src/ownable_processor.rs @@ -38,7 +38,7 @@ pub fn process_instruction( let new_owner_pubkey: Pubkey = limited_deserialize(data)?; let account_keyed_account = &mut keyed_account_at_index(keyed_accounts, 0)?; let mut account_owner_pubkey: Pubkey = - limited_deserialize(&account_keyed_account.try_account_ref()?.data())?; + limited_deserialize(account_keyed_account.try_account_ref()?.data())?; if account_owner_pubkey == Pubkey::default() { account_owner_pubkey = new_owner_pubkey; @@ -47,7 +47,7 @@ pub fn process_instruction( set_owner( &mut account_owner_pubkey, new_owner_pubkey, - &owner_keyed_account, + owner_keyed_account, )?; } diff --git a/programs/stake/src/config.rs b/programs/stake/src/config.rs index acd7416c2c..7e595a09c0 100644 --- a/programs/stake/src/config.rs +++ b/programs/stake/src/config.rs @@ -17,7 +17,7 @@ use solana_sdk::{ pub use solana_sdk::stake::config::*; pub fn from<T: ReadableAccount>(account: &T) -> Option<Config> { - get_config_data(&account.data()) + get_config_data(account.data()) .ok() .and_then(|data| deserialize(data).ok()) } diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index b80db4e5e2..0117329138 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -91,7 +91,7 @@ pub fn process_instruction( keyed_account_at_index(keyed_accounts, 3).map(|ka| ka.unsigned_key()); me.authorize_with_seed( - &authority_base, + authority_base, &args.authority_seed, &args.authority_owner, &args.new_authorized_pubkey, @@ -102,7 +102,7 @@ pub fn process_instruction( ) } else { me.authorize_with_seed( - &authority_base, + authority_base, &args.authority_seed, &args.authority_owner, &args.new_authorized_pubkey, @@ -119,7 +119,7 @@ pub fn process_instruction( let vote = keyed_account_at_index(keyed_accounts, 1)?; me.delegate( - &vote, + vote, &from_keyed_account::<Clock>(keyed_account_at_index(keyed_accounts, 2)?)?, &from_keyed_account::<StakeHistory>(keyed_account_at_index(keyed_accounts, 3)?)?, &config::from_keyed_account(keyed_account_at_index(keyed_accounts, 4)?)?, diff --git a/programs/stake/src/stake_state.rs
b/programs/stake/src/stake_state.rs index 2224df542f..a82a0f25fb 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -513,7 +513,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { } self.authorize( &signers, - &new_authority, + new_authority, stake_authorize, require_custodian_for_locked_stake_authorize, clock, @@ -686,7 +686,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { split.set_state(&StakeState::Initialized(split_meta))?; } StakeState::Uninitialized => { - if !signers.contains(&self.unsigned_key()) { + if !signers.contains(self.unsigned_key()) { return Err(InstructionError::MissingRequiredSignature); } } @@ -810,7 +810,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { (meta.lockup, reserve, false) } StakeState::Uninitialized => { - if !signers.contains(&self.unsigned_key()) { + if !signers.contains(self.unsigned_key()) { return Err(InstructionError::MissingRequiredSignature); } (Lockup::default(), 0, false) // no lockup, no restrictions @@ -821,7 +821,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { // verify that lockup has expired or that the withdrawal is signed by // the custodian, both epoch and unix_timestamp must have passed let custodian_pubkey = custodian.and_then(|keyed_account| keyed_account.signer_key()); - if lockup.is_in_force(&clock, custodian_pubkey) { + if lockup.is_in_force(clock, custodian_pubkey) { return Err(StakeError::LockupInForce.into()); } @@ -3883,7 +3883,7 @@ mod tests { fn test_authorize_with_seed() { let base_pubkey = solana_sdk::pubkey::new_rand(); let seed = "42"; - let withdrawer_pubkey = Pubkey::create_with_seed(&base_pubkey, &seed, &id()).unwrap(); + let withdrawer_pubkey = Pubkey::create_with_seed(&base_pubkey, seed, &id()).unwrap(); let stake_lamports = 42; let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, @@ -3904,7 +3904,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &base_keyed_account, - &"", + "", &id(), &new_authority, StakeAuthorize::Staker, @@ -3919,7 +3919,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &stake_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Staker, @@ -3934,7 +3934,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &base_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Staker, @@ -3949,7 +3949,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &base_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Withdrawer, @@ -3964,7 +3964,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &stake_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Withdrawer, diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index bd21e85507..70e59c3d28 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -230,7 +230,7 @@ impl VoteState { // utility function, used by Stakes, tests pub fn from<T: ReadableAccount>(account: &T) -> Option<VoteState> { - Self::deserialize(&account.data()).ok() + Self::deserialize(account.data()).ok() } // utility function, used by Stakes, tests @@ -239,7 +239,7 @@ impl VoteState { } pub fn deserialize(input: &[u8]) -> Result<Self, InstructionError> { - deserialize::<VoteStateVersions>(&input) + deserialize::<VoteStateVersions>(input) .map(|versioned| versioned.convert_to_current()) .map_err(|_| InstructionError::InvalidAccountData) } @@ -638,7 +638,7 @@ pub fn update_validator_identity( verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; // new node must say "yay" -
verify_authorized_signer(&node_pubkey, signers)?; + verify_authorized_signer(node_pubkey, signers)?; vote_state.node_pubkey = *node_pubkey; @@ -938,7 +938,7 @@ mod tests { slot_hashes: &[SlotHash], epoch: Epoch, ) -> Result<VoteState, InstructionError> { - let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, vote_account)]; + let keyed_accounts = &[KeyedAccount::new(vote_pubkey, true, vote_account)]; let signers: HashSet<Pubkey> = get_signers(keyed_accounts); process_vote( &keyed_accounts[0], diff --git a/programs/vote/src/vote_transaction.rs b/programs/vote/src/vote_transaction.rs index fdbce17982..f5446564c5 100644 --- a/programs/vote/src/vote_transaction.rs +++ b/programs/vote/src/vote_transaction.rs @@ -19,7 +19,7 @@ pub fn parse_vote_transaction(tx: &Transaction) -> Option<(Pubkey, Vote, Option< let prog_id_idx = first_instruction.program_id_index as usize; match message.account_keys.get(prog_id_idx) { Some(program_id) => { - if !crate::check_id(&program_id) { + if !crate::check_id(program_id) { return None; } } diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 2f22d4378e..0a98d59663 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -158,7 +158,7 @@ native machine code before execting it in the virtual machine.", vec![0u8; allocate] } Err(_) => { - let input = load_accounts(&Path::new(matches.value_of("input").unwrap())).unwrap(); + let input = load_accounts(Path::new(matches.value_of("input").unwrap())).unwrap(); for acc in input.accounts { let asd = AccountSharedData::new_ref(acc.lamports, acc.data.len(), &acc.owner); asd.borrow_mut().set_data(acc.data); @@ -244,7 +244,7 @@ native machine code before execting it in the virtual machine.", if matches.is_present("profile") { println!("Profile is saved in profile.dot"); let tracer = &vm.get_tracer(); - let dynamic_analysis = DynamicAnalysis::new(&tracer, &analysis); + let dynamic_analysis = DynamicAnalysis::new(tracer, &analysis); let mut file = File::create("profile.dot").unwrap(); analysis .visualize_graphically(&mut file, Some(&dynamic_analysis)) diff --git a/remote-wallet/src/remote_wallet.rs b/remote-wallet/src/remote_wallet.rs index afa5abc2f5..af06165018 100644 --- a/remote-wallet/src/remote_wallet.rs +++ b/remote-wallet/src/remote_wallet.rs @@ -114,10 +114,10 @@ impl RemoteWalletManager { is_valid_hid_device(device_info.usage_page(), device_info.interface_number()) && is_valid_ledger(device_info.vendor_id(), device_info.product_id()) }) { - match usb.open_path(&device_info.path()) { + match usb.open_path(device_info.path()) { Ok(device) => { let mut ledger = LedgerWallet::new(device); - let result = ledger.read_device(&device_info); + let result = ledger.read_device(device_info); match result { Ok(info) => { ledger.pretty_path = info.get_pretty_path(); diff --git a/rpc/src/parsed_token_accounts.rs b/rpc/src/parsed_token_accounts.rs index bfcc9a1fa7..1e2c22007b 100644 --- a/rpc/src/parsed_token_accounts.rs +++ b/rpc/src/parsed_token_accounts.rs @@ -20,7 +20,7 @@ pub fn get_parsed_token_account( pubkey: &Pubkey, account: AccountSharedData, ) -> UiAccount { - let additional_data = get_token_account_mint(&account.data()) + let additional_data = get_token_account_mint(account.data()) .and_then(|mint_pubkey| get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()) .map(|(_, decimals)| AccountAdditionalData { spl_token_decimals: Some(decimals), @@ -44,7 +44,7 @@ where { let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new(); keyed_accounts.filter_map(move |(pubkey, account)| { - let additional_data =
get_token_account_mint(&account.data()).map(|mint_pubkey| { + let additional_data = get_token_account_mint(account.data()).map(|mint_pubkey| { let spl_token_decimals = mint_decimals.get(&mint_pubkey).cloned().or_else(|| { let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()?; mint_decimals.insert(mint_pubkey, decimals); @@ -80,7 +80,7 @@ pub fn get_mint_owner_and_decimals(bank: &Arc<Bank>, mint: &Pubkey) -> Result<(P let mint_account = bank.get_account(mint).ok_or_else(|| { Error::invalid_params("Invalid param: could not find mint".to_string()) })?; - let decimals = get_mint_decimals(&mint_account.data())?; + let decimals = get_mint_decimals(mint_account.data())?; Ok((*mint_account.owner(), decimals)) } } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index ac0e3a70f2..bf4c08350c 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1574,12 +1574,12 @@ impl JsonRpcRequestProcessor { "Invalid param: not a v2.0 Token account".to_string(), )); } - let token_account = TokenAccount::unpack(&account.data()).map_err(|_| { + let token_account = TokenAccount::unpack(account.data()).map_err(|_| { Error::invalid_params("Invalid param: not a v2.0 Token account".to_string()) })?; let mint = &Pubkey::from_str(&token_account.mint.to_string()) .expect("Token account mint should be convertible to Pubkey"); - let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint)?; + let (_, decimals) = get_mint_owner_and_decimals(&bank, mint)?; let balance = token_amount_to_ui_amount(token_account.amount, decimals); Ok(new_response(&bank, balance)) } @@ -1598,7 +1598,7 @@ impl JsonRpcRequestProcessor { "Invalid param: not a v2.0 Token mint".to_string(), )); } - let mint = Mint::unpack(&mint_account.data()).map_err(|_| { + let mint = Mint::unpack(mint_account.data()).map_err(|_| { Error::invalid_params("Invalid param: mint could not be unpacked".to_string()) })?; @@ -1619,10 +1619,10 @@ impl JsonRpcRequestProcessor { )); } let mut token_balances: Vec<RpcTokenAccountBalance> = self - .get_filtered_spl_token_accounts_by_mint(&bank, &mint, vec![])? + .get_filtered_spl_token_accounts_by_mint(&bank, mint, vec![])?
.into_iter() .map(|(address, account)| { - let amount = TokenAccount::unpack(&account.data()) + let amount = TokenAccount::unpack(account.data()) .map(|account| account.amount) .unwrap_or(0); let amount = token_amount_to_ui_amount(amount, decimals); @@ -1758,7 +1758,7 @@ impl JsonRpcRequestProcessor { let filter_closure = |account: &AccountSharedData| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }; if self @@ -1830,7 +1830,7 @@ impl JsonRpcRequestProcessor { account.owner() == &spl_token_id_v2_0() && filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }) .map_err(|e| RpcCustomError::ScanError { @@ -1878,7 +1878,7 @@ impl JsonRpcRequestProcessor { account.owner() == &spl_token_id_v2_0() && filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }) .map_err(|e| RpcCustomError::ScanError { @@ -2065,7 +2065,7 @@ fn get_token_program_id_and_mint( ) -> Result<(Pubkey, Option)> { match token_account_filter { TokenAccountsFilter::Mint(mint) => { - let (mint_owner, _) = get_mint_owner_and_decimals(&bank, &mint)?; + let (mint_owner, _) = get_mint_owner_and_decimals(bank, &mint)?; if mint_owner != spl_token_id_v2_0() { return Err(Error::invalid_params( "Invalid param: not a v2.0 Token mint".to_string(), @@ -2973,7 +2973,7 @@ pub mod rpc_full { let durable_nonce_info = solana_sdk::transaction::uses_durable_nonce(&transaction) .and_then(|nonce_ix| { solana_sdk::transaction::get_nonce_pubkey_from_instruction( - &nonce_ix, + nonce_ix, &transaction, ) }) @@ -3099,7 +3099,7 @@ pub mod rpc_full { }; Ok(new_response( - &bank, + bank, RpcSimulateTransactionResult { err: result.err(), logs: Some(logs), @@ -3820,17 +3820,17 @@ pub fn create_test_transactions_and_populate_blockstore( // Generate transactions for processing // Successful transaction let success_tx = - solana_sdk::system_transaction::transfer(&mint_keypair, &keypair1.pubkey(), 2, blockhash); + solana_sdk::system_transaction::transfer(mint_keypair, &keypair1.pubkey(), 2, blockhash); let success_signature = success_tx.signatures[0]; let entry_1 = solana_ledger::entry::next_entry(&blockhash, 1, vec![success_tx]); // Failed transaction, InstructionError let ix_error_tx = - solana_sdk::system_transaction::transfer(&keypair2, &keypair3.pubkey(), 10, blockhash); + solana_sdk::system_transaction::transfer(keypair2, &keypair3.pubkey(), 10, blockhash); let ix_error_signature = ix_error_tx.signatures[0]; let entry_2 = solana_ledger::entry::next_entry(&entry_1.hash, 1, vec![ix_error_tx]); // Failed transaction let fail_tx = solana_sdk::system_transaction::transfer( - &mint_keypair, + mint_keypair, &keypair2.pubkey(), 2, Hash::default(), @@ -4203,7 +4203,7 @@ pub mod tests { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual 
response")) .expect("actual response deserialization"); @@ -4225,7 +4225,7 @@ pub mod tests { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -4255,7 +4255,7 @@ pub mod tests { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples","params":[10000]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -4284,7 +4284,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = format!(r#"{{"jsonrpc":"2.0","result":"{}","id":1}}"#, leader_pubkey); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); @@ -4314,10 +4314,10 @@ pub mod tests { io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":4,"id":1}"#; let expected: Response = - serde_json::from_str(&expected).expect("expected response deserialization"); + serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); @@ -4329,10 +4329,10 @@ pub mod tests { let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; let expected: Response = - serde_json::from_str(&expected).expect("expected response deserialization"); + serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); @@ -4343,7 +4343,7 @@ pub mod tests { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSupply"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); @@ -4370,7 +4370,7 @@ pub mod tests { io, meta, alice, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts"}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec = serde_json::from_value(json["result"]["value"].clone()) @@ -4407,14 +4407,14 @@ pub mod tests { // Test Circulating/NonCirculating Filter let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"circulating"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 20); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"nonCirculating"}]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec = serde_json::from_value(json["result"]["value"].clone()) @@ -4460,7 +4460,7 @@ pub mod tests { let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationGovernor"}"#; - let rep = io.handle_request_sync(&req, meta.clone()); + let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_governor: RpcInflationGovernor = if let Response::Single(res) = res { @@ -4476,7 +4476,7 @@ pub mod tests { assert_eq!(inflation_governor, expected_inflation_governor); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationRate"}"#; // Queries current epoch - let rep = io.handle_request_sync(&req, meta); + let rep = io.handle_request_sync(req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_rate: RpcInflationRate = if let Response::Single(res) = res { @@ -4506,7 +4506,7 @@ pub mod tests { let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getEpochSchedule"}"#; - let rep = io.handle_request_sync(&req, meta); + let rep = io.handle_request_sync(req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -4541,7 +4541,7 @@ pub mod tests { ] .iter() { - let rep = io.handle_request_sync(&req, meta.clone()); + let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -4570,7 +4570,7 @@ pub mod tests { } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#; - let rep = io.handle_request_sync(&req, meta.clone()); + let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -5437,7 +5437,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5468,7 +5468,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFees"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5544,7 +5544,7 @@ pub mod tests { let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFeeRateGovernor"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5818,7 +5818,7 @@ pub mod tests { let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getIdentity"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5857,7 +5857,7 @@ pub mod tests { let RpcHandler { io, meta, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVersion"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let version = solana_version::Version::default(); let expected = json!({ "jsonrpc": "2.0", @@ -5952,7 +5952,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let RpcBlockCommitment { @@ -5978,7 +5978,7 @@ pub mod tests { assert_eq!(total_stake, 10); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let commitment_response: RpcBlockCommitment = @@ -6007,7 +6007,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option = @@ -6052,7 +6052,7 @@ pub mod tests { } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,"binary"]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option = @@ -6097,7 +6097,7 @@ pub mod tests { // disable rpc-tx-history meta.config.enable_rpc_transaction_history = false; let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); assert_eq!( res, Some( @@ -6174,7 +6174,7 @@ pub mod tests { .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let block_production: RpcBlockProduction = @@ -6250,35 +6250,35 @@ pub mod tests { .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, roots[1..].to_vec()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[2]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = 
serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,4]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,7]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[9,11]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); @@ -6327,7 +6327,7 @@ pub mod tests { .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,500001]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); assert_eq!( res, Some( @@ -6336,35 +6336,35 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert!(confirmed_blocks.is_empty()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,2]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,3]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,500000]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = 
r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[9,500000]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); @@ -6432,7 +6432,7 @@ pub mod tests { let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32004,"message":"Block not available for slot 12345"},"id":1}"#; let expected: Response = - serde_json::from_str(&expected).expect("expected response deserialization"); + serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); @@ -6494,7 +6494,7 @@ pub mod tests { // stake but has never voted, and the vote account with no stake should not be present. { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts"}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -7360,7 +7360,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); @@ -7375,7 +7375,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); @@ -7390,7 +7390,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); @@ -7406,7 +7406,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 3); diff --git a/rpc/src/rpc_health.rs b/rpc/src/rpc_health.rs index 32e6c3b336..4317741b5e 100644 --- a/rpc/src/rpc_health.rs +++ b/rpc/src/rpc_health.rs @@ -65,7 +65,7 @@ impl RpcHealth { .iter() .filter_map(|trusted_validator| { self.cluster_info - .get_accounts_hash_for_node(&trusted_validator, |hashes| { + .get_accounts_hash_for_node(trusted_validator, |hashes| { hashes .iter() .max_by(|a, b| a.0.cmp(&b.0)) diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index 9d9937237e..f1cd796468 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -772,19 +772,19 @@ mod 
tests { let _res = io.handle_request_sync(&req, session.clone()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[0]}"#; - let res = io.handle_request_sync(&req, session.clone()); + let res = io.handle_request_sync(req, session.clone()); let expected = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); // Test bad parameter let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[1]}"#; - let res = io.handle_request_sync(&req, session); + let res = io.handle_request_sync(req, session); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); @@ -1016,19 +1016,19 @@ mod tests { let _res = io.handle_request_sync(&req, session.clone()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[0]}"#; - let res = io.handle_request_sync(&req, session.clone()); + let res = io.handle_request_sync(req, session.clone()); let expected = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); // Test bad parameter let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[1]}"#; - let res = io.handle_request_sync(&req, session); + let res = io.handle_request_sync(req, session); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index d911043214..98c67a37e1 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -209,7 +209,7 @@ impl RequestMiddleware for RpcRequestMiddleware { .file_name() .unwrap_or_else(|| std::ffi::OsStr::new("")) .to_str() - .unwrap_or(&"") + .unwrap_or("") )) } else { RpcRequestMiddleware::not_found() diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 6ee7146c8e..8652091e95 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -335,7 +335,7 @@ fn filter_program_results( let keyed_accounts = accounts.into_iter().filter(move |(_, account)| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }); let accounts: Box> = if program_id == &spl_token_id_v2_0() @@ -614,7 +614,7 @@ impl RpcSubscriptions { if let Some(subscription_ids) = subscriptions.get_mut(signature) { subscription_ids.retain(|k, _| !notified_ids.contains(k)); if subscription_ids.is_empty() { - subscriptions.remove(&signature); + subscriptions.remove(signature); } } 
notified_ids @@ -1156,7 +1156,7 @@ impl RpcSubscriptions { &subscriptions.gossip_signature_subscriptions, bank_forks, &commitment_slots, - &notifier, + notifier, "gossip", ); } @@ -1182,8 +1182,8 @@ impl RpcSubscriptions { pubkey, bank_forks, account_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1200,8 +1200,8 @@ impl RpcSubscriptions { address, bank_forks, logs_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1218,8 +1218,8 @@ impl RpcSubscriptions { program_id, bank_forks, program_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1236,8 +1236,8 @@ impl RpcSubscriptions { signature, bank_forks, signature_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1304,7 +1304,7 @@ impl RpcSubscriptions { ReceivedSignatureResult::ReceivedSignature, ), }, - &sink, + sink, ); } } diff --git a/rpc/src/send_transaction_service.rs b/rpc/src/send_transaction_service.rs index ed5e24512d..1b68b31615 100644 --- a/rpc/src/send_transaction_service.rs +++ b/rpc/src/send_transaction_service.rs @@ -269,10 +269,10 @@ impl SendTransactionService { address_list } }) - .unwrap_or_else(|| vec![&tpu_address]); + .unwrap_or_else(|| vec![tpu_address]); for address in addresses { Self::send_transaction( - &send_socket, + send_socket, address, &transaction_info.wire_transaction, ); diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 9c2f0dbfcf..364707ddde 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -240,7 +240,7 @@ fn store_accounts_with_possible_contention( // Write to a different slot than the one being read from. Because // there's a new account pubkey being written to every time, will // compete for the accounts index lock on every store - accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), &account); + accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), account); } }) } diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index e739050d0c..22f7b48c06 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -50,7 +50,7 @@ pub fn create_builtin_transactions( // Seed the signer account let rando0 = Keypair::new(); bank_client - .transfer_and_confirm(10_000, &mint_keypair, &rando0.pubkey()) + .transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey()) .unwrap_or_else(|_| panic!("{}:{}", line!(), file!())); let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); @@ -72,7 +72,7 @@ pub fn create_native_loader_transactions( // Seed the signer account let rando0 = Keypair::new(); bank_client - .transfer_and_confirm(10_000, &mint_keypair, &rando0.pubkey()) + .transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey()) .unwrap_or_else(|_| panic!("{}:{}", line!(), file!())); let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); @@ -94,7 +94,7 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Tra } for _ in 0..1_000_000_000_u64 { if bank - .get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) .is_some() { break; @@ -102,13 +102,13 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Tra sleep(Duration::from_nanos(1)); } if bank -
.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) .unwrap() .is_err() { error!( "transaction failed: {:?}", - bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap()) + bank.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) .unwrap() ); panic!(); diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 5b1ab29fe5..651d249ec7 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -215,7 +215,7 @@ impl Accounts { let mut account_deps = Vec::with_capacity(message.account_keys.len()); let demote_sysvar_write_locks = feature_set.is_active(&feature_set::demote_sysvar_write_locks::id()); - let mut key_check = MessageProgramIdsCache::new(&message); + let mut key_check = MessageProgramIdsCache::new(message); let mut rent_debits = RentDebits::default(); for (i, key) in message.account_keys.iter().enumerate() { let account = if key_check.is_non_loader_key(key, i) { @@ -237,7 +237,7 @@ impl Accounts { .map(|(mut account, _)| { if message.is_writable(i, demote_sysvar_write_locks) { let rent_due = rent_collector - .collect_from_existing_account(&key, &mut account); + .collect_from_existing_account(key, &mut account); (account, rent_due) } else { (account, 0) @@ -1029,7 +1029,7 @@ impl Accounts { } } if account.rent_epoch() == INITIAL_RENT_EPOCH { - let rent = rent_collector.collect_from_created_account(&key, account); + let rent = rent_collector.collect_from_created_account(key, account); loaded_transaction.rent += rent; loaded_transaction .rent_debits @@ -1106,7 +1106,7 @@ pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) for pubkey in pubkeys { let amount = thread_rng().gen_range(0, 10); let account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner()); - accounts.store_slow_uncached(slot, &pubkey, &account); + accounts.store_slow_uncached(slot, pubkey, &account); } } @@ -1140,7 +1140,7 @@ mod tests { error_counters: &mut ErrorCounters, ) -> Vec { let mut hash_queue = BlockhashQueue::new(100); - hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator); + hash_queue.register_hash(&tx.message().recent_blockhash, fee_calculator); let accounts = Accounts::new_with_config( Vec::new(), &ClusterType::Development, diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 12df20cb1e..2a2992dd8f 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -420,7 +420,7 @@ impl AccountsBackgroundService { total_remove_slots_time: &mut u64, ) { let mut remove_slots_time = Measure::start("remove_slots_time"); - *removed_slots_count += request_handler.handle_pruned_banks(&bank, true); + *removed_slots_count += request_handler.handle_pruned_banks(bank, true); remove_slots_time.stop(); *total_remove_slots_time += remove_slots_time.as_us(); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index beb943ed9d..58ff9879b2 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -371,7 +371,7 @@ impl<'a> LoadedAccount<'a> { pub fn owner(&self) -> &Pubkey { match self { LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.account_meta.owner, - LoadedAccount::Cached((_, cached_account)) => &cached_account.account.owner(), + LoadedAccount::Cached((_, cached_account)) => cached_account.account.owner(), } } @@ -394,7 +394,7 @@ 
impl<'a> LoadedAccount<'a> { pub fn pubkey(&self) -> &Pubkey { match self { LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.meta.pubkey, - LoadedAccount::Cached((pubkey, _)) => &pubkey, + LoadedAccount::Cached((pubkey, _)) => pubkey, } } @@ -408,7 +408,7 @@ impl<'a> LoadedAccount<'a> { pub fn compute_hash(&self, slot: Slot, pubkey: &Pubkey) -> Hash { match self { LoadedAccount::Stored(stored_account_meta) => { - AccountsDb::hash_stored_account(slot, &stored_account_meta) + AccountsDb::hash_stored_account(slot, stored_account_meta) } LoadedAccount::Cached((_, cached_account)) => { AccountsDb::hash_account(slot, &cached_account.account, pubkey) @@ -1496,7 +1496,7 @@ impl AccountsDb { let mut reclaims = Vec::new(); for pubkey in pubkeys { self.accounts_index.clean_rooted_entries( - &pubkey, + pubkey, &mut reclaims, max_clean_root, ); @@ -1588,7 +1588,7 @@ impl AccountsDb { let affected_pubkeys = &store_counts.get(&id).unwrap().1; for key in affected_pubkeys { - for (_slot, account_info) in &purges.get(&key).unwrap().0 { + for (_slot, account_info) in &purges.get(key).unwrap().0 { if !already_counted.contains(&account_info.store_id) { pending_store_ids.insert(account_info.store_id); } @@ -1641,7 +1641,7 @@ impl AccountsDb { for (pubkey, slots_set) in pubkey_to_slot_set { let is_empty = self .accounts_index - .purge_exact(&pubkey, slots_set, &mut reclaims); + .purge_exact(pubkey, slots_set, &mut reclaims); if is_empty { dead_keys.push(pubkey); } @@ -1862,12 +1862,12 @@ impl AccountsDb { // Then purge if we can let mut store_counts: HashMap)> = HashMap::new(); for (key, (account_infos, ref_count)) in purges_zero_lamports.iter_mut() { - if purged_account_slots.contains_key(&key) { - *ref_count = self.accounts_index.ref_count_from_storage(&key); + if purged_account_slots.contains_key(key) { + *ref_count = self.accounts_index.ref_count_from_storage(key); } account_infos.retain(|(slot, account_info)| { let was_slot_purged = purged_account_slots - .get(&key) + .get(key) .map(|slots_removed| slots_removed.contains(slot)) .unwrap_or(false); if was_slot_purged { @@ -2061,7 +2061,7 @@ impl AccountsDb { return; } let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots"); - self.clean_stored_dead_slots(&dead_slots, purged_account_slots); + self.clean_stored_dead_slots(dead_slots, purged_account_slots); clean_dead_slots.stop(); let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots"); @@ -3289,7 +3289,7 @@ impl AccountsDb { let path_index = thread_rng().gen_range(0, paths.len()); let store = Arc::new(self.new_storage_entry( slot, - &Path::new(&paths[path_index]), + Path::new(&paths[path_index]), Self::page_align(size), )); @@ -3491,7 +3491,7 @@ impl AccountsDb { let mut remove_storage_entries_elapsed = Measure::start("remove_storage_entries_elapsed"); for remove_slot in removed_slots { // Remove the storage entries and collect some metrics - if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(&remove_slot) { + if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(remove_slot) { { let r_slot_removed_storages = slot_storages_to_be_removed.read().unwrap(); total_removed_storage_entries += r_slot_removed_storages.len(); @@ -3761,10 +3761,10 @@ impl AccountsDb { Self::hash_account_data( slot, account.lamports(), - &account.owner(), + account.owner(), account.executable(), account.rent_epoch(), - &account.data(), + account.data(), pubkey, ) } @@ -3772,8 +3772,8 @@ impl AccountsDb { fn hash_frozen_account_data(account: 
&AccountSharedData) -> Hash { let mut hasher = Hasher::default(); - hasher.hash(&account.data()); - hasher.hash(&account.owner().as_ref()); + hasher.hash(account.data()); + hasher.hash(account.owner().as_ref()); if account.executable() { hasher.hash(&[1u8; 1]); @@ -3805,7 +3805,7 @@ impl AccountsDb { hasher.update(&rent_epoch.to_le_bytes()); - hasher.update(&data); + hasher.update(data); if executable { hasher.update(&[1u8; 1]); @@ -3813,8 +3813,8 @@ impl AccountsDb { hasher.update(&[0u8; 1]); } - hasher.update(&owner.as_ref()); - hasher.update(&pubkey.as_ref()); + hasher.update(owner.as_ref()); + hasher.update(pubkey.as_ref()); Hash(<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap()) } @@ -4834,7 +4834,7 @@ impl AccountsDb { ); if check_hash { - let computed_hash = loaded_account.compute_hash(slot, &pubkey); + let computed_hash = loaded_account.compute_hash(slot, pubkey); if computed_hash != source_item.hash { info!( "hash mismatch found: computed: {}, loaded: {}, pubkey: {}", @@ -4905,7 +4905,7 @@ impl AccountsDb { }; let result = Self::scan_snapshot_stores_with_cache( - &storages, + storages, &mut stats, PUBKEY_BINS_FOR_CALCULATING_HASHES, &bounds, @@ -5100,8 +5100,8 @@ impl AccountsDb { self.accounts_index.upsert( slot, pubkey, - &pubkey_account.1.owner(), - &pubkey_account.1.data(), + pubkey_account.1.owner(), + pubkey_account.1.data(), &self.account_indexes, info, &mut reclaims, @@ -5353,7 +5353,7 @@ impl AccountsDb { pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) { for account_pubkey in account_pubkeys { - if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, &account_pubkey) + if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, account_pubkey) { let frozen_account_info = FrozenAccountInfo { hash: Self::hash_frozen_account_data(&account), @@ -5391,7 +5391,7 @@ impl AccountsDb { ) } - let hash = Self::hash_frozen_account_data(&account); + let hash = Self::hash_frozen_account_data(account); if hash != frozen_account_info.hash { FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed); panic!( @@ -5735,10 +5735,10 @@ impl AccountsDb { if *slot <= snapshot_slot && (self.accounts_index.is_root(*slot) || ancestors - .map(|ancestors| ancestors.contains_key(&slot)) + .map(|ancestors| ancestors.contains_key(slot)) .unwrap_or_default()) { - self.storage.0.get(&slot).map_or_else( + self.storage.0.get(slot).map_or_else( || None, |item| { let storages = item @@ -5891,9 +5891,9 @@ impl AccountsDb { if !self.account_indexes.is_empty() { for (pubkey, (_, _store_id, stored_account)) in accounts_map.iter() { self.accounts_index.update_secondary_indexes( - &pubkey, + pubkey, &stored_account.account_meta.owner, - &stored_account.data, + stored_account.data, &self.account_indexes, ); } @@ -5929,7 +5929,7 @@ impl AccountsDb { for (id, store) in slot_stores.value().read().unwrap().iter() { // Should be default at this point assert_eq!(store.alive_bytes(), 0); - if let Some((stored_size, count)) = stored_sizes_and_counts.get(&id) { + if let Some((stored_size, count)) = stored_sizes_and_counts.get(id) { trace!("id: {} setting count: {} cur: {}", id, count, store.count(),); store.count_and_status.write().unwrap().0 = *count; store.alive_bytes.store(*stored_size, Ordering::SeqCst); @@ -6020,7 +6020,7 @@ impl AccountsDb { pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option { let ancestors = vec![(slot, 1)].into_iter().collect(); - let result = self.accounts_index.get(&pubkey, 
Some(&ancestors), None); + let result = self.accounts_index.get(pubkey, Some(&ancestors), None); result.map(|(list, index)| list.slot_list()[index].1.store_id) } @@ -7296,7 +7296,7 @@ pub mod tests { for (i, key) in keys.iter().enumerate() { assert_eq!( accounts - .load_without_fixed_root(&ancestors, &key) + .load_without_fixed_root(&ancestors, key) .unwrap() .0 .lamports(), @@ -7462,7 +7462,7 @@ pub mod tests { } fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount { - self.accounts_index.ref_count_from_storage(&pubkey) + self.accounts_index.ref_count_from_storage(pubkey) } } @@ -9390,7 +9390,7 @@ pub mod tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; accounts.get_accounts_delta_hash(current_slot); @@ -9401,7 +9401,7 @@ pub mod tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); @@ -9459,7 +9459,7 @@ pub mod tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; accounts.get_accounts_delta_hash(current_slot); @@ -9470,7 +9470,7 @@ pub mod tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); @@ -9517,7 +9517,7 @@ pub mod tests { let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id1, dummy_size, @@ -9531,7 +9531,7 @@ pub mod tests { let dummy_id2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id2, dummy_size, @@ -9553,7 +9553,7 @@ pub mod tests { let dummy_size = 4 * PAGE_SIZE; let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id1, dummy_size, @@ -9568,7 +9568,7 @@ pub mod tests { let dummy_id2 = 44; let dummy_slot2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot2, dummy_id2, dummy_size, @@ -9612,7 +9612,7 @@ pub mod tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; accounts.get_accounts_delta_hash(current_slot); @@ -9623,7 +9623,7 @@ pub mod tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); @@ -9766,7 +9766,7 @@ pub mod tests { info!( "store: {:?} : {:?}", store, - store_counts.get(&store).unwrap() + store_counts.get(store).unwrap() ); } for x in 0..3 { @@ -11037,7 +11037,7 @@ pub mod tests { let dummy_id1 = 22; let entry1 = 
Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id1, dummy_size, @@ -11045,7 +11045,7 @@ pub mod tests { let dummy_id2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id2, dummy_size, @@ -11584,7 +11584,7 @@ pub mod tests { let mut accounts = AccountsDb::new_single(); let dummy_path = Path::new(""); let dummy_size = 2 * PAGE_SIZE; - let entry = Arc::new(AccountStorageEntry::new(&dummy_path, 0, 1, dummy_size)); + let entry = Arc::new(AccountStorageEntry::new(dummy_path, 0, 1, dummy_size)); match accounts.shrink_ratio { AccountShrinkThreshold::TotalSpace { shrink_ratio } => { assert_eq!( diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs index 4d402120f1..6aad8ea9cf 100644 --- a/runtime/src/accounts_hash.rs +++ b/runtime/src/accounts_hash.rs @@ -255,7 +255,7 @@ impl AccountsHash { let mut hasher = Hasher::default(); for item in hashes.iter().take(end_index).skip(start_index) { - let h = extractor(&item); + let h = extractor(item); hasher.hash(h.as_ref()); } diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 0acb02aa9f..c9e7ea415a 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -923,7 +923,7 @@ impl AccountsIndex { read_lock_timer.stop(); read_lock_elapsed += read_lock_timer.as_us(); let mut latest_slot_timer = Measure::start("latest_slot"); - if let Some(index) = self.latest_slot(Some(ancestors), &list_r, max_root) { + if let Some(index) = self.latest_slot(Some(ancestors), list_r, max_root) { latest_slot_timer.stop(); latest_slot_elapsed += latest_slot_timer.as_us(); let mut load_account_timer = Measure::start("load_account"); @@ -1157,7 +1157,7 @@ impl AccountsIndex { max: Option, ) -> (SlotList, RefCount) { ( - self.get_rooted_entries(&locked_account_entry.slot_list(), max), + self.get_rooted_entries(locked_account_entry.slot_list(), max), locked_account_entry.ref_count().load(Ordering::Relaxed), ) } @@ -1174,7 +1174,7 @@ impl AccountsIndex { if let Some(mut write_account_map_entry) = self.get_account_write_entry(pubkey) { write_account_map_entry.slot_list_mut(|slot_list| { slot_list.retain(|(slot, item)| { - let should_purge = slots_to_purge.contains(&slot); + let should_purge = slots_to_purge.contains(slot); if should_purge { reclaims.push((*slot, item.clone())); false @@ -1228,7 +1228,7 @@ impl AccountsIndex { Some(inner) => inner, None => self.roots_tracker.read().unwrap(), }; - if lock.roots.contains(&slot) { + if lock.roots.contains(slot) { rv = Some(i); current_max = *slot; } @@ -1483,7 +1483,7 @@ impl AccountsIndex { ) { let roots_tracker = &self.roots_tracker.read().unwrap(); let newest_root_in_slot_list = - Self::get_newest_root_in_slot_list(&roots_tracker.roots, &slot_list, max_clean_root); + Self::get_newest_root_in_slot_list(&roots_tracker.roots, slot_list, max_clean_root); let max_clean_root = max_clean_root.unwrap_or(roots_tracker.max_root); let mut purged_slots: HashSet = HashSet::new(); @@ -1946,7 +1946,7 @@ pub mod tests { fn remove(&mut self, slot: &u64) -> bool { let result = self.bitfield.remove(slot); assert_eq!(result, self.hash_set.remove(slot)); - assert!(!self.bitfield.contains(&slot)); + assert!(!self.bitfield.contains(slot)); self.compare(); result } @@ -2211,7 +2211,7 @@ pub mod tests { compare_internal(hashset, bitfield); let clone = bitfield.clone(); compare_internal(hashset, &clone); - assert!(clone.eq(&bitfield)); + assert!(clone.eq(bitfield)); assert_eq!(clone, *bitfield); } @@ -2262,8 
+2262,8 @@ pub mod tests { // remove the rest, including a call that removes slot again for item in all.iter() { - assert!(tester.remove(&item)); - assert!(!tester.remove(&item)); + assert!(tester.remove(item)); + assert!(!tester.remove(item)); } let min = max + ((width * 2) as u64) + 3; @@ -2538,15 +2538,15 @@ pub mod tests { assert!(index.zero_lamport_pubkeys().is_empty()); let mut ancestors = Ancestors::default(); - assert!(index.get(&pubkey, Some(&ancestors), None).is_none()); - assert!(index.get(&pubkey, None, None).is_none()); + assert!(index.get(pubkey, Some(&ancestors), None).is_none()); + assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index.get(&pubkey, Some(&ancestors), None).is_some()); - assert_eq!(index.ref_count_from_storage(&pubkey), 1); + assert!(index.get(pubkey, Some(&ancestors), None).is_some()); + assert_eq!(index.ref_count_from_storage(pubkey), 1); index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); @@ -2559,15 +2559,15 @@ pub mod tests { assert!(!index.zero_lamport_pubkeys().is_empty()); let mut ancestors = Ancestors::default(); - assert!(index.get(&pubkey, Some(&ancestors), None).is_none()); - assert!(index.get(&pubkey, None, None).is_none()); + assert!(index.get(pubkey, Some(&ancestors), None).is_none()); + assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index.get(&pubkey, Some(&ancestors), None).is_some()); - assert_eq!(index.ref_count_from_storage(&pubkey), 0); // cached, so 0 + assert!(index.get(pubkey, Some(&ancestors), None).is_some()); + assert_eq!(index.ref_count_from_storage(pubkey), 0); // cached, so 0 index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); } @@ -3681,7 +3681,7 @@ pub mod tests { // Both pubkeys will now be present in the index check_secondary_index_mapping_correct( - &secondary_index, + secondary_index, &[secondary_key1, secondary_key2], &account_key, ); diff --git a/runtime/src/ancestors.rs b/runtime/src/ancestors.rs index 0f6bafba85..5c5e16bcea 100644 --- a/runtime/src/ancestors.rs +++ b/runtime/src/ancestors.rs @@ -170,7 +170,7 @@ pub mod tests { let key = item.0; min = std::cmp::min(min, *key); max = std::cmp::max(max, *key); - assert!(ancestors.get(&key)); + assert!(ancestors.get(key)); } for slot in min - 1..max + 2 { assert_eq!(ancestors.get(&slot), hashset.contains(&slot)); diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 2ede4b6aa2..1f8f9744ea 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -778,7 +778,7 @@ pub mod tests { fn test_new_from_file_crafted_zero_lamport_account() { let file = get_append_vec_path("test_append"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); let pubkey = solana_sdk::pubkey::new_rand(); @@ -806,7 +806,7 @@ pub mod tests { fn test_new_from_file_crafted_data_len() { let file = get_append_vec_path("test_new_from_file_crafted_data_len"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); let crafted_data_len = 1; @@ -834,7 +834,7 @@ pub mod tests { fn 
test_new_from_file_too_large_data_len() { let file = get_append_vec_path("test_new_from_file_too_large_data_len"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); let too_large_data_len = u64::max_value(); @@ -860,7 +860,7 @@ pub mod tests { fn test_new_from_file_crafted_executable() { let file = get_append_vec_path("test_new_from_crafted_executable"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); av.append_account_test(&create_test_account(10)).unwrap(); { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 22bb5a015b..ea8605e265 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1007,7 +1007,7 @@ impl Default for BlockhashQueue { impl Bank { pub fn new(genesis_config: &GenesisConfig) -> Self { Self::new_with_paths( - &genesis_config, + genesis_config, Vec::new(), &[], None, @@ -1021,7 +1021,7 @@ impl Bank { pub fn new_no_wallclock_throttle(genesis_config: &GenesisConfig) -> Self { let mut bank = Self::new_with_paths( - &genesis_config, + genesis_config, Vec::new(), &[], None, @@ -1044,7 +1044,7 @@ impl Bank { shrink_ratio: AccountShrinkThreshold, ) -> Self { Self::new_with_paths( - &genesis_config, + genesis_config, Vec::new(), &[], None, @@ -1708,7 +1708,7 @@ impl Bank { // if I'm the first Bank in an epoch, ensure stake_history is updated self.update_sysvar_account(&sysvar::stake_history::id(), |account| { create_account::( - &self.stakes.read().unwrap().history(), + self.stakes.read().unwrap().history(), self.inherit_specially_retained_account_fields(account), ) }); @@ -1766,7 +1766,7 @@ impl Bank { .feature_set .full_inflation_features_enabled() .iter() - .filter_map(|id| self.feature_set.activated_slot(&id)) + .filter_map(|id| self.feature_set.activated_slot(id)) .collect::>(); slots.sort_unstable(); slots.get(0).cloned().unwrap_or_else(|| { @@ -1913,7 +1913,7 @@ impl Bank { .iter() .for_each(|(stake_pubkey, delegation)| { match ( - self.get_account_with_fixed_root(&stake_pubkey), + self.get_account_with_fixed_root(stake_pubkey), self.get_account_with_fixed_root(&delegation.voter_pubkey), ) { (Some(stake_account), Some(vote_account)) => { @@ -1979,8 +1979,8 @@ impl Bank { }) .map(|(stake_account, vote_account)| { stake_state::calculate_points( - &stake_account, - &vote_account, + stake_account, + vote_account, Some(&stake_history), fix_stake_deactivate, ) @@ -2019,7 +2019,7 @@ impl Bank { fix_stake_deactivate, ); if let Ok((stakers_reward, _voters_reward)) = redeemed { - self.store_account(&stake_pubkey, &stake_account); + self.store_account(stake_pubkey, stake_account); vote_account_changed = true; if stakers_reward > 0 { @@ -2053,7 +2053,7 @@ impl Bank { }, )); } - self.store_account(&vote_pubkey, &vote_account); + self.store_account(vote_pubkey, vote_account); } } self.rewards.write().unwrap().append(&mut rewards); @@ -2296,7 +2296,7 @@ impl Bank { self.fee_calculator = self.fee_rate_governor.create_fee_calculator(); for (pubkey, account) in genesis_config.accounts.iter() { - if self.get_account(&pubkey).is_some() { + if self.get_account(pubkey).is_some() { panic!("{} repeated in genesis config", pubkey); } self.store_account(pubkey, &AccountSharedData::from(account.clone())); @@ -2307,7 +2307,7 @@ impl Bank { self.update_fees(); for (pubkey, account) in genesis_config.rewards_pools.iter() { - if 
self.get_account(&pubkey).is_some() { + if self.get_account(pubkey).is_some() { panic!("{} repeated in genesis config", pubkey); } self.store_account(pubkey, &AccountSharedData::from(account.clone())); @@ -2354,11 +2354,11 @@ impl Bank { // NOTE: must hold idempotent for the same set of arguments pub fn add_native_program(&self, name: &str, program_id: &Pubkey, must_replace: bool) { let existing_genuine_program = - if let Some(mut account) = self.get_account_with_fixed_root(&program_id) { + if let Some(mut account) = self.get_account_with_fixed_root(program_id) { // it's very unlikely to be squatted at program_id as non-system account because of burden to // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's // safe to assume it's a genuine program. - if native_loader::check_id(&account.owner()) { + if native_loader::check_id(account.owner()) { Some(account) } else { // malicious account is pre-occupying at program_id @@ -2369,7 +2369,7 @@ impl Bank { // Resetting account balance to 0 is needed to really purge from AccountsDb and // flush the Stakes cache account.set_lamports(0); - self.store_account(&program_id, &account); + self.store_account(program_id, &account); None } } else { @@ -2385,7 +2385,7 @@ impl Bank { name, program_id ), Some(account) => { - if *name == String::from_utf8_lossy(&account.data()) { + if *name == String::from_utf8_lossy(account.data()) { // nop; it seems that already AccountsDb is updated. return; } @@ -2425,7 +2425,7 @@ impl Bank { name, self.inherit_specially_retained_account_fields(&existing_genuine_program), ); - self.store_account_and_update_capitalization(&program_id, &account); + self.store_account_and_update_capitalization(program_id, &account); debug!("Added native program {} under {:?}", name, program_id); } @@ -2594,7 +2594,7 @@ impl Bank { hashed_txs.as_transactions_iter(), self.demote_sysvar_write_locks(), ); - TransactionBatch::new(lock_results, &self, Cow::Owned(hashed_txs)) + TransactionBatch::new(lock_results, self, Cow::Owned(hashed_txs)) } pub fn prepare_hashed_batch<'a, 'b>( @@ -2605,7 +2605,7 @@ impl Bank { hashed_txs.as_transactions_iter(), self.demote_sysvar_write_locks(), ); - TransactionBatch::new(lock_results, &self, Cow::Borrowed(hashed_txs)) + TransactionBatch::new(lock_results, self, Cow::Borrowed(hashed_txs)) } pub(crate) fn prepare_simulation_batch<'a, 'b>( @@ -2614,7 +2614,7 @@ impl Bank { ) -> TransactionBatch<'a, 'b> { let mut batch = TransactionBatch::new( vec![tx.sanitize().map_err(|e| e.into())], - &self, + self, Cow::Owned(vec![HashedTransaction::from(tx)]), ); batch.needs_unlock = false; @@ -2628,7 +2628,7 @@ impl Bank { ) -> (Result<()>, TransactionLogMessages, Vec) { assert!(self.is_frozen(), "simulation bank must be frozen"); - let batch = self.prepare_simulation_batch(&transaction); + let batch = self.prepare_simulation_batch(transaction); let mut timings = ExecuteTimings::default(); @@ -2703,7 +2703,7 @@ impl Bank { let hash_age = hash_queue.check_hash_age(&message.recent_blockhash, max_age); if hash_age == Some(true) { (Ok(()), None) - } else if let Some((pubkey, acc)) = self.check_tx_durable_nonce(&tx) { + } else if let Some((pubkey, acc)) = self.check_tx_durable_nonce(tx) { (Ok(()), Some(NonceRollbackPartial::new(pubkey, acc))) } else if hash_age == Some(false) { error_counters.blockhash_too_old += 1; @@ -2794,10 +2794,10 @@ impl Bank { } pub fn check_tx_durable_nonce(&self, tx: &Transaction) -> Option<(Pubkey, AccountSharedData)> { - transaction::uses_durable_nonce(&tx) - 
.and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(&nonce_ix, &tx)) + transaction::uses_durable_nonce(tx) + .and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(nonce_ix, tx)) .and_then(|nonce_pubkey| { - self.get_account(&nonce_pubkey) + self.get_account(nonce_pubkey) .map(|acc| (*nonce_pubkey, acc)) }) .filter(|(_pubkey, nonce_account)| { @@ -3437,7 +3437,7 @@ impl Bank { hashed_txs.len() ); timings.store_us += write_time.as_us(); - self.update_transaction_statuses(hashed_txs, &executed); + self.update_transaction_statuses(hashed_txs, executed); let fee_collection_results = self.filter_program_errors_and_collect_fee(hashed_txs.as_transactions_iter(), executed); @@ -4195,7 +4195,7 @@ impl Bank { pubkey: &Pubkey, new_account: &AccountSharedData, ) { - if let Some(old_account) = self.get_account_with_fixed_root(&pubkey) { + if let Some(old_account) = self.get_account_with_fixed_root(pubkey) { match new_account.lamports().cmp(&old_account.lamports()) { std::cmp::Ordering::Greater => { self.capitalization @@ -5064,7 +5064,7 @@ impl Bank { pub fn deactivate_feature(&mut self, id: &Pubkey) { let mut feature_set = Arc::make_mut(&mut self.feature_set).clone(); - feature_set.active.remove(&id); + feature_set.active.remove(id); feature_set.inactive.insert(*id); self.feature_set = Arc::new(feature_set); } @@ -5164,8 +5164,8 @@ impl Bank { ) { let feature_builtins = self.feature_builtins.clone(); for (builtin, feature, activation_type) in feature_builtins.iter() { - let should_populate = init_or_warp && self.feature_set.is_active(&feature) - || !init_or_warp && new_feature_activations.contains(&feature); + let should_populate = init_or_warp && self.feature_set.is_active(feature) + || !init_or_warp && new_feature_activations.contains(feature); if should_populate { match activation_type { ActivationType::NewProgram => self.add_builtin( @@ -5267,10 +5267,10 @@ impl Bank { if purge_window_epoch { for reward_pubkey in self.rewards_pool_pubkeys.iter() { - if let Some(mut reward_account) = self.get_account_with_fixed_root(&reward_pubkey) { + if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) { if reward_account.lamports() == u64::MAX { reward_account.set_lamports(0); - self.store_account(&reward_pubkey, &reward_account); + self.store_account(reward_pubkey, &reward_account); // Adjust capitalization.... 
it has been wrapping, reducing the real capitalization by 1-lamport self.capitalization.fetch_add(1, Relaxed); info!( @@ -5313,7 +5313,7 @@ impl Drop for Bank { pub fn goto_end_of_slot(bank: &mut Bank) { let mut tick_hash = bank.last_blockhash(); loop { - tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]); + tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); bank.register_tick(&tick_hash); if tick_hash == bank.last_blockhash() { bank.freeze(); @@ -7092,7 +7092,7 @@ pub(crate) mod tests { .accounts .accounts_db .accounts_index - .get(&pubkey, Some(&ancestors), None) + .get(pubkey, Some(ancestors), None) .unwrap(); locked_entry .slot_list() @@ -7320,7 +7320,7 @@ pub(crate) mod tests { .map(move |(_stake_pubkey, stake_account)| (stake_account, vote_account)) }) .map(|(stake_account, vote_account)| { - stake_state::calculate_points(&stake_account, &vote_account, None, true) + stake_state::calculate_points(stake_account, vote_account, None, true) .unwrap_or(0) }) .sum(); @@ -9462,11 +9462,11 @@ pub(crate) mod tests { assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", - String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data()) ); assert_eq!( "mock_program2", - String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data()) ); // Re-adding builtin programs should be no-op @@ -9482,11 +9482,11 @@ pub(crate) mod tests { assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", - String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data()) ); assert_eq!( "mock_program2", - String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data()) ); } @@ -9623,7 +9623,7 @@ pub(crate) mod tests { } fn get_nonce_account(bank: &Bank, nonce_pubkey: &Pubkey) -> Option { - bank.get_account(&nonce_pubkey).and_then(|acc| { + bank.get_account(nonce_pubkey).and_then(|acc| { let state = StateMut::::state(&acc).map(|v| v.convert_to_current()); match state { @@ -10114,7 +10114,7 @@ pub(crate) mod tests { let pubkey2 = solana_sdk::pubkey::new_rand(); let keypair0_account = AccountSharedData::new(8, 0, &Pubkey::default()); let keypair1_account = AccountSharedData::new(9, 0, &Pubkey::default()); - let account0 = AccountSharedData::new(11, 0, &&Pubkey::default()); + let account0 = AccountSharedData::new(11, 0, &Pubkey::default()); bank0.store_account(&keypair0.pubkey(), &keypair0_account); bank0.store_account(&keypair1.pubkey(), &keypair1_account); bank0.store_account(&pubkey0, &account0); @@ -11991,7 +11991,7 @@ pub(crate) mod tests { // Write accounts to the store for key in &all_pubkeys { - bank0.store_account(&key, &starting_account); + bank0.store_account(key, &starting_account); } // Set aside a subset of accounts to modify diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 08ea15147e..012f7244fd 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -516,7 +516,7 @@ mod tests { slot: child.slot(), timestamp: recent_timestamp + additional_timestamp_secs, }, - &child, + child, &voting_keypair.pubkey(), ); } diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 
3185d4bdb3..2fdc4ba91a 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -24,7 +24,7 @@ impl EpochStakes { pub fn new(stakes: &Stakes, leader_schedule_epoch: Epoch) -> Self { let epoch_vote_accounts = Stakes::vote_accounts(stakes); let (total_stake, node_id_to_vote_accounts, epoch_authorized_voters) = - Self::parse_epoch_vote_accounts(&epoch_vote_accounts, leader_schedule_epoch); + Self::parse_epoch_vote_accounts(epoch_vote_accounts, leader_schedule_epoch); Self { stakes: Arc::new(stakes.clone()), total_stake, diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 29457764cf..eea539905c 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -191,15 +191,15 @@ pub fn create_genesis_config_with_leader_ex( mut initial_accounts: Vec<(Pubkey, AccountSharedData)>, ) -> GenesisConfig { let validator_vote_account = vote_state::create_account( - &validator_vote_account_pubkey, - &validator_pubkey, + validator_vote_account_pubkey, + validator_pubkey, 0, validator_stake_lamports, ); let validator_stake_account = stake_state::create_account( validator_stake_account_pubkey, - &validator_vote_account_pubkey, + validator_vote_account_pubkey, &validator_vote_account, &rent, validator_stake_lamports, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index 2d71219e2d..03f213417e 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -280,7 +280,7 @@ pub fn open_genesis_config( ledger_path: &Path, max_genesis_archive_unpacked_size: u64, ) -> GenesisConfig { - GenesisConfig::load(&ledger_path).unwrap_or_else(|load_err| { + GenesisConfig::load(ledger_path).unwrap_or_else(|load_err| { let genesis_package = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); unpack_genesis_archive( &genesis_package, @@ -296,7 +296,7 @@ pub fn open_genesis_config( }); // loading must succeed at this moment - GenesisConfig::load(&ledger_path).unwrap() + GenesisConfig::load(ledger_path).unwrap() }) } diff --git a/runtime/src/loader_utils.rs b/runtime/src/loader_utils.rs index 9f304df037..7580ef873b 100644 --- a/runtime/src/loader_utils.rs +++ b/runtime/src/loader_utils.rs @@ -70,7 +70,7 @@ pub fn load_buffer_account( bank_client .send_and_confirm_message( - &[from_keypair, &buffer_keypair], + &[from_keypair, buffer_keypair], Message::new( &bpf_loader_upgradeable::create_buffer( &from_keypair.pubkey(), @@ -102,7 +102,7 @@ pub fn load_buffer_account( Some(&from_keypair.pubkey()), ); bank_client - .send_and_confirm_message(&[from_keypair, &buffer_authority_keypair], message) + .send_and_confirm_message(&[from_keypair, buffer_authority_keypair], message) .unwrap(); offset += chunk_size as u32; } @@ -121,7 +121,7 @@ pub fn load_upgradeable_program( load_buffer_account( bank_client, - &from_keypair, + from_keypair, buffer_keypair, authority_keypair, &program, @@ -147,7 +147,7 @@ pub fn load_upgradeable_program( ); bank_client .send_and_confirm_message( - &[from_keypair, &executable_keypair, &authority_keypair], + &[from_keypair, executable_keypair, authority_keypair], message, ) .unwrap(); @@ -163,15 +163,15 @@ pub fn upgrade_program( ) { let message = Message::new( &[bpf_loader_upgradeable::upgrade( - &program_pubkey, - &buffer_pubkey, + program_pubkey, + buffer_pubkey, &authority_keypair.pubkey(), - &spill_pubkey, + spill_pubkey, )], Some(&from_keypair.pubkey()), ); bank_client - .send_and_confirm_message(&[from_keypair, &authority_keypair], message) + .send_and_confirm_message(&[from_keypair, authority_keypair], 
message) .unwrap(); } @@ -191,7 +191,7 @@ pub fn set_upgrade_authority( Some(&from_keypair.pubkey()), ); bank_client - .send_and_confirm_message(&[from_keypair, ¤t_authority_keypair], message) + .send_and_confirm_message(&[from_keypair, current_authority_keypair], message) .unwrap(); } diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index 0f2798fe7b..fa86f5c982 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -122,7 +122,7 @@ impl PreAccount { && (!is_writable // line coverage used to get branch coverage || pre.executable() || program_id != pre.owner() - || !Self::is_zeroed(&post.data())) + || !Self::is_zeroed(post.data())) { return Err(InstructionError::ModifiedProgramId); } @@ -454,7 +454,7 @@ impl<'a> InvokeContext for ThisInvokeContext<'a> { self.executors.borrow_mut().insert(*pubkey, executor); } fn get_executor(&self, pubkey: &Pubkey) -> Option> { - self.executors.borrow().get(&pubkey) + self.executors.borrow().get(pubkey) } fn record_instruction(&self, instruction: &Instruction) { if let Some(recorder) = &self.instruction_recorder { @@ -657,7 +657,7 @@ impl MessageProcessor { if id == root_id { invoke_context.remove_first_keyed_account()?; // Call the builtin program - return process_instruction(&program_id, instruction_data, invoke_context); + return process_instruction(program_id, instruction_data, invoke_context); } } // Call the program via the native loader @@ -671,7 +671,7 @@ impl MessageProcessor { for (id, process_instruction) in &self.programs { if id == owner_id { // Call the program via a builtin loader - return process_instruction(&program_id, instruction_data, invoke_context); + return process_instruction(program_id, instruction_data, invoke_context); } } } @@ -782,7 +782,7 @@ impl MessageProcessor { .map(|index| keyed_account_at_index(keyed_accounts, *index)) .collect::, InstructionError>>()?; let (message, callee_program_id, _) = - Self::create_message(&instruction, &keyed_accounts, &signers, &invoke_context)?; + Self::create_message(&instruction, &keyed_accounts, signers, &invoke_context)?; let keyed_accounts = invoke_context.get_keyed_accounts()?; let mut caller_write_privileges = keyed_account_indices .iter() @@ -1036,7 +1036,7 @@ impl MessageProcessor { let account = accounts[account_index].borrow(); pre_accounts[unique_index] .verify( - &program_id, + program_id, message.is_writable(account_index, demote_sysvar_write_locks), rent, &account, @@ -1102,7 +1102,7 @@ impl MessageProcessor { } let account = account.borrow(); pre_account - .verify(&program_id, is_writable, &rent, &account, timings, false) + .verify(program_id, is_writable, rent, &account, timings, false) .map_err(|err| { ic_logger_msg!(logger, "failed to verify account {}: {}", key, err); err diff --git a/runtime/src/native_loader.rs b/runtime/src/native_loader.rs index 149770b181..4fec5ab6f8 100644 --- a/runtime/src/native_loader.rs +++ b/runtime/src/native_loader.rs @@ -112,7 +112,7 @@ impl NativeLoader { if let Some(entrypoint) = cache.get(name) { Ok(entrypoint.clone()) } else { - match Self::library_open(&Self::create_path(&name)?) { + match Self::library_open(&Self::create_path(name)?) 
{ Ok(library) => { let result = unsafe { library.get::(name.as_bytes()) }; match result { diff --git a/runtime/src/non_circulating_supply.rs b/runtime/src/non_circulating_supply.rs index e4175066bc..eb1af37e9d 100644 --- a/runtime/src/non_circulating_supply.rs +++ b/runtime/src/non_circulating_supply.rs @@ -70,7 +70,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc) -> ScanResult .downgrade() }); - let should_insert = !outer_keys.read().unwrap().contains(&key); + let should_insert = !outer_keys.read().unwrap().contains(key); if should_insert { let mut w_outer_keys = outer_keys.write().unwrap(); - if !w_outer_keys.contains(&key) { + if !w_outer_keys.contains(key) { w_outer_keys.push(*key); } } @@ -175,11 +175,11 @@ impl let is_outer_key_empty = { let inner_key_map = self .index - .get_mut(&outer_key) + .get_mut(outer_key) .expect("If we're removing a key, then it must have an entry in the map"); // If we deleted a pubkey from the reverse_index, then the corresponding entry // better exist in this index as well or the two indexes are out of sync! - assert!(inner_key_map.value().remove_inner_key(&removed_inner_key)); + assert!(inner_key_map.value().remove_inner_key(removed_inner_key)); inner_key_map.is_empty() }; diff --git a/runtime/src/serde_snapshot/future.rs b/runtime/src/serde_snapshot/future.rs index b822da5825..1002c1cddb 100644 --- a/runtime/src/serde_snapshot/future.rs +++ b/runtime/src/serde_snapshot/future.rs @@ -165,7 +165,7 @@ impl<'a> From> for SerializableVersionedB } Self { blockhash_queue: rhs.blockhash_queue, - ancestors: &rhs.ancestors, + ancestors: rhs.ancestors, hash: rhs.hash, parent_hash: rhs.parent_hash, parent_slot: rhs.parent_slot, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 6d8436f313..c7bf8a9bda 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -250,7 +250,7 @@ pub(crate) fn reconstruct_accounts_db_via_serialization( accountsdb_to_stream( SerdeStyle::Newer, &mut writer, - &accounts, + accounts, slot, &snapshot_storages, ) @@ -261,7 +261,7 @@ pub(crate) fn reconstruct_accounts_db_via_serialization( let copied_accounts = TempDir::new().unwrap(); // Simulate obtaining a copy of the AppendVecs from a tarball - let unpacked_append_vec_map = copy_append_vecs(&accounts, copied_accounts.path()).unwrap(); + let unpacked_append_vec_map = copy_append_vecs(accounts, copied_accounts.path()).unwrap(); let mut accounts_db = accountsdb_from_stream(SerdeStyle::Newer, &mut reader, &[], unpacked_append_vec_map) .unwrap(); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index fb9218ba45..62f49c27ed 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -622,7 +622,7 @@ pub fn bank_from_archive>( let mut untar = Measure::start("untar"); let unpacked_append_vec_map = untar_snapshot_in( &snapshot_tar, - &unpack_dir.as_ref(), + unpack_dir.as_ref(), account_paths, archive_format, )?; @@ -913,7 +913,7 @@ pub fn verify_snapshot_archive( let unpack_dir = temp_dir.path(); untar_snapshot_in( snapshot_archive, - &unpack_dir, + unpack_dir, &[unpack_dir.to_path_buf()], archive_format, ) @@ -953,7 +953,7 @@ pub fn snapshot_bank( ) -> Result<()> { let storages: Vec<_> = root_bank.get_snapshot_storages(); let mut add_snapshot_time = Measure::start("add-snapshot-ms"); - add_snapshot(snapshot_path, &root_bank, &storages, snapshot_version)?; + add_snapshot(snapshot_path, root_bank, &storages, snapshot_version)?; add_snapshot_time.stop(); 
inc_new_counter_info!("add-snapshot-ms", add_snapshot_time.as_ms() as usize); @@ -964,7 +964,7 @@ pub fn snapshot_bank( .expect("no snapshots found in config snapshot_path"); let package = package_snapshot( - &root_bank, + root_bank, latest_slot_snapshot_paths, snapshot_path, status_cache_slot_deltas, @@ -1003,9 +1003,9 @@ pub fn bank_to_snapshot_archive, Q: AsRef>( let temp_dir = tempfile::tempdir_in(snapshot_path)?; let storages: Vec<_> = bank.get_snapshot_storages(); - let slot_snapshot_paths = add_snapshot(&temp_dir, &bank, &storages, snapshot_version)?; + let slot_snapshot_paths = add_snapshot(&temp_dir, bank, &storages, snapshot_version)?; let package = package_snapshot( - &bank, + bank, &slot_snapshot_paths, &temp_dir, bank.src.slot_deltas(&bank.src.roots()), diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 7148094b31..67f0fdfab6 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -134,7 +134,7 @@ impl Stakes { // when account is removed (lamports == 0 or data uninitialized), don't read so that // given `pubkey` can be used for any owner in the future, while not affecting Stakes. if account.lamports() != 0 - && !(check_vote_init && VoteState::is_uninitialized_no_deser(&account.data())) + && !(check_vote_init && VoteState::is_uninitialized_no_deser(account.data())) { let stake = old.as_ref().map_or_else( || { @@ -258,8 +258,8 @@ pub mod tests { stake_pubkey, stake_state::create_account( &stake_pubkey, - &vote_pubkey, - &vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), + vote_pubkey, + &vote_state::create_account(vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), &Rent::free(), stake, ), @@ -290,8 +290,8 @@ pub mod tests { stake_pubkey, stake_state::create_account_with_activation_epoch( &stake_pubkey, - &vote_pubkey, - &vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), + vote_pubkey, + &vote_state::create_account(vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), &Rent::free(), stake, epoch, diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index 96fb54cd8a..d07ee5356e 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -251,7 +251,7 @@ impl StatusCache { .iter() .for_each(|(tx_hash, (key_index, statuses))| { for (key_slice, res) in statuses.iter() { - self.insert_with_slice(&tx_hash, *slot, *key_index, *key_slice, res.clone()) + self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone()) } }); if *is_root { diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index bab232ca5c..a8402bfc94 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -79,7 +79,7 @@ fn allocate( // if it looks like the `to` account is already in use, bail // (note that the id check is also enforced by message_processor) - if !account.data().is_empty() || !system_program::check_id(&account.owner()) { + if !account.data().is_empty() || !system_program::check_id(account.owner()) { ic_msg!( invoke_context, "Allocate: account {:?} already in use", @@ -115,13 +115,13 @@ fn assign( return Ok(()); } - if !address.is_signer(&signers) { + if !address.is_signer(signers) { ic_msg!(invoke_context, "Assign: account {:?} must sign", address); return Err(InstructionError::MissingRequiredSignature); } // guard against sysvars being made - if sysvar::check_id(&owner) { + if sysvar::check_id(owner) { ic_msg!(invoke_context, "Assign: cannot assign to sysvar, {}", 
owner); return Err(SystemError::InvalidProgramId.into()); } @@ -300,13 +300,13 @@ pub fn process_instruction( let from = keyed_account_at_index(keyed_accounts, 0)?; let to = keyed_account_at_index(keyed_accounts, 1)?; let to_address = Address::create( - &to.unsigned_key(), + to.unsigned_key(), Some((&base, &seed, &owner)), invoke_context, )?; create_account( from, - &to, + to, &to_address, lamports, space, @@ -736,11 +736,11 @@ mod tests { let result = create_account( &KeyedAccount::new(&from, true, &from_account), &KeyedAccount::new(&to, false, &to_account), - &address, + address, 50, MAX_PERMITTED_DATA_LENGTH + 1, &system_program::id(), - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert!(result.is_err()); @@ -753,11 +753,11 @@ mod tests { let result = create_account( &KeyedAccount::new(&from, true, &from_account), &KeyedAccount::new(&to, false, &to_account), - &address, + address, 50, MAX_PERMITTED_DATA_LENGTH, &system_program::id(), - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert!(result.is_ok()); @@ -790,7 +790,7 @@ mod tests { 50, 2, &new_owner, - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into())); @@ -809,7 +809,7 @@ mod tests { 50, 2, &new_owner, - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into())); @@ -827,7 +827,7 @@ mod tests { 50, 2, &new_owner, - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into())); @@ -1141,7 +1141,7 @@ mod tests { transfer_with_seed( &from_keyed_account, &from_base_keyed_account, - &from_seed, + from_seed, &from_owner, &to_keyed_account, 50, @@ -1158,7 +1158,7 @@ mod tests { let result = transfer_with_seed( &from_keyed_account, &from_base_keyed_account, - &from_seed, + from_seed, &from_owner, &to_keyed_account, 100, @@ -1173,7 +1173,7 @@ mod tests { assert!(transfer_with_seed( &from_keyed_account, &from_base_keyed_account, - &from_seed, + from_seed, &from_owner, &to_keyed_account, 0, diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index 977a65a42b..7439d0a847 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -52,7 +52,7 @@ fn test_shrink_and_clean() { for (pubkey, account) in alive_accounts.iter_mut() { account.checked_sub_lamports(1).unwrap(); - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, account)]); } accounts.add_root(current_slot); } @@ -121,9 +121,9 @@ fn test_bad_bank_hash() { for (key, account) in &account_refs { assert_eq!( - db.load_account_hash(&ancestors, &key, None, LoadHint::Unspecified) + db.load_account_hash(&ancestors, key, None, LoadHint::Unspecified) .unwrap(), - AccountsDb::hash_account(some_slot, *account, &key) + AccountsDb::hash_account(some_slot, *account, key) ); } existing.clear(); diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index 7c3d812641..487397fb38 100644 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -28,7 +28,7 @@ fn next_epoch(bank: &Arc) -> Arc { bank.squash(); Arc::new(Bank::new_from_parent( - &bank, + bank, &Pubkey::default(), bank.get_slots_in_epoch(bank.epoch()) + bank.slot(), )) diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index ea5b1e2275..cfc3860e4c 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -114,7 +114,7 @@ fn install_if_missing( url.push_str(version); 
url.push('/'); url.push_str(file.to_str().unwrap()); - download_file(&url.as_str(), &file, true, &mut None)?; + download_file(url.as_str(), file, true, &mut None)?; fs::create_dir_all(&target_path).map_err(|err| err.to_string())?; let zip = File::open(&file).map_err(|err| err.to_string())?; let tar = BzDecoder::new(BufReader::new(zip)); @@ -412,14 +412,14 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m "solana-bpf-tools-linux.tar.bz2" }; install_if_missing( - &config, + config, "bpf-tools", "v1.10", "https://github.com/solana-labs/bpf-tools/releases/download", &PathBuf::from(bpf_tools_filename), ) .expect("Failed to install bpf-tools"); - link_bpf_toolchain(&config); + link_bpf_toolchain(config); let llvm_bin = config .bpf_sdk @@ -530,7 +530,7 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m postprocess_dump(&program_dump); } - check_undefined_symbols(&config, &program_so); + check_undefined_symbols(config, &program_so); println!(); println!("To deploy this program:"); @@ -556,7 +556,7 @@ fn build_bpf(config: Config, manifest_path: Option<PathBuf>) { if let Some(root_package) = metadata.root_package() { if !config.workspace { - build_bpf_package(&config, &metadata.target_directory.as_ref(), root_package); + build_bpf_package(&config, metadata.target_directory.as_ref(), root_package); return; } } @@ -577,7 +577,7 @@ fn build_bpf(config: Config, manifest_path: Option<PathBuf>) { .collect::<Vec<_>>(); for package in all_bpf_packages { - build_bpf_package(&config, &metadata.target_directory.as_ref(), package); + build_bpf_package(&config, metadata.target_directory.as_ref(), package); } } diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index 0db5da8365..49b39f5487 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -122,7 +122,7 @@ fn test_bpf_package(config: &Config, target_directory: &Path, package: &cargo_me cargo_args.push("test-bpf"); } for extra_cargo_test_arg in &config.extra_cargo_test_args { - cargo_args.push(&extra_cargo_test_arg); + cargo_args.push(extra_cargo_test_arg); } spawn(&config.cargo, &cargo_args); } @@ -143,7 +143,7 @@ fn test_bpf(config: Config, manifest_path: Option<PathBuf>) { if let Some(root_package) = metadata.root_package() { if !config.workspace { - test_bpf_package(&config, &metadata.target_directory.as_ref(), root_package); + test_bpf_package(&config, metadata.target_directory.as_ref(), root_package); return; } } @@ -164,7 +164,7 @@ fn test_bpf(config: Config, manifest_path: Option<PathBuf>) { .collect::<Vec<_>>(); for package in all_bpf_packages { - test_bpf_package(&config, &metadata.target_directory.as_ref(), package); + test_bpf_package(&config, metadata.target_directory.as_ref(), package); } } diff --git a/sdk/program/src/message.rs b/sdk/program/src/message.rs index a8b0aa5fe8..aed75785a6 100644 --- a/sdk/program/src/message.rs +++ b/sdk/program/src/message.rs @@ -309,8 +309,8 @@ impl Message { nonce_authority_pubkey: &Pubkey, ) -> Self { let nonce_ix = system_instruction::advance_nonce_account( - &nonce_account_pubkey, - &nonce_authority_pubkey, + nonce_account_pubkey, + nonce_authority_pubkey, ); instructions.insert(0, nonce_ix); Self::new(&instructions, payer) @@ -482,20 +482,20 @@ impl Message { data: &[u8], ) -> Result<Instruction, SanitizeError> { let mut current = 0; - let num_instructions = read_u16(&mut current, &data)?; + let num_instructions = read_u16(&mut current, data)?; if index >= num_instructions as usize { return Err(SanitizeError::IndexOutOfBounds); } // index into the instruction
byte-offset table. current += index * 2; - let start = read_u16(&mut current, &data)?; + let start = read_u16(&mut current, data)?; current = start as usize; - let num_accounts = read_u16(&mut current, &data)?; + let num_accounts = read_u16(&mut current, data)?; let mut accounts = Vec::with_capacity(num_accounts as usize); for _ in 0..num_accounts { - let meta_byte = read_u8(&mut current, &data)?; + let meta_byte = read_u8(&mut current, data)?; let mut is_signer = false; let mut is_writable = false; if meta_byte & (1 << Self::IS_SIGNER_BIT) != 0 { @@ -504,16 +504,16 @@ impl Message { if meta_byte & (1 << Self::IS_WRITABLE_BIT) != 0 { is_writable = true; } - let pubkey = read_pubkey(&mut current, &data)?; + let pubkey = read_pubkey(&mut current, data)?; accounts.push(AccountMeta { pubkey, is_signer, is_writable, }); } - let program_id = read_pubkey(&mut current, &data)?; - let data_len = read_u16(&mut current, &data)?; - let data = read_slice(&mut current, &data, data_len as usize)?; + let program_id = read_pubkey(&mut current, data)?; + let data_len = read_u16(&mut current, data)?; + let data = read_slice(&mut current, data, data_len as usize)?; Ok(Instruction { program_id, accounts, diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index ff1f0b2e1e..8016505a0a 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -17,7 +17,7 @@ pub struct SlotHashes(Vec<SlotHash>); impl SlotHashes { pub fn add(&mut self, slot: Slot, hash: Hash) { - match self.binary_search_by(|(probe, _)| slot.cmp(&probe)) { + match self.binary_search_by(|(probe, _)| slot.cmp(probe)) { Ok(index) => (self.0)[index] = (slot, hash), Err(index) => (self.0).insert(index, (slot, hash)), } @@ -25,7 +25,7 @@ impl SlotHashes { } #[allow(clippy::trivially_copy_pass_by_ref)] pub fn get(&self, slot: &Slot) -> Option<&Hash> { - self.binary_search_by(|(probe, _)| slot.cmp(&probe)) + self.binary_search_by(|(probe, _)| slot.cmp(probe)) .ok() .map(|index| &self[index].1) } diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index ed3096d005..b85f74bc64 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -139,7 +139,7 @@ impl Authorized { } StakeAuthorize::Withdrawer => { if let Some((lockup, clock, custodian)) = lockup_custodian_args { - if lockup.is_in_force(&clock, None) { + if lockup.is_in_force(clock, None) { match custodian { None => { return Err(StakeError::CustodianMissing.into()); } @@ -149,7 +149,7 @@ return Err(StakeError::CustodianSignatureMissing.into()); } - if lockup.is_in_force(&clock, Some(custodian)) { + if lockup.is_in_force(clock, Some(custodian)) { return Err(StakeError::LockupInForce.into()); } } diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 4d6d397e1f..a3f467d5a5 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -588,7 +588,7 @@ pub mod tests { use super::*; fn make_two_accounts(key: &Pubkey) -> (Account, AccountSharedData) { - let mut account1 = Account::new(1, 2, &key); + let mut account1 = Account::new(1, 2, key); account1.executable = true; account1.rent_epoch = 4; let mut account2 = AccountSharedData::new(1, 2, key); diff --git a/sdk/src/derivation_path.rs b/sdk/src/derivation_path.rs index 11c5631776..06feb0cfbd 100644 --- a/sdk/src/derivation_path.rs +++ b/sdk/src/derivation_path.rs @@ -46,7 +46,7 @@ impl TryFrom<&str> for DerivationPath { impl AsRef<[ChildIndex]> for DerivationPath { fn as_ref(&self) -> &[ChildIndex] { - &self.0.as_ref() + self.0.as_ref() } } @@ -88,7
+88,7 @@ impl DerivationPath { } fn _from_absolute_path_insecure_str(path: &str) -> Result { - Ok(Self(DerivationPathInner::from_str(&path).map_err( + Ok(Self(DerivationPathInner::from_str(path).map_err( |err| DerivationPathError::InvalidDerivationPath(err.to_string()), )?)) } diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index 8cd985b787..3c7c363f1c 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -160,7 +160,7 @@ impl GenesisConfig { } pub fn load(ledger_path: &Path) -> Result { - let filename = Self::genesis_filename(&ledger_path); + let filename = Self::genesis_filename(ledger_path); let file = OpenOptions::new() .read(true) .open(&filename) @@ -198,7 +198,7 @@ impl GenesisConfig { std::fs::create_dir_all(&ledger_path)?; - let mut file = File::create(Self::genesis_filename(&ledger_path))?; + let mut file = File::create(Self::genesis_filename(ledger_path))?; file.write_all(&serialized) } @@ -339,8 +339,8 @@ mod tests { && account.lamports == 10_000)); let path = &make_tmp_path("genesis_config"); - config.write(&path).expect("write"); - let loaded_config = GenesisConfig::load(&path).expect("load"); + config.write(path).expect("write"); + let loaded_config = GenesisConfig::load(path).expect("load"); assert_eq!(config.hash(), loaded_config.hash()); let _ignored = std::fs::remove_file(&path); } diff --git a/sdk/src/nonce_keyed_account.rs b/sdk/src/nonce_keyed_account.rs index 14fc508b54..336548fcad 100644 --- a/sdk/src/nonce_keyed_account.rs +++ b/sdk/src/nonce_keyed_account.rs @@ -306,7 +306,7 @@ mod test { let authorized = keyed_account.unsigned_key(); keyed_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &rent, &MockInvokeContext::new(vec![]), @@ -367,7 +367,7 @@ mod test { keyed_account .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -597,7 +597,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -639,7 +639,7 @@ mod test { let lamports = nonce_keyed.account.borrow().lamports(); let result = nonce_keyed.withdraw_nonce_account( lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -669,7 +669,7 @@ mod test { let lamports = nonce_keyed.account.borrow().lamports() + 1; let result = nonce_keyed.withdraw_nonce_account( lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -699,7 +699,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -722,7 +722,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -779,7 +779,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -808,7 +808,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -852,7 +852,7 @@ mod test { let withdraw_lamports = nonce_keyed.account.borrow().lamports(); let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -888,7 +888,7 @@ mod test { let withdraw_lamports = nonce_keyed.account.borrow().lamports() + 1; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -924,7 
+924,7 @@ mod test { let withdraw_lamports = nonce_keyed.account.borrow().lamports() - min_lamports + 1; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -960,7 +960,7 @@ mod test { let withdraw_lamports = u64::MAX - 54; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -1149,7 +1149,7 @@ mod test { let authorized = &Pubkey::default().clone(); nonce_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &rent, &MockInvokeContext::new(vec![]), @@ -1176,7 +1176,7 @@ mod test { let authorized = nonce_account.unsigned_key(); nonce_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &Rent::free(), &MockInvokeContext::new(vec![]), @@ -1211,7 +1211,7 @@ mod test { let authorized = nonce_account.unsigned_key(); nonce_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &Rent::free(), &MockInvokeContext::new(vec![]), diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index 33b9f02548..37585eb18b 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -38,7 +38,7 @@ pub fn new_secp256k1_instruction( hasher.update(&message_arr); let message_hash = hasher.finalize(); let mut message_hash_arr = [0u8; 32]; - message_hash_arr.copy_from_slice(&message_hash.as_slice()); + message_hash_arr.copy_from_slice(message_hash.as_slice()); let message = libsecp256k1::Message::parse(&message_hash_arr); let (signature, recovery_id) = libsecp256k1::sign(&message, priv_key); let signature_arr = signature.serialize(); @@ -143,7 +143,7 @@ pub fn verify_eth_addresses( // Parse out pubkey let eth_address_slice = get_data_slice( - &instruction_datas, + instruction_datas, offsets.eth_address_instruction_index, offsets.eth_address_offset, HASHED_PUBKEY_SERIALIZED_SIZE, @@ -151,7 +151,7 @@ pub fn verify_eth_addresses( // Parse out message let message_slice = get_data_slice( - &instruction_datas, + instruction_datas, offsets.message_instruction_index, offsets.message_data_offset, offsets.message_data_size as usize, diff --git a/sdk/src/signature.rs b/sdk/src/signature.rs index 725e6ffd88..bf4f5e4322 100644 --- a/sdk/src/signature.rs +++ b/sdk/src/signature.rs @@ -29,7 +29,7 @@ impl crate::sanitize::Sanitize for Signature {} impl Signature { pub fn new(signature_slice: &[u8]) -> Self { - Self(GenericArray::clone_from_slice(&signature_slice)) + Self(GenericArray::clone_from_slice(signature_slice)) } pub(self) fn verify_verbose( @@ -54,7 +54,7 @@ pub trait Signable { } fn verify(&self) -> bool { self.get_signature() - .verify(&self.pubkey().as_ref(), self.signable_data().borrow()) + .verify(self.pubkey().as_ref(), self.signable_data().borrow()) } fn pubkey(&self) -> Pubkey; diff --git a/sdk/src/transaction.rs b/sdk/src/transaction.rs index 077e1b21cd..104de0eb7a 100644 --- a/sdk/src/transaction.rs +++ b/sdk/src/transaction.rs @@ -459,7 +459,7 @@ pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> { .filter(|maybe_ix| { let prog_id_idx = maybe_ix.program_id_index as usize; match message.account_keys.get(prog_id_idx) { - Some(program_id) => system_program::check_id(&program_id), + Some(program_id) => system_program::check_id(program_id), _ => false, } } && matches!(limited_deserialize(&maybe_ix.data), Ok(SystemInstruction::AdvanceNonceAccount)) @@ -968,7 +968,7 @@ mod tests { let (_, 
nonce_pubkey, tx) = nonced_transfer_tx();
         let nonce_ix = uses_durable_nonce(&tx).unwrap();
         assert_eq!(
-            get_nonce_pubkey_from_instruction(&nonce_ix, &tx),
+            get_nonce_pubkey_from_instruction(nonce_ix, &tx),
             Some(&nonce_pubkey),
         );
     }
diff --git a/stake-accounts/src/args.rs b/stake-accounts/src/args.rs
index 025b07adae..56a0a3a6fe 100644
--- a/stake-accounts/src/args.rs
+++ b/stake-accounts/src/args.rs
@@ -272,15 +272,15 @@ pub(crate) fn resolve_command(
             Ok(Command::Balance(resolved_args))
         }
         Command::Authorize(args) => {
-            let resolved_args = resolve_authorize_args(&mut wallet_manager, &args)?;
+            let resolved_args = resolve_authorize_args(&mut wallet_manager, args)?;
             Ok(Command::Authorize(resolved_args))
         }
         Command::SetLockup(args) => {
-            let resolved_args = resolve_set_lockup_args(&mut wallet_manager, &args)?;
+            let resolved_args = resolve_set_lockup_args(&mut wallet_manager, args)?;
             Ok(Command::SetLockup(resolved_args))
         }
         Command::Rebase(args) => {
-            let resolved_args = resolve_rebase_args(&mut wallet_manager, &args)?;
+            let resolved_args = resolve_rebase_args(&mut wallet_manager, args)?;
             Ok(Command::Rebase(resolved_args))
         }
         Command::Move(args) => {
diff --git a/stake-accounts/src/main.rs b/stake-accounts/src/main.rs
index 2b478785ff..94cca8701d 100644
--- a/stake-accounts/src/main.rs
+++ b/stake-accounts/src/main.rs
@@ -114,7 +114,7 @@ fn process_lockup_stake_accounts(
 ) -> Result<(), ClientError> {
     let addresses =
         stake_accounts::derive_stake_account_addresses(&args.base_pubkey, args.num_accounts);
-    let existing_lockups = get_lockups(&client, addresses)?;
+    let existing_lockups = get_lockups(client, addresses)?;

     let lockup = LockupArgs {
         epoch: args.lockup_epoch,
@@ -143,7 +143,7 @@ fn process_rebase_stake_accounts(
 ) -> Result<(), ClientError> {
     let addresses =
         stake_accounts::derive_stake_account_addresses(&args.base_pubkey, args.num_accounts);
-    let balances = get_balances(&client, addresses)?;
+    let balances = get_balances(client, addresses)?;

     let messages = stake_accounts::rebase_stake_accounts(
         &args.fee_payer.pubkey(),
@@ -172,7 +172,7 @@ fn process_move_stake_accounts(
     let args = &move_args.rebase_args;
     let addresses =
         stake_accounts::derive_stake_account_addresses(&args.base_pubkey, args.num_accounts);
-    let balances = get_balances(&client, addresses)?;
+    let balances = get_balances(client, addresses)?;

     let messages = stake_accounts::move_stake_accounts(
         &args.fee_payer.pubkey(),
diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs
index 37b4837408..ef183c338b 100644
--- a/stake-accounts/src/stake_accounts.rs
+++ b/stake-accounts/src/stake_accounts.rs
@@ -49,7 +49,7 @@ pub(crate) fn new_stake_account(
     let instructions = stake_instruction::create_account_with_seed(
         funding_pubkey,
         &stake_account_address,
-        &base_pubkey,
+        base_pubkey,
         &index.to_string(),
         &authorized,
         &lockup,
@@ -66,14 +66,14 @@ fn authorize_stake_accounts_instructions(
     new_withdraw_authority_pubkey: &Pubkey,
 ) -> Vec<Instruction> {
     let instruction0 = stake_instruction::authorize(
-        &stake_account_address,
+        stake_account_address,
         stake_authority_pubkey,
         new_stake_authority_pubkey,
         StakeAuthorize::Staker,
         None,
     );
     let instruction1 = stake_instruction::authorize(
-        &stake_account_address,
+        stake_account_address,
         withdraw_authority_pubkey,
         new_withdraw_authority_pubkey,
         StakeAuthorize::Withdrawer,
@@ -102,7 +102,7 @@ fn rebase_stake_account(
         new_base_pubkey,
         &i.to_string(),
     );
-    let message = Message::new(&instructions, Some(&fee_payer_pubkey));
+    let message = Message::new(&instructions, Some(fee_payer_pubkey));
     Some(message)
 }

@@ -139,7 +139,7 @@ fn move_stake_account(
     );
     instructions.extend(authorize_instructions.into_iter());

-    let message = Message::new(&instructions, Some(&fee_payer_pubkey));
+    let message = Message::new(&instructions, Some(fee_payer_pubkey));
     Some(message)
 }

@@ -163,7 +163,7 @@ pub(crate) fn authorize_stake_accounts(
                 new_stake_authority_pubkey,
                 new_withdraw_authority_pubkey,
             );
-            Message::new(&instructions, Some(&fee_payer_pubkey))
+            Message::new(&instructions, Some(fee_payer_pubkey))
         })
         .collect::<Vec<_>>()
 }
@@ -223,7 +223,7 @@ pub(crate) fn lockup_stake_accounts(
                 return None;
             }
             let instruction = stake_instruction::set_lockup(address, &lockup, custodian_pubkey);
-            let message = Message::new(&[instruction], Some(&fee_payer_pubkey));
+            let message = Message::new(&[instruction], Some(fee_payer_pubkey));
             Some(message)
         })
         .collect()
@@ -306,7 +306,7 @@ mod tests {
     ) -> Keypair {
         let fee_payer_keypair = Keypair::new();
         client
-            .transfer_and_confirm(lamports, &funding_keypair, &fee_payer_keypair.pubkey())
+            .transfer_and_confirm(lamports, funding_keypair, &fee_payer_keypair.pubkey())
             .unwrap();
         fee_payer_keypair
     }
@@ -316,7 +316,7 @@ mod tests {
         base_pubkey: &Pubkey,
         i: usize,
     ) -> AccountSharedData {
-        let account_address = derive_stake_account_address(&base_pubkey, i);
+        let account_address = derive_stake_account_address(base_pubkey, i);
         AccountSharedData::from(client.get_account(&account_address).unwrap().unwrap())
     }

@@ -327,7 +327,7 @@ mod tests {
     ) -> Vec<(Pubkey, u64)> {
         (0..num_accounts)
             .map(|i| {
-                let address = derive_stake_account_address(&base_pubkey, i);
+                let address = derive_stake_account_address(base_pubkey, i);
                 (address, client.get_balance(&address).unwrap())
             })
             .collect()
@@ -340,7 +340,7 @@ mod tests {
     ) -> Vec<(Pubkey, Lockup)> {
         (0..num_accounts)
             .map(|i| {
-                let address = derive_stake_account_address(&base_pubkey, i);
+                let address = derive_stake_account_address(base_pubkey, i);
                 let account =
                     AccountSharedData::from(client.get_account(&address).unwrap().unwrap());
                 (address, stake_state::lockup_from(&account).unwrap())
diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs
index 964b4f60a0..383e90bc6f 100644
--- a/storage-bigtable/src/bigtable.rs
+++ b/storage-bigtable/src/bigtable.rs
@@ -628,7 +628,7 @@ where
             .ok_or_else(|| Error::ObjectNotFound(format!("{}/{}", table, key)))?
             .1;

-        let data = decompress(&value)?;
+        let data = decompress(value)?;
         T::decode(&data[..]).map_err(|err| {
             warn!("Failed to deserialize {}/{}: {}", table, key, err);
             Error::ObjectCorrupt(format!("{}/{}", table, key))
@@ -649,7 +649,7 @@ where
             .ok_or_else(|| Error::ObjectNotFound(format!("{}/{}", table, key)))?
             .1;

-        let data = decompress(&value)?;
+        let data = decompress(value)?;
         bincode::deserialize(&data).map_err(|err| {
             warn!("Failed to deserialize {}/{}: {}", table, key, err);
             Error::ObjectCorrupt(format!("{}/{}", table, key))
diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs
index b78b145a2a..254c9406f3 100644
--- a/storage-bigtable/src/lib.rs
+++ b/storage-bigtable/src/lib.rs
@@ -559,7 +559,7 @@ impl LedgerStorage {
         let signature = transaction.signatures[0];

         for address in &transaction.message.account_keys {
-            if !is_sysvar_id(&address) {
+            if !is_sysvar_id(address) {
                 by_addr
                     .entry(address)
                     .or_default()
diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs
index 67dbc2bbe5..ac14ee79a3 100644
--- a/tokens/src/commands.rs
+++ b/tokens/src/commands.rs
@@ -139,7 +139,7 @@ fn apply_previous_transactions(
     for transaction_info in transaction_infos {
         let mut amount = transaction_info.amount;
         for allocation in allocations.iter_mut() {
-            if !has_same_recipient(&allocation, &transaction_info) {
+            if !has_same_recipient(allocation, transaction_info) {
                 continue;
             }
             if allocation.amount >= amount {
@@ -161,7 +161,7 @@ fn transfer(
     to_pubkey: &Pubkey,
 ) -> ClientResult<Transaction> {
     let create_instruction =
-        system_instruction::transfer(&sender_keypair.pubkey(), &to_pubkey, lamports);
+        system_instruction::transfer(&sender_keypair.pubkey(), to_pubkey, lamports);
     let message = Message::new(&[create_instruction], Some(&sender_keypair.pubkey()));
     let (recent_blockhash, _fees) = client.get_recent_blockhash()?;
     Ok(Transaction::new(
@@ -215,7 +215,7 @@ fn distribution_instructions(
             }
             stake_instruction::create_account(
                 &sender_pubkey,
-                &new_stake_account_address,
+                new_stake_account_address,
                 &authorized,
                 &lockup,
                 allocation.amount - unlocked_sol,
@@ -231,12 +231,12 @@ fn distribution_instructions(
                 &sender_stake_args.stake_account_address,
                 &stake_authority,
                 allocation.amount - unlocked_sol,
-                &new_stake_account_address,
+                new_stake_account_address,
             );

             // Make the recipient the new stake authority
             instructions.push(stake_instruction::authorize(
-                &new_stake_account_address,
+                new_stake_account_address,
                 &stake_authority,
                 &recipient,
                 StakeAuthorize::Staker,
@@ -245,7 +245,7 @@ fn distribution_instructions(

             // Make the recipient the new withdraw authority
             instructions.push(stake_instruction::authorize(
-                &new_stake_account_address,
+                new_stake_account_address,
                 &withdraw_authority,
                 &recipient,
                 StakeAuthorize::Withdrawer,
@@ -260,7 +260,7 @@ fn distribution_instructions(
                 custodian: None,
             };
             instructions.push(stake_instruction::set_lockup(
-                &new_stake_account_address,
+                new_stake_account_address,
                 &lockup,
                 &stake_args.lockup_authority.unwrap(),
             ));
@@ -673,7 +673,7 @@ fn update_finalized_transactions(
     {
         statuses.extend(
             client
-                .get_signature_statuses(&unconfirmed_signatures_chunk)?
+                .get_signature_statuses(unconfirmed_signatures_chunk)?
                 .value
                 .into_iter(),
         );
diff --git a/transaction-status/src/token_balances.rs b/transaction-status/src/token_balances.rs
index ab3bfa87ed..07d0fb2c9a 100644
--- a/transaction-status/src/token_balances.rs
+++ b/transaction-status/src/token_balances.rs
@@ -40,7 +40,7 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option<u8> {
     } else {
         let mint_account = bank.get_account(mint)?;

-        let decimals = Mint::unpack(&mint_account.data())
+        let decimals = Mint::unpack(mint_account.data())
             .map(|mint| mint.decimals)
             .ok()?;

@@ -69,7 +69,7 @@ pub fn collect_token_balances(
         }

         if let Some((mint, ui_token_amount)) =
-            collect_token_balance_from_account(&bank, account_id, &mut mint_decimals)
+            collect_token_balance_from_account(bank, account_id, &mut mint_decimals)
         {
             transaction_balances.push(TransactionTokenBalance {
                 account_index: index as u8,
@@ -91,12 +91,12 @@ pub fn collect_token_balance_from_account(
 ) -> Option<(String, UiTokenAmount)> {
     let account = bank.get_account(account_id)?;

-    let token_account = TokenAccount::unpack(&account.data()).ok()?;
+    let token_account = TokenAccount::unpack(account.data()).ok()?;
     let mint_string = &token_account.mint.to_string();
-    let mint = &Pubkey::from_str(&mint_string).unwrap_or_default();
+    let mint = &Pubkey::from_str(mint_string).unwrap_or_default();

-    let decimals = mint_decimals.get(&mint).cloned().or_else(|| {
-        let decimals = get_mint_decimals(bank, &mint)?;
+    let decimals = mint_decimals.get(mint).cloned().or_else(|| {
+        let decimals = get_mint_decimals(bank, mint)?;
         mint_decimals.insert(*mint, decimals);
         Some(decimals)
     })?;
diff --git a/upload-perf/src/upload-perf.rs b/upload-perf/src/upload-perf.rs
index 34436be80f..fbe0132a40 100644
--- a/upload-perf/src/upload-perf.rs
+++ b/upload-perf/src/upload-perf.rs
@@ -57,7 +57,7 @@ fn main() {
         let name = v["name"].as_str().unwrap().trim_matches('\"').to_string();

         if last_commit.is_none() {
-            last_commit = get_last_metrics(&"commit".to_string(), &db, &name, &branch).ok();
+            last_commit = get_last_metrics(&"commit".to_string(), &db, &name, branch).ok();
         }

         let median: i64 = v["median"].to_string().parse().unwrap();
@@ -76,10 +76,10 @@ fn main() {
             */
         }

-        let last_median = get_last_metrics(&"median".to_string(), &db, &name, &branch)
+        let last_median = get_last_metrics(&"median".to_string(), &db, &name, branch)
             .unwrap_or_default();
         let last_deviation =
-            get_last_metrics(&"deviation".to_string(), &db, &name, &branch)
+            get_last_metrics(&"deviation".to_string(), &db, &name, branch)
                 .unwrap_or_default();

         results.insert(name, (median, deviation, last_median, last_deviation));
diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs
index 85a7620099..8e1a8fe74c 100644
--- a/validator/src/bin/solana-test-validator.rs
+++ b/validator/src/bin/solana-test-validator.rs
@@ -68,7 +68,7 @@ fn main() {
             .takes_value(true)
             .help("Configuration file to use");
         if let Some(ref config_file) = *solana_cli_config::CONFIG_FILE {
-            arg.default_value(&config_file)
+            arg.default_value(config_file)
         } else {
             arg
         }
diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs
index 10c07f1eca..a924f30f90 100644
--- a/validator/src/dashboard.rs
+++ b/validator/src/dashboard.rs
@@ -195,7 +195,7 @@ async fn wait_for_validator_startup(
         }

         if admin_client.is_none() {
-            match admin_rpc_service::connect(&ledger_path).await {
+            match admin_rpc_service::connect(ledger_path).await {
                 Ok(new_admin_client) => admin_client = Some(new_admin_client),
                 Err(err) => {
                     progress_bar.set_message(format!("Unable to connect to validator: {}", err));
diff --git a/validator/src/main.rs b/validator/src/main.rs
index ebf6fc3c0f..bb1a1a010d 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -112,7 +112,7 @@ fn wait_for_restart_window(

     let min_idle_slots = (min_idle_time_in_minutes as f64 * 60. / DEFAULT_S_PER_SLOT) as Slot;

-    let admin_client = admin_rpc_service::connect(&ledger_path);
+    let admin_client = admin_rpc_service::connect(ledger_path);
     let rpc_addr = admin_rpc_service::runtime()
         .block_on(async move { admin_client.await?.rpc_addr().await })
         .map_err(|err| format!("Unable to get validator RPC address: {}", err))?;
@@ -474,7 +474,7 @@ fn get_rpc_node(
             rpc_peers
         } else {
             let trusted_snapshot_hashes =
-                get_trusted_snapshot_hashes(&cluster_info, &validator_config.trusted_validators);
+                get_trusted_snapshot_hashes(cluster_info, &validator_config.trusted_validators);

             let mut eligible_rpc_peers = vec![];

@@ -598,7 +598,7 @@ fn check_vote_account(
     }

     for (_, vote_account_authorized_voter_pubkey) in vote_state.authorized_voters().iter() {
-        if !authorized_voter_pubkeys.contains(&vote_account_authorized_voter_pubkey) {
+        if !authorized_voter_pubkeys.contains(vote_account_authorized_voter_pubkey) {
             return Err(format!(
                 "authorized voter {} not available",
                 vote_account_authorized_voter_pubkey
@@ -686,7 +686,7 @@ fn verify_reachable_ports(
         ("RPC", rpc_addr, &node.info.rpc),
         ("RPC pubsub", rpc_pubsub_addr, &node.info.rpc_pubsub),
     ] {
-        if ContactInfo::is_valid_address(&public_addr) {
+        if ContactInfo::is_valid_address(public_addr) {
             tcp_listeners.push((
                 bind_addr.port(),
                 TcpListener::bind(bind_addr).unwrap_or_else(|err| {
@@ -757,7 +757,7 @@ fn rpc_bootstrap(
         order.shuffle(&mut thread_rng());
         if order
             .into_iter()
-            .all(|i| !verify_reachable_ports(&node, &cluster_entrypoints[i], &validator_config))
+            .all(|i| !verify_reachable_ports(node, &cluster_entrypoints[i], validator_config))
         {
             exit(1);
         }
@@ -775,8 +775,8 @@ fn rpc_bootstrap(
             *start_progress.write().unwrap() = ValidatorStartProgress::SearchingForRpcService;

             gossip = Some(start_gossip_node(
-                &identity_keypair,
-                &cluster_entrypoints,
+                identity_keypair,
+                cluster_entrypoints,
                 ledger_path,
                 &node.info.gossip,
                 node.sockets.gossip.try_clone().unwrap(),
@@ -788,8 +788,8 @@ fn rpc_bootstrap(

         let rpc_node_details = get_rpc_node(
             &gossip.as_ref().unwrap().0,
-            &cluster_entrypoints,
-            &validator_config,
+            cluster_entrypoints,
+            validator_config,
             &mut blacklisted_rpc_nodes,
             bootstrap_config.no_snapshot_fetch,
             bootstrap_config.no_untrusted_rpc,
@@ -816,7 +816,7 @@ fn rpc_bootstrap(
             .and_then(|_| {
                 let genesis_config = download_then_check_genesis_hash(
                     &rpc_contact_info.rpc,
-                    &ledger_path,
+                    ledger_path,
                     validator_config.expected_genesis_hash,
                     bootstrap_config.max_genesis_archive_unpacked_size,
                     bootstrap_config.no_genesis_fetch,
@@ -897,7 +897,7 @@ fn rpc_bootstrap(
                 };
                 let ret = download_snapshot(
                     &rpc_contact_info.rpc,
-                    &snapshot_output_dir,
+                    snapshot_output_dir,
                     snapshot_hash,
                     use_progress_bar,
                     maximum_snapshots_to_retain,
@@ -946,7 +946,7 @@ fn rpc_bootstrap(
                 check_vote_account(
                     &rpc_client,
                     &identity_keypair.pubkey(),
-                    &vote_account,
+                    vote_account,
                     &authorized_voter_keypairs
                         .read()
                         .unwrap()
@@ -1680,7 +1680,7 @@ pub fn main() {
                 .long("max-genesis-archive-unpacked-size")
                 .value_name("NUMBER")
                 .takes_value(true)
-                .default_value(&default_genesis_archive_unpacked_size)
+                .default_value(default_genesis_archive_unpacked_size)
                 .help(
                     "maximum total uncompressed file size of downloaded genesis archive",
                 ),
@@ -2141,10 +2141,10 @@ pub fn main() {
         cuda: matches.is_present("cuda"),
         expected_genesis_hash: matches
             .value_of("expected_genesis_hash")
-            .map(|s| Hash::from_str(&s).unwrap()),
+            .map(|s| Hash::from_str(s).unwrap()),
         expected_bank_hash: matches
             .value_of("expected_bank_hash")
-            .map(|s| Hash::from_str(&s).unwrap()),
+            .map(|s| Hash::from_str(s).unwrap()),
         expected_shred_version: value_t!(matches, "expected_shred_version", u16).ok(),
         new_hard_forks: hardforks_of(&matches, "hard_forks"),
         rpc_config: JsonRpcConfig {
diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs
index d9b26c8d97..3dd06dd8aa 100644
--- a/watchtower/src/main.rs
+++ b/watchtower/src/main.rs
@@ -66,7 +66,7 @@ fn get_config() -> Config {
                 .global(true)
                 .help("Configuration file to use");
         if let Some(ref config_file) = *solana_cli_config::CONFIG_FILE {
-            arg.default_value(&config_file)
+            arg.default_value(config_file)
         } else {
             arg
         }
@@ -190,7 +190,7 @@ fn get_cluster_info(
     for validator_identity in &config.validator_identity_pubkeys {
         validator_balances.insert(
             *validator_identity,
-            rpc_client.get_balance(&validator_identity)?,
+            rpc_client.get_balance(validator_identity)?,
         );
     }

@@ -299,7 +299,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                     validator_errors.push(format!("{} missing", formatted_validator_identity));
                 }

-                if let Some(balance) = validator_balances.get(&validator_identity) {
+                if let Some(balance) = validator_balances.get(validator_identity) {
                     if *balance < config.minimum_validator_identity_balance {
                         failures.push((
                             "balance",