chore: cargo +nightly clippy --fix -Z unstable-options

Alexander Meißner 2021-06-18 15:34:46 +02:00 committed by Michael Vines
parent 3570b00560
commit 6514096a67
177 changed files with 1021 additions and 1021 deletions
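
A quick orientation, since the diff itself carries no prose: the title command runs clippy on the nightly toolchain with `-Z unstable-options` so that `--fix` can apply its machine-applicable suggestions automatically. Nearly every hunk below is the same rewrite: dropping a `&` from an expression that is already a reference, the pattern flagged by `clippy::needless_borrow`. A minimal standalone sketch of that pattern (not code from this repository):

```rust
// Redundant-borrow pattern that `cargo +nightly clippy --fix` rewrites:
// `view` is already a `&[u64]`, so `&view` is a `&&[u64]`.
fn total(lamports: &[u64]) -> u64 {
    lamports.iter().sum()
}

fn main() {
    let balances = vec![1_u64, 2, 3];
    let view: &[u64] = &balances;

    // Before: compiles only because deref coercion strips the extra layer.
    let _ = total(&view);
    // After the automatic fix: pass the reference that already exists.
    let _ = total(view);
}
```

Both calls compile and behave identically; the fix only removes the redundant borrow, which is why the commit can touch 177 files without changing behavior.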

View File

@ -69,32 +69,32 @@ impl UiAccount {
) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
bs58::encode(slice_data(account.data(), data_slice_config)).into_string(),
),
UiAccountEncoding::Base58 => UiAccountData::Binary(
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
bs58::encode(slice_data(account.data(), data_slice_config)).into_string(),
encoding,
),
UiAccountEncoding::Base64 => UiAccountData::Binary(
base64::encode(slice_data(&account.data(), data_slice_config)),
base64::encode(slice_data(account.data(), data_slice_config)),
encoding,
),
UiAccountEncoding::Base64Zstd => {
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
match encoder
.write_all(slice_data(&account.data(), data_slice_config))
.write_all(slice_data(account.data(), data_slice_config))
.and_then(|()| encoder.finish())
{
Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
Err(_) => UiAccountData::Binary(
base64::encode(slice_data(&account.data(), data_slice_config)),
base64::encode(slice_data(account.data(), data_slice_config)),
UiAccountEncoding::Base64,
),
}
}
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) =
parse_account_data(pubkey, &account.owner(), &account.data(), additional_data)
parse_account_data(pubkey, account.owner(), account.data(), additional_data)
{
UiAccountData::Json(parsed_data)
} else {
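
In the hunk above, `account.data()` already returns a byte-slice reference, so the old `&account.data()` handed `slice_data` a `&&[u8]`. A self-contained sketch of that shape; `Account` and `slice_data` here are stand-ins, not the real account-decoder types:

```rust
struct Account {
    data: Vec<u8>,
}

impl Account {
    // Getter that already returns a reference, mirroring `account.data()` above.
    fn data(&self) -> &[u8] {
        &self.data
    }
}

fn slice_data(data: &[u8], len: usize) -> &[u8] {
    &data[..len.min(data.len())]
}

fn main() {
    let account = Account { data: vec![0; 16] };
    // Before: `&account.data()` built a `&&[u8]` that deref coercion flattened again.
    let _ = slice_data(&account.data(), 8);
    // After `clippy --fix`: the getter's reference is passed straight through.
    let _ = slice_data(account.data(), 8);
}
```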

View File

@ -37,7 +37,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
where
T: serde::de::DeserializeOwned,
{
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
let keys = keys
.iter()
.map(|key| UiConfigKey {
@ -101,7 +101,7 @@ mod test {
};
let stake_config_account = create_config_account(vec![], &stake_config, 10);
assert_eq!(
parse_config(&stake_config_account.data(), &stake_config::id()).unwrap(),
parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
ConfigAccountType::StakeConfig(UiStakeConfig {
warmup_cooldown_rate: 0.25,
slash_penalty: 50,
@ -121,7 +121,7 @@ mod test {
10,
);
assert_eq!(
parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(),
parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
ConfigAccountType::ValidatorInfo(UiConfig {
keys: vec![
UiConfigKey {

View File

@ -55,7 +55,7 @@ pub fn airdrop_lamports(
);
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {
@ -431,7 +431,7 @@ fn run_accounts_bench(
if !airdrop_lamports(
&client,
&faucet_addr,
&payer_keypairs[i],
payer_keypairs[i],
lamports * 100_000,
) {
warn!("failed airdrop, exiting");
@ -487,14 +487,14 @@ fn run_accounts_bench(
.into_par_iter()
.map(|_| {
let message = make_close_message(
&payer_keypairs[0],
payer_keypairs[0],
&base_keypair,
seed_tracker.max_closed.clone(),
1,
min_balance,
mint.is_some(),
);
let signers: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair];
let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
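
Here `payer_keypairs` is presumably a slice of `&Keypair`, so indexing it already yields a reference and `&payer_keypairs[0]` was a `&&Keypair` that the compiler coerced back down. A small sketch under that assumption (`Keypair` and `sign_count` are stand-ins):

```rust
struct Keypair;

fn sign_count(signers: &[&Keypair]) -> usize {
    signers.len()
}

fn main() {
    let (a, b, base_keypair) = (Keypair, Keypair, Keypair);
    let payer_keypairs: Vec<&Keypair> = vec![&a, &b];

    // Before: `&payer_keypairs[0]` is `&&Keypair`; it still compiled via coercion.
    let before: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair];
    // After `clippy --fix`: the indexed element is already a `&Keypair`.
    let after: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];

    assert_eq!(sign_count(&before), sign_count(&after));
}
```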

View File

@ -195,7 +195,7 @@ fn main() {
if !skip_sanity {
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
let res = bank.process_transaction(tx);
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
});
bank.clear_signatures();

View File

@ -376,8 +376,8 @@ mod tests {
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;

View File

@ -147,7 +147,7 @@ impl Banks for BanksServer {
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.get_blockhash_last_valid_slot(blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =

View File

@ -138,8 +138,8 @@ impl SendTransactionService {
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
&send_socket,
&tpu_address,
send_socket,
tpu_address,
&transaction_info.wire_transaction,
);
true

View File

@ -451,13 +451,13 @@ fn swapper<T>(
let to_swap_txs: Vec<_> = to_swap
.par_iter()
.map(|(signer, swap, profit)| {
let s: &Keypair = &signer;
let s: &Keypair = signer;
let owner = &signer.pubkey();
let instruction = exchange_instruction::swap_request(
owner,
&swap.0.pubkey,
&swap.1.pubkey,
&profit,
profit,
);
let message = Message::new(&[instruction], Some(&s.pubkey()));
Transaction::new(&[s], message, blockhash)
@ -600,7 +600,7 @@ fn trader<T>(
src,
),
];
let message = Message::new(&instructions, Some(&owner_pubkey));
let message = Message::new(&instructions, Some(owner_pubkey));
Transaction::new(&[owner.as_ref(), trade], message, blockhash)
})
.collect();
@ -739,7 +739,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
let instructions = system_instruction::transfer_many(&k.pubkey(), m);
let message = Message::new(&instructions, Some(&k.pubkey()));
(k.clone(), Transaction::new_unsigned(message))
})
@ -777,7 +777,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
let mut waits = 0;
loop {
sleep(Duration::from_millis(200));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
if to_fund_txs.is_empty() {
break;
}
@ -836,7 +836,7 @@ pub fn create_token_accounts<T: Client>(
);
let request_ix =
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
(
(from_keypair, new_keypair),
Transaction::new_unsigned(message),
@ -872,7 +872,7 @@ pub fn create_token_accounts<T: Client>(
let mut waits = 0;
while !to_create_txs.is_empty() {
sleep(Duration::from_millis(200));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
if to_create_txs.is_empty() {
break;
}
@ -958,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot
fn generate_keypairs(num: u64) -> Vec<Keypair> {
let mut seed = [0_u8; 32];
seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
seed.copy_from_slice(Keypair::new().pubkey().as_ref());
let mut rnd = GenKeys::new(seed);
rnd.gen_n_keypairs(num)
}
@ -989,7 +989,7 @@ pub fn airdrop_lamports<T: Client>(
let (blockhash, _fee_calculator, _last_valid_slot) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.expect("Failed to get blockhash");
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
Ok(transaction) => {
let signature = client.async_send_transaction(transaction).unwrap();
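
The swapper closure above gets its tuple fields from `par_iter()`, which hands out references, so `&signer` and `&profit` were double references. The same binding rules apply to a plain `iter()`, which keeps this sketch dependency-free; `Keypair` and `pubkey_of` are stand-ins:

```rust
struct Keypair;

fn pubkey_of(_keypair: &Keypair) -> u8 {
    0
}

fn main() {
    let to_swap: Vec<(Keypair, u64)> = vec![(Keypair, 1), (Keypair, 2)];

    let _pubkeys: Vec<u8> = to_swap
        .iter()
        .map(|(signer, _profit)| {
            // Before: `let s: &Keypair = &signer;` made a `&&Keypair`.
            // After `clippy --fix`: the pattern binding is already `&Keypair`.
            let s: &Keypair = signer;
            pubkey_of(s)
        })
        .collect();
}
```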

View File

@ -18,7 +18,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
w.meta.set_addr(addr);
}
let msgs = Arc::new(msgs);
spawn(move || loop {

View File

@ -544,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(&client);
self.send(client);
// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));
self.verify(&client, to_lamports);
self.verify(client, to_lamports);
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
@ -564,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
let message = Message::new(&instructions, Some(&k.pubkey()));
(*k, Transaction::new_unsigned(message))
})
@ -617,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
return None;
}
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
@ -733,7 +733,7 @@ pub fn airdrop_lamports<T: Client>(
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let mut tries = 0;
loop {

View File

@ -39,7 +39,7 @@ fn main() {
let keypair_count = *tx_count * keypair_multiplier;
if *write_to_client_file {
info!("Generating {} keypairs", keypair_count);
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
let num_accounts = keypairs.len() as u64;
let max_fee =
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
@ -68,7 +68,7 @@ fn main() {
}
info!("Connecting to the cluster");
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
let nodes = discover_cluster(entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
@ -135,7 +135,7 @@ fn main() {
generate_and_fund_keypairs(
client.clone(),
Some(*faucet_addr),
&id,
id,
keypair_count,
*num_lamports_per_account,
)

View File

@ -506,7 +506,7 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
/// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
let passphrase = prompt_password_stderr(&prompt)?;
let passphrase = prompt_password_stderr(prompt)?;
if !passphrase.is_empty() {
let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
if confirmed != passphrase {
@ -586,9 +586,9 @@ pub fn keypair_from_seed_phrase(
let keypair = if skip_validation {
let passphrase = prompt_passphrase(&passphrase_prompt)?;
if legacy {
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
keypair_from_seed_phrase_and_passphrase(seed_phrase, &passphrase)?
} else {
let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase);
let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, &passphrase);
keypair_from_seed_and_derivation_path(&seed, derivation_path)?
}
} else {
@ -616,7 +616,7 @@ pub fn keypair_from_seed_phrase(
if legacy {
keypair_from_seed(seed.as_bytes())?
} else {
keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)?
keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path)?
}
};

View File

@ -107,24 +107,24 @@ mod test {
#[test]
fn compute_websocket_url() {
assert_eq!(
Config::compute_websocket_url(&"http://api.devnet.solana.com"),
Config::compute_websocket_url("http://api.devnet.solana.com"),
"ws://api.devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://api.devnet.solana.com"),
Config::compute_websocket_url("https://api.devnet.solana.com"),
"wss://api.devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"http://example.com:8899"),
Config::compute_websocket_url("http://example.com:8899"),
"ws://example.com:8900/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://example.com:1234"),
Config::compute_websocket_url("https://example.com:1234"),
"wss://example.com:1235/".to_string()
);
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
assert_eq!(Config::compute_websocket_url("garbage"), String::new());
}
}
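
String literals are already `&'static str`, so the old `Config::compute_websocket_url(&"http://…")` calls above passed a `&&str`. A minimal sketch with an assumed, heavily simplified signature (the real function, judging by the expected values above, also rewrites the port):

```rust
// Simplified stand-in: only swaps the scheme, unlike the real helper.
fn compute_websocket_url(json_rpc_url: &str) -> String {
    json_rpc_url.replacen("http", "ws", 1)
}

fn main() {
    // Before: `&"http://…"` adds a useless level of indirection.
    assert_eq!(compute_websocket_url(&"http://example.com"), "ws://example.com");
    // After `clippy --fix`: the literal is already a `&str`.
    assert_eq!(compute_websocket_url("http://example.com"), "ws://example.com");
}
```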

View File

@ -1287,7 +1287,7 @@ impl fmt::Display for CliValidatorInfo {
writeln_name_value(
f,
&format!(" {}:", to_title_case(key)),
&value.as_str().unwrap_or("?"),
value.as_str().unwrap_or("?"),
)?;
}
Ok(())
@ -1768,7 +1768,7 @@ impl fmt::Display for CliTokenAccount {
writeln_name_value(
f,
"Close authority:",
&account.close_authority.as_ref().unwrap_or(&String::new()),
account.close_authority.as_ref().unwrap_or(&String::new()),
)?;
Ok(())
}
@ -2006,7 +2006,7 @@ pub fn return_signers_with_config(
}
pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
let object: Value = serde_json::from_str(&reply).unwrap();
let object: Value = serde_json::from_str(reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new();

View File

@ -1000,7 +1000,7 @@ fn process_airdrop(
let result = request_and_confirm_airdrop(rpc_client, config, &pubkey, lamports);
if let Ok(signature) = result {
let signature_cli_message = log_instruction_custom_error::<SystemError>(result, &config)?;
let signature_cli_message = log_instruction_custom_error::<SystemError>(result, config)?;
println!("{}", signature_cli_message);
let current_balance = rpc_client.get_balance(&pubkey)?;
@ -1013,7 +1013,7 @@ fn process_airdrop(
Ok(build_balance_message(current_balance, false, true))
}
} else {
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
}
@ -1098,7 +1098,7 @@ fn process_confirm(
#[allow(clippy::unnecessary_wraps)]
fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult {
let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&transaction);
let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction);
let decode_transaction = CliTransaction {
decoded_transaction: transaction.clone(),
transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json),
@ -1269,7 +1269,7 @@ fn process_transfer(
} else {
rpc_client.send_and_confirm_transaction_with_spinner(&tx)
};
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
}
@ -1324,7 +1324,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
from_pubkey,
seed,
program_id,
} => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
} => process_create_address_with_seed(config, from_pubkey.as_ref(), seed, program_id),
CliCommand::Fees { ref blockhash } => process_fees(&rpc_client, config, blockhash.as_ref()),
CliCommand::Feature(feature_subcommand) => {
process_feature_subcommand(&rpc_client, config, feature_subcommand)
@ -1347,8 +1347,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::LeaderSchedule { epoch } => {
process_leader_schedule(&rpc_client, config, *epoch)
}
CliCommand::LiveSlots => process_live_slots(&config),
CliCommand::Logs { filter } => process_logs(&config, filter),
CliCommand::LiveSlots => process_live_slots(config),
CliCommand::Logs { filter } => process_logs(config, filter),
CliCommand::Ping {
lamports,
interval,
@ -1453,7 +1453,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
),
// Get the current nonce
CliCommand::GetNonce(nonce_account_pubkey) => {
process_get_nonce(&rpc_client, config, &nonce_account_pubkey)
process_get_nonce(&rpc_client, config, nonce_account_pubkey)
}
// Get a new nonce
CliCommand::NewNonce {
@ -1474,7 +1474,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_show_nonce_account(
&rpc_client,
config,
&nonce_account_pubkey,
nonce_account_pubkey,
*use_lamports_unit,
),
// Withdraw lamports from a nonce account
@ -1487,10 +1487,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_withdraw_from_nonce_account(
&rpc_client,
config,
&nonce_account,
nonce_account,
*nonce_authority,
memo.as_ref(),
&destination_account_pubkey,
destination_account_pubkey,
*lamports,
),
@ -1564,7 +1564,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_deactivate_stake_account(
&rpc_client,
config,
&stake_account_pubkey,
stake_account_pubkey,
*stake_authority,
*sign_only,
*dump_transaction_message,
@ -1590,8 +1590,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_delegate_stake(
&rpc_client,
config,
&stake_account_pubkey,
&vote_account_pubkey,
stake_account_pubkey,
vote_account_pubkey,
*stake_authority,
*force,
*sign_only,
@ -1618,7 +1618,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_split_stake(
&rpc_client,
config,
&stake_account_pubkey,
stake_account_pubkey,
*stake_authority,
*sign_only,
*dump_transaction_message,
@ -1645,8 +1645,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_merge_stake(
&rpc_client,
config,
&stake_account_pubkey,
&source_stake_account_pubkey,
stake_account_pubkey,
source_stake_account_pubkey,
*stake_authority,
*sign_only,
*dump_transaction_message,
@ -1663,7 +1663,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_show_stake_account(
&rpc_client,
config,
&stake_account_pubkey,
stake_account_pubkey,
*use_lamports_unit,
*with_rewards,
),
@ -1686,7 +1686,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_stake_authorize(
&rpc_client,
config,
&stake_account_pubkey,
stake_account_pubkey,
new_authorizations,
*custodian,
*sign_only,
@ -1712,7 +1712,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_stake_set_lockup(
&rpc_client,
config,
&stake_account_pubkey,
stake_account_pubkey,
&mut lockup,
*custodian,
*sign_only,
@ -1740,8 +1740,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_withdraw_stake(
&rpc_client,
config,
&stake_account_pubkey,
&destination_account_pubkey,
stake_account_pubkey,
destination_account_pubkey,
*amount,
*withdraw_authority,
*custodian,
@ -1769,7 +1769,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_set_validator_info(
&rpc_client,
config,
&validator_info,
validator_info,
*force_keybase,
*info_pubkey,
),
@ -1803,7 +1803,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_show_vote_account(
&rpc_client,
config,
&vote_account_pubkey,
vote_account_pubkey,
*use_lamports_unit,
*with_rewards,
),
@ -1830,8 +1830,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_vote_authorize(
&rpc_client,
config,
&vote_account_pubkey,
&new_authorized_pubkey,
vote_account_pubkey,
new_authorized_pubkey,
*vote_authorize,
memo.as_ref(),
),
@ -1843,7 +1843,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_vote_update_validator(
&rpc_client,
config,
&vote_account_pubkey,
vote_account_pubkey,
*new_identity_account,
*withdraw_authority,
memo.as_ref(),
@ -1856,7 +1856,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_vote_update_commission(
&rpc_client,
config,
&vote_account_pubkey,
vote_account_pubkey,
*commission,
*withdraw_authority,
memo.as_ref(),
@ -1872,7 +1872,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::Balance {
pubkey,
use_lamports_unit,
} => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit),
} => process_balance(&rpc_client, config, pubkey, *use_lamports_unit),
// Confirm the last client transaction by signature
CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature),
CliCommand::DecodeTransaction(transaction) => {
@ -1892,8 +1892,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_show_account(
&rpc_client,
config,
&pubkey,
&output_file,
pubkey,
output_file,
*use_lamports_unit,
),
CliCommand::Transfer {

View File

@ -122,7 +122,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.long("our-localhost")
.takes_value(false)
.value_name("PORT")
.default_value(&DEFAULT_RPC_PORT_STR)
.default_value(DEFAULT_RPC_PORT_STR)
.validator(is_port)
.help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"),
)

View File

@ -102,7 +102,7 @@ fn process_rewards(
rewards_epoch: Option<Epoch>,
) -> ProcessResult {
let rewards = rpc_client
.get_inflation_reward(&addresses, rewards_epoch)
.get_inflation_reward(addresses, rewards_epoch)
.map_err(|err| {
if let Some(epoch) = rewards_epoch {
format!("Rewards not available for epoch {}", epoch)

View File

@ -184,7 +184,7 @@ pub fn parse_args<'a>(
let CliCommandInfo {
command,
mut signers,
} = parse_command(&matches, &default_signer, &mut wallet_manager)?;
} = parse_command(matches, &default_signer, &mut wallet_manager)?;
if signers.is_empty() {
if let Ok(signer_info) =
@ -257,7 +257,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *CONFIG_FILE {
arg.default_value(&config_file)
arg.default_value(config_file)
} else {
arg
}
@ -411,10 +411,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
if parse_settings(&matches)? {
if parse_settings(matches)? {
let mut wallet_manager = None;
let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
let (mut config, signers) = parse_args(matches, &mut wallet_manager)?;
config.signers = signers.iter().map(|s| s.as_ref()).collect();
let result = process_command(&config)?;
println!("{}", result);

View File

@ -364,7 +364,7 @@ pub fn process_authorize_nonce_account(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
log_instruction_custom_error::<NonceError>(result, config)
}
pub fn process_create_nonce_account(
@ -449,7 +449,7 @@ pub fn process_create_nonce_account(
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
pub fn process_get_nonce(
@ -474,10 +474,10 @@ pub fn process_new_nonce(
) -> ProcessResult {
check_unique_pubkeys(
(&config.signers[0].pubkey(), "cli keypair".to_string()),
(&nonce_account, "nonce_account_pubkey".to_string()),
(nonce_account, "nonce_account_pubkey".to_string()),
)?;
if let Err(err) = rpc_client.get_account(&nonce_account) {
if let Err(err) = rpc_client.get_account(nonce_account) {
return Err(CliError::BadParameter(format!(
"Unable to advance nonce account {}. error: {}",
nonce_account, err
@ -487,7 +487,7 @@ pub fn process_new_nonce(
let nonce_authority = config.signers[nonce_authority];
let ixs = vec![advance_nonce_account(
&nonce_account,
nonce_account,
&nonce_authority.pubkey(),
)]
.with_memo(memo);
@ -503,7 +503,7 @@ pub fn process_new_nonce(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
pub fn process_show_nonce_account(
@ -522,7 +522,7 @@ pub fn process_show_nonce_account(
use_lamports_unit,
..CliNonceAccount::default()
};
if let Some(ref data) = data {
if let Some(data) = data {
nonce_account.nonce = Some(data.blockhash.to_string());
nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature);
nonce_account.authority = Some(data.authority.to_string());
@ -566,7 +566,7 @@ pub fn process_withdraw_from_nonce_account(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
log_instruction_custom_error::<NonceError>(result, config)
}
#[cfg(test)]
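
Throughout the CLI hunks the enclosing functions already receive `config: &CliConfig`, so `&config` at call sites like `log_instruction_custom_error::<SystemError>(result, &config)` was a `&&CliConfig`. A hedged sketch with stand-in types, not the real CLI API:

```rust
struct CliConfig {
    verbose: bool,
}

fn log_result(result: Result<String, String>, config: &CliConfig) -> String {
    if config.verbose {
        format!("{:?}", result)
    } else {
        result.unwrap_or_default()
    }
}

fn process_airdrop(config: &CliConfig) -> String {
    let result = Ok("signature".to_string());
    // Before: `log_result(result, &config)` passed a `&&CliConfig`.
    // After `clippy --fix`: `config` is already a reference here.
    log_result(result, config)
}

fn main() {
    println!("{}", process_airdrop(&CliConfig { verbose: false }));
}
```

The `if let Some(ref data) = data` line above loses its `ref` in the same spirit: the binding works without an explicit `ref`, assuming `data` is not needed again afterwards, which the visible context suggests.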

View File

@ -770,7 +770,7 @@ fn process_program_deploy(
};
let upgrade_authority_signer = config.signers[upgrade_authority_signer_index];
let default_program_keypair = get_default_program_keypair(&program_location);
let default_program_keypair = get_default_program_keypair(program_location);
let (program_signer, program_pubkey) = if let Some(i) = program_signer_index {
(Some(config.signers[i]), config.signers[i].pubkey())
} else if let Some(program_pubkey) = program_pubkey {
@ -846,7 +846,7 @@ fn process_program_deploy(
};
let (program_data, program_len) = if let Some(program_location) = program_location {
let program_data = read_and_verify_elf(&program_location)?;
let program_data = read_and_verify_elf(program_location)?;
let program_len = program_data.len();
(program_data, program_len)
} else if buffer_provided {
@ -1262,7 +1262,7 @@ fn process_dump(
UpgradeableLoaderState::programdata_data_offset().unwrap_or(0);
let program_data = &programdata_account.data[offset..];
let mut f = File::create(output_location)?;
f.write_all(&program_data)?;
f.write_all(program_data)?;
Ok(format!("Wrote program to {}", output_location))
} else {
Err(
@ -1282,7 +1282,7 @@ fn process_dump(
let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0);
let program_data = &account.data[offset..];
let mut f = File::create(output_location)?;
f.write_all(&program_data)?;
f.write_all(program_data)?;
Ok(format!("Wrote program to {}", output_location))
} else {
Err(format!(
@ -1313,8 +1313,8 @@ fn close(
let mut tx = Transaction::new_unsigned(Message::new(
&[bpf_loader_upgradeable::close(
&account_pubkey,
&recipient_pubkey,
account_pubkey,
recipient_pubkey,
&authority_signer.pubkey(),
)],
Some(&config.signers[0].pubkey()),
@ -1423,7 +1423,7 @@ fn process_close(
if close(
rpc_client,
config,
&address,
address,
&recipient_pubkey,
authority_signer,
)
@ -1524,7 +1524,7 @@ fn do_process_program_write_and_deploy(
.value
{
complete_partial_program_init(
&loader_id,
loader_id,
&config.signers[0].pubkey(),
buffer_pubkey,
&account,
@ -1554,7 +1554,7 @@ fn do_process_program_write_and_deploy(
buffer_pubkey,
minimum_balance,
buffer_data_len as u64,
&loader_id,
loader_id,
)],
minimum_balance,
)
@ -1582,7 +1582,7 @@ fn do_process_program_write_and_deploy(
} else {
loader_instruction::write(
buffer_pubkey,
&loader_id,
loader_id,
(i * DATA_CHUNK_SIZE) as u32,
chunk.to_vec(),
)
@ -1626,7 +1626,7 @@ fn do_process_program_write_and_deploy(
)
} else {
Message::new(
&[loader_instruction::finalize(buffer_pubkey, &loader_id)],
&[loader_instruction::finalize(buffer_pubkey, loader_id)],
Some(&config.signers[0].pubkey()),
)
};
@ -1752,8 +1752,8 @@ fn do_process_program_upgrade(
// Create and add final message
let final_message = Message::new(
&[bpf_loader_upgradeable::upgrade(
&program_id,
&buffer_pubkey,
program_id,
buffer_pubkey,
&upgrade_authority.pubkey(),
&config.signers[0].pubkey(),
)],
@ -1821,7 +1821,7 @@ fn complete_partial_program_init(
account_data_len as u64,
));
if account.owner != *loader_id {
instructions.push(system_instruction::assign(elf_pubkey, &loader_id));
instructions.push(system_instruction::assign(elf_pubkey, loader_id));
}
}
if account.lamports < minimum_balance {
@ -1893,7 +1893,7 @@ fn send_deploy_messages(
initial_transaction.try_sign(&[payer_signer], blockhash)?;
}
let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction);
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
.map_err(|err| format!("Account allocation failed: {}", err))?;
} else {
return Err("Buffer account not created yet, must provide a key pair".into());

View File

@ -92,7 +92,7 @@ where
Ok((message, spend))
} else {
let from_balance = rpc_client
.get_balance_with_commitment(&from_pubkey, commitment)?
.get_balance_with_commitment(from_pubkey, commitment)?
.value;
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
amount,

View File

@ -972,7 +972,7 @@ pub fn process_create_stake_account(
) -> ProcessResult {
let stake_account = config.signers[stake_account];
let stake_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &stake::program::id())?
Pubkey::create_with_seed(&stake_account.pubkey(), seed, &stake::program::id())?
} else {
stake_account.pubkey()
};
@ -1085,7 +1085,7 @@ pub fn process_create_stake_account(
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
}
@ -1172,7 +1172,7 @@ pub fn process_stake_authorize(
} else {
rpc_client.send_and_confirm_transaction_with_spinner(&tx)
};
log_instruction_custom_error::<StakeError>(result, &config)
log_instruction_custom_error::<StakeError>(result, config)
}
}
@ -1196,7 +1196,7 @@ pub fn process_deactivate_stake_account(
let stake_authority = config.signers[stake_authority];
let stake_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())?
Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())?
} else {
*stake_account_pubkey
};
@ -1248,7 +1248,7 @@ pub fn process_deactivate_stake_account(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
log_instruction_custom_error::<StakeError>(result, config)
}
}
@ -1274,7 +1274,7 @@ pub fn process_withdraw_stake(
let custodian = custodian.map(|index| config.signers[index]);
let stake_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())?
Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())?
} else {
*stake_account_pubkey
};
@ -1347,7 +1347,7 @@ pub fn process_withdraw_stake(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
}
@ -1382,10 +1382,10 @@ pub fn process_split_stake(
}
check_unique_pubkeys(
(&fee_payer.pubkey(), "fee-payer keypair".to_string()),
(&stake_account_pubkey, "stake_account".to_string()),
(stake_account_pubkey, "stake_account".to_string()),
)?;
check_unique_pubkeys(
(&stake_account_pubkey, "stake_account".to_string()),
(stake_account_pubkey, "stake_account".to_string()),
(
&split_stake_account.pubkey(),
"split_stake_account".to_string(),
@ -1395,7 +1395,7 @@ pub fn process_split_stake(
let stake_authority = config.signers[stake_authority];
let split_stake_account_address = if let Some(seed) = split_stake_account_seed {
Pubkey::create_with_seed(&split_stake_account.pubkey(), &seed, &stake::program::id())?
Pubkey::create_with_seed(&split_stake_account.pubkey(), seed, &stake::program::id())?
} else {
split_stake_account.pubkey()
};
@ -1433,7 +1433,7 @@ pub fn process_split_stake(
let ixs = if let Some(seed) = split_stake_account_seed {
stake_instruction::split_with_seed(
&stake_account_pubkey,
stake_account_pubkey,
&stake_authority.pubkey(),
lamports,
&split_stake_account_address,
@ -1443,7 +1443,7 @@ pub fn process_split_stake(
.with_memo(memo)
} else {
stake_instruction::split(
&stake_account_pubkey,
stake_account_pubkey,
&stake_authority.pubkey(),
lamports,
&split_stake_account_address,
@ -1492,7 +1492,7 @@ pub fn process_split_stake(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
log_instruction_custom_error::<StakeError>(result, config)
}
}
@ -1515,19 +1515,19 @@ pub fn process_merge_stake(
check_unique_pubkeys(
(&fee_payer.pubkey(), "fee-payer keypair".to_string()),
(&stake_account_pubkey, "stake_account".to_string()),
(stake_account_pubkey, "stake_account".to_string()),
)?;
check_unique_pubkeys(
(&fee_payer.pubkey(), "fee-payer keypair".to_string()),
(
&source_stake_account_pubkey,
source_stake_account_pubkey,
"source_stake_account".to_string(),
),
)?;
check_unique_pubkeys(
(&stake_account_pubkey, "stake_account".to_string()),
(stake_account_pubkey, "stake_account".to_string()),
(
&source_stake_account_pubkey,
source_stake_account_pubkey,
"source_stake_account".to_string(),
),
)?;
@ -1552,8 +1552,8 @@ pub fn process_merge_stake(
blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?;
let ixs = stake_instruction::merge(
&stake_account_pubkey,
&source_stake_account_pubkey,
stake_account_pubkey,
source_stake_account_pubkey,
&stake_authority.pubkey(),
)
.with_memo(memo);
@ -1603,7 +1603,7 @@ pub fn process_merge_stake(
config.commitment,
config.send_transaction_config,
);
log_instruction_custom_error::<StakeError>(result, &config)
log_instruction_custom_error::<StakeError>(result, config)
}
}
@ -1674,7 +1674,7 @@ pub fn process_stake_set_lockup(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
log_instruction_custom_error::<StakeError>(result, config)
}
}
@ -2076,7 +2076,7 @@ pub fn process_delegate_stake(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
log_instruction_custom_error::<StakeError>(result, config)
}
}

View File

@ -119,7 +119,7 @@ fn parse_validator_info(
let key_list: ConfigKeys = deserialize(&account.data)?;
if !key_list.keys.is_empty() {
let (validator_pubkey, _) = key_list.keys[1];
let validator_info_string: String = deserialize(&get_config_data(&account.data)?)?;
let validator_info_string: String = deserialize(get_config_data(&account.data)?)?;
let validator_info: Map<_, _> = serde_json::from_str(&validator_info_string)?;
Ok((validator_pubkey, validator_info))
} else {
@ -246,7 +246,7 @@ pub fn process_set_validator_info(
) -> ProcessResult {
// Validate keybase username
if let Some(string) = validator_info.get("keybaseUsername") {
let result = verify_keybase(&config.signers[0].pubkey(), &string);
let result = verify_keybase(&config.signers[0].pubkey(), string);
if result.is_err() {
if force_keybase {
println!("--force supplied, ignoring: {:?}", result);
@ -272,7 +272,7 @@ pub fn process_set_validator_info(
},
)
.find(|(pubkey, account)| {
let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap();
let (validator_pubkey, _) = parse_validator_info(pubkey, account).unwrap();
validator_pubkey == config.signers[0].pubkey()
});
@ -393,7 +393,7 @@ pub fn process_get_validator_info(
}
for (validator_info_pubkey, validator_info_account) in validator_info.iter() {
let (validator_pubkey, validator_info) =
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
parse_validator_info(validator_info_pubkey, validator_info_account)?;
validator_info_list.push(CliValidatorInfo {
identity_pubkey: validator_pubkey.to_string(),
info_pubkey: validator_info_pubkey.to_string(),
@ -451,7 +451,7 @@ mod tests {
"name": "Alice",
"keybaseUsername": "alice_keybase",
});
assert_eq!(parse_args(&matches), expected);
assert_eq!(parse_args(matches), expected);
}
#[test]

View File

@ -468,7 +468,7 @@ pub fn process_create_vote_account(
let vote_account = config.signers[vote_account];
let vote_account_pubkey = vote_account.pubkey();
let vote_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
Pubkey::create_with_seed(&vote_account_pubkey, seed, &solana_vote_program::id())?
} else {
vote_account_pubkey
};
@ -549,7 +549,7 @@ pub fn process_create_vote_account(
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
log_instruction_custom_error::<SystemError>(result, config)
}
pub fn process_vote_authorize(
@ -592,7 +592,7 @@ pub fn process_vote_authorize(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
log_instruction_custom_error::<VoteError>(result, config)
}
pub fn process_vote_update_validator(
@ -629,7 +629,7 @@ pub fn process_vote_update_validator(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
log_instruction_custom_error::<VoteError>(result, config)
}
pub fn process_vote_update_commission(
@ -660,7 +660,7 @@ pub fn process_vote_update_commission(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
log_instruction_custom_error::<VoteError>(result, config)
}
fn get_vote_account(
@ -763,7 +763,7 @@ pub fn process_withdraw_from_vote_account(
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let withdraw_authority = config.signers[withdraw_authority];
let current_balance = rpc_client.get_balance(&vote_account_pubkey)?;
let current_balance = rpc_client.get_balance(vote_account_pubkey)?;
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?;
let lamports = match withdraw_amount {
@ -798,7 +798,7 @@ pub fn process_withdraw_from_vote_account(
config.commitment,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction);
log_instruction_custom_error::<VoteError>(result, &config)
log_instruction_custom_error::<VoteError>(result, config)
}
#[cfg(test)]

View File

@ -68,7 +68,7 @@ fn test_cli_program_deploy_non_upgradeable() {
.unwrap()
.as_str()
.unwrap();
let program_id = Pubkey::from_str(&program_id_str).unwrap();
let program_id = Pubkey::from_str(program_id_str).unwrap();
let account0 = rpc_client.get_account(&program_id).unwrap();
assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
assert_eq!(account0.owner, bpf_loader::id());
@ -198,7 +198,7 @@ fn test_cli_program_deploy_no_authority() {
.unwrap()
.as_str()
.unwrap();
let program_id = Pubkey::from_str(&program_id_str).unwrap();
let program_id = Pubkey::from_str(program_id_str).unwrap();
// Attempt to upgrade the program
config.signers = vec![&keypair, &upgrade_authority];
@ -284,7 +284,7 @@ fn test_cli_program_deploy_with_authority() {
.unwrap();
assert_eq!(
program_keypair.pubkey(),
Pubkey::from_str(&program_pubkey_str).unwrap()
Pubkey::from_str(program_pubkey_str).unwrap()
);
let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap();
assert_eq!(program_account.lamports, minimum_balance_for_program);
@ -328,7 +328,7 @@ fn test_cli_program_deploy_with_authority() {
.unwrap()
.as_str()
.unwrap();
let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap();
let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
assert_eq!(program_account.lamports, minimum_balance_for_program);
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
@ -397,7 +397,7 @@ fn test_cli_program_deploy_with_authority() {
.as_str()
.unwrap();
assert_eq!(
Pubkey::from_str(&new_upgrade_authority_str).unwrap(),
Pubkey::from_str(new_upgrade_authority_str).unwrap(),
new_upgrade_authority.pubkey()
);
@ -452,7 +452,7 @@ fn test_cli_program_deploy_with_authority() {
.unwrap();
assert_eq!(
new_upgrade_authority.pubkey(),
Pubkey::from_str(&authority_pubkey_str).unwrap()
Pubkey::from_str(authority_pubkey_str).unwrap()
);
// Set no authority
@ -510,7 +510,7 @@ fn test_cli_program_deploy_with_authority() {
.unwrap()
.as_str()
.unwrap();
let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap();
let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap();
let (programdata_pubkey, _) =
Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id());
let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap();
@ -606,7 +606,7 @@ fn test_cli_program_write_buffer() {
.unwrap()
.as_str()
.unwrap();
let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap();
let buffer_account = rpc_client.get_account(&new_buffer_pubkey).unwrap();
assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default);
assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id());
@ -641,7 +641,7 @@ fn test_cli_program_write_buffer() {
.unwrap();
assert_eq!(
buffer_keypair.pubkey(),
Pubkey::from_str(&buffer_pubkey_str).unwrap()
Pubkey::from_str(buffer_pubkey_str).unwrap()
);
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
assert_eq!(buffer_account.lamports, minimum_balance_for_buffer);
@ -675,7 +675,7 @@ fn test_cli_program_write_buffer() {
.unwrap();
assert_eq!(
keypair.pubkey(),
Pubkey::from_str(&authority_pubkey_str).unwrap()
Pubkey::from_str(authority_pubkey_str).unwrap()
);
// Specify buffer authority
@ -700,7 +700,7 @@ fn test_cli_program_write_buffer() {
.unwrap();
assert_eq!(
buffer_keypair.pubkey(),
Pubkey::from_str(&buffer_pubkey_str).unwrap()
Pubkey::from_str(buffer_pubkey_str).unwrap()
);
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default);
@ -735,7 +735,7 @@ fn test_cli_program_write_buffer() {
.unwrap()
.as_str()
.unwrap();
let buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
let buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap();
let buffer_account = rpc_client.get_account(&buffer_pubkey).unwrap();
assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default);
assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id());
@ -768,7 +768,7 @@ fn test_cli_program_write_buffer() {
.unwrap();
assert_eq!(
authority_keypair.pubkey(),
Pubkey::from_str(&authority_pubkey_str).unwrap()
Pubkey::from_str(authority_pubkey_str).unwrap()
);
// Close buffer
@ -806,7 +806,7 @@ fn test_cli_program_write_buffer() {
.unwrap()
.as_str()
.unwrap();
let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap();
// Close buffers and deposit default keypair
let pre_lamports = rpc_client.get_account(&keypair.pubkey()).unwrap().lamports;
@ -901,7 +901,7 @@ fn test_cli_program_set_buffer_authority() {
.as_str()
.unwrap();
assert_eq!(
Pubkey::from_str(&new_buffer_authority_str).unwrap(),
Pubkey::from_str(new_buffer_authority_str).unwrap(),
new_buffer_authority.pubkey()
);
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@ -928,7 +928,7 @@ fn test_cli_program_set_buffer_authority() {
.as_str()
.unwrap();
assert_eq!(
Pubkey::from_str(&buffer_authority_str).unwrap(),
Pubkey::from_str(buffer_authority_str).unwrap(),
buffer_keypair.pubkey()
);
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@ -1101,7 +1101,7 @@ fn test_cli_program_show() {
.unwrap();
assert_eq!(
buffer_keypair.pubkey(),
Pubkey::from_str(&address_str).unwrap()
Pubkey::from_str(address_str).unwrap()
);
let authority_str = json
.as_object()
@ -1112,7 +1112,7 @@ fn test_cli_program_show() {
.unwrap();
assert_eq!(
authority_keypair.pubkey(),
Pubkey::from_str(&authority_str).unwrap()
Pubkey::from_str(authority_str).unwrap()
);
let data_len = json
.as_object()
@ -1161,7 +1161,7 @@ fn test_cli_program_show() {
.unwrap();
assert_eq!(
program_keypair.pubkey(),
Pubkey::from_str(&address_str).unwrap()
Pubkey::from_str(address_str).unwrap()
);
let programdata_address_str = json
.as_object()
@ -1176,7 +1176,7 @@ fn test_cli_program_show() {
);
assert_eq!(
programdata_pubkey,
Pubkey::from_str(&programdata_address_str).unwrap()
Pubkey::from_str(programdata_address_str).unwrap()
);
let authority_str = json
.as_object()
@ -1187,7 +1187,7 @@ fn test_cli_program_show() {
.unwrap();
assert_eq!(
authority_keypair.pubkey(),
Pubkey::from_str(&authority_str).unwrap()
Pubkey::from_str(authority_str).unwrap()
);
let deployed_slot = json
.as_object()

View File

@ -31,7 +31,7 @@ impl LargestAccountsCache {
&self,
filter: &Option<RpcLargestAccountsFilter>,
) -> Option<(u64, Vec<RpcAccountBalance>)> {
self.cache.get(&filter).and_then(|value| {
self.cache.get(filter).and_then(|value| {
if let Ok(elapsed) = value.cached_time.elapsed() {
if elapsed < Duration::from_secs(self.duration) {
return Some((value.slot, value.accounts.clone()));
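
`HashMap::get` only needs a borrow of the key, and `filter` is already a reference in this method, so `&filter` added a `&&Option<…>` layer that deref coercion removed again. A sketch with assumed key and value types:

```rust
use std::collections::HashMap;

struct LargestAccountsCache {
    // Assumed shape: keyed by an optional filter, storing a slot number.
    cache: HashMap<Option<String>, u64>,
}

impl LargestAccountsCache {
    fn get_largest_accounts(&self, filter: &Option<String>) -> Option<u64> {
        // Before: `self.cache.get(&filter)` passed a `&&Option<String>`.
        // After `clippy --fix`: the existing reference is enough.
        self.cache.get(filter).copied()
    }
}

fn main() {
    let mut largest = LargestAccountsCache { cache: HashMap::new() };
    largest.cache.insert(None, 42);
    assert_eq!(largest.get_largest_accounts(&None), Some(42));
}
```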

View File

@ -1627,7 +1627,7 @@ impl RpcClient {
) -> ClientResult<u64> {
let now = Instant::now();
loop {
match self.get_balance_with_commitment(&pubkey, commitment_config) {
match self.get_balance_with_commitment(pubkey, commitment_config) {
Ok(bal) => {
return Ok(bal.value);
}
@ -1696,7 +1696,7 @@ impl RpcClient {
let now = Instant::now();
loop {
if let Ok(Some(_)) =
self.get_signature_status_with_commitment(&signature, commitment_config)
self.get_signature_status_with_commitment(signature, commitment_config)
{
break;
}
@ -1853,11 +1853,11 @@ impl RpcClient {
let (signature, status) = loop {
// Get recent commitment in order to count confirmations for successful transactions
let status = self
.get_signature_status_with_commitment(&signature, CommitmentConfig::processed())?;
.get_signature_status_with_commitment(signature, CommitmentConfig::processed())?;
if status.is_none() {
if self
.get_fee_calculator_for_blockhash_with_commitment(
&recent_blockhash,
recent_blockhash,
CommitmentConfig::processed(),
)?
.value
@ -1891,7 +1891,7 @@ impl RpcClient {
// Return when specified commitment is reached
// Failed transactions have already been eliminated, `is_some` check is sufficient
if self
.get_signature_status_with_commitment(&signature, commitment)?
.get_signature_status_with_commitment(signature, commitment)?
.is_some()
{
progress_bar.set_message("Transaction confirmed");
@ -1907,7 +1907,7 @@ impl RpcClient {
));
sleep(Duration::from_millis(500));
confirmations = self
.get_num_blocks_since_signature_confirmation(&signature)
.get_num_blocks_since_signature_confirmation(signature)
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
return Err(

View File

@ -451,7 +451,7 @@ impl SyncClient for ThinClient {
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client()
.get_signature_status(&signature)
.get_signature_status(signature)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -468,7 +468,7 @@ impl SyncClient for ThinClient {
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client()
.get_signature_status_with_commitment(&signature, commitment_config)
.get_signature_status_with_commitment(signature, commitment_config)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,

View File

@ -121,7 +121,7 @@ struct LeaderTpuCache {
impl LeaderTpuCache {
fn new(rpc_client: &RpcClient, first_slot: Slot) -> Self {
let leaders = Self::fetch_slot_leaders(rpc_client, first_slot).unwrap_or_default();
let leader_tpu_map = Self::fetch_cluster_tpu_sockets(&rpc_client).unwrap_or_default();
let leader_tpu_map = Self::fetch_cluster_tpu_sockets(rpc_client).unwrap_or_default();
Self {
first_slot,
leaders,

View File

@ -187,7 +187,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
let res = bank.process_transaction(tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();

View File

@ -24,10 +24,10 @@ fn bench_save_tower(bench: &mut Bencher) {
let heaviest_bank = BankForks::new(Bank::default()).working_bank();
let tower = Tower::new(
&node_keypair.pubkey(),
&vote_account_pubkey,
vote_account_pubkey,
0,
&heaviest_bank,
&path,
path,
);
bench.iter(move || {

View File

@ -148,7 +148,7 @@ impl AccountsHashVerifier {
for (slot, hash) in hashes.iter() {
slot_to_hash.insert(*slot, *hash);
}
if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) {
if Self::should_halt(cluster_info, trusted_validators, &mut slot_to_hash) {
exit.store(true, Ordering::Relaxed);
}
}

View File

@ -257,7 +257,7 @@ impl BankingStage {
Self::num_threads(),
transaction_status_sender,
gossip_vote_sender,
&cost_model,
cost_model,
&cost_tracker,
)
}
@ -393,9 +393,9 @@ impl BankingStage {
// We've hit the end of this slot, no need to perform more processing,
// just filter the remaining packets for the invalid (e.g. too old) ones
let new_unprocessed_indexes = Self::filter_unprocessed_packets(
&bank,
&msgs,
&original_unprocessed_indexes,
bank,
msgs,
original_unprocessed_indexes,
my_pubkey,
*next_leader,
cost_model,
@ -413,8 +413,8 @@ impl BankingStage {
Self::process_packets_transactions(
&bank,
&bank_creation_time,
&recorder,
&msgs,
recorder,
msgs,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
@ -449,7 +449,7 @@ impl BankingStage {
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
&original_unprocessed_indexes
original_unprocessed_indexes
));
true
}
@ -652,7 +652,7 @@ impl BankingStage {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
&poh_recorder,
poh_recorder,
cluster_info,
&mut buffered_packets,
enable_forwarding,
@ -684,8 +684,8 @@ impl BankingStage {
match Self::process_packets(
&my_pubkey,
&verified_receiver,
&poh_recorder,
verified_receiver,
poh_recorder,
recv_start,
recv_timeout,
id,
@ -797,7 +797,7 @@ impl BankingStage {
let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
let pre_token_balances = if transaction_status_sender.is_some() {
collect_token_balances(&bank, &batch, &mut mint_decimals)
collect_token_balances(bank, batch, &mut mint_decimals)
} else {
vec![]
};
@ -857,7 +857,7 @@ impl BankingStage {
if let Some(transaction_status_sender) = transaction_status_sender {
let txs = batch.transactions_iter().cloned().collect();
let post_balances = bank.collect_balances(batch);
let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);
let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals);
transaction_status_sender.send_transaction_status_batch(
bank.clone(),
txs,
@ -1170,7 +1170,7 @@ impl BankingStage {
// applying cost of processed transactions to shared cost_tracker
transactions.iter().enumerate().for_each(|(index, tx)| {
if !unprocessed_tx_indexes.iter().any(|&i| i == index) {
let tx_cost = cost_model.read().unwrap().calculate_cost(&tx.transaction());
let tx_cost = cost_model.read().unwrap().calculate_cost(tx.transaction());
let mut guard = cost_tracker.lock().unwrap();
let _result = guard.try_add(tx_cost);
drop(guard);
@ -1229,7 +1229,7 @@ impl BankingStage {
let (transactions, transaction_to_packet_indexes, retry_packet_indexes) =
Self::transactions_from_packets(
msgs,
&transaction_indexes,
transaction_indexes,
bank.secp256k1_program_enabled(),
cost_model,
cost_tracker,
@ -1368,7 +1368,7 @@ impl BankingStage {
&bank,
&msgs,
&packet_indexes,
&my_pubkey,
my_pubkey,
next_leader,
cost_model,
cost_tracker,
@ -2579,7 +2579,7 @@ mod tests {
Receiver<WorkingBankEntry>,
JoinHandle<()>,
) {
Blockstore::destroy(&ledger_path).unwrap();
Blockstore::destroy(ledger_path).unwrap();
let genesis_config_info = create_slow_genesis_config(10_000);
let GenesisConfigInfo {
genesis_config,
@ -2587,8 +2587,8 @@ mod tests {
..
} = &genesis_config_info;
let blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config));
let exit = Arc::new(AtomicBool::default());
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
@ -2609,9 +2609,9 @@ mod tests {
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey0, 1, genesis_config.hash()),
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()),
];
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
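
One more variant shows up in the test helper above: destructuring `&genesis_config_info` binds `genesis_config` and `mint_keypair` as references via match ergonomics, so the later `&genesis_config` and `&mint_keypair` arguments were double references. A stand-in sketch of that binding-mode detail:

```rust
struct Keypair;

struct GenesisConfigInfo {
    mint_keypair: Keypair,
    hash: u64,
}

fn transfer(_from: &Keypair, _lamports: u64, _recent_blockhash: u64) {}

fn main() {
    let genesis_config_info = GenesisConfigInfo {
        mint_keypair: Keypair,
        hash: 7,
    };

    // Destructuring through a reference: `mint_keypair` is bound as `&Keypair`.
    let GenesisConfigInfo { mint_keypair, hash } = &genesis_config_info;

    // Before: `transfer(&mint_keypair, …)` passed a `&&Keypair`.
    // After `clippy --fix`:
    transfer(mint_keypair, 1, *hash);
}
```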

View File

@ -408,7 +408,7 @@ pub fn broadcast_shreds(
let packets: Vec<_> = shreds
.iter()
.map(|shred| {
let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
let broadcast_index = weighted_best(peers_and_stakes, shred.seed());
(&shred.payload, &peers[broadcast_index].tvu)
})
@ -429,7 +429,7 @@ pub fn broadcast_shreds(
send_mmsg_time.stop();
transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
let num_live_peers = num_live_peers(&peers);
let num_live_peers = num_live_peers(peers);
update_peer_stats(
num_live_peers,
broadcast_len as i64 + 1,

View File

@ -212,9 +212,9 @@ impl BroadcastRun for BroadcastDuplicatesRun {
.collect();
stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
if r_stake == l_stake {
l_key.cmp(&r_key)
l_key.cmp(r_key)
} else {
r_stake.cmp(&l_stake)
r_stake.cmp(l_stake)
}
});
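
`sort_by` hands the comparator references to the elements, and `Ord::cmp` itself takes `&Self`, so `&r_key` and `&l_stake` were each one `&` too many. A runnable sketch with `Pubkey` as a stand-in type:

```rust
type Pubkey = [u8; 4];

fn main() {
    let mut stakes: Vec<(Pubkey, u64)> = vec![([2; 4], 10), ([1; 4], 10), ([3; 4], 7)];

    // Sort by stake descending, breaking ties by key ascending.
    stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
        if r_stake == l_stake {
            // Before: `l_key.cmp(&r_key)` went through a `&&Pubkey`.
            l_key.cmp(r_key)
        } else {
            r_stake.cmp(l_stake)
        }
    });

    assert_eq!(stakes[0].0, [1; 4]);
}
```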

View File

@ -161,7 +161,7 @@ impl StandardBroadcastRun {
) -> Result<()> {
let (bsend, brecv) = channel();
let (ssend, srecv) = channel();
self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
self.process_receive_results(blockstore, &ssend, &bsend, receive_results)?;
let srecv = Arc::new(Mutex::new(srecv));
let brecv = Arc::new(Mutex::new(brecv));
//data

View File

@ -110,7 +110,7 @@ impl VoteTracker {
epoch_schedule: *root_bank.epoch_schedule(),
..VoteTracker::default()
};
vote_tracker.progress_with_new_root_bank(&root_bank);
vote_tracker.progress_with_new_root_bank(root_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
root_bank.get_leader_schedule_epoch(root_bank.slot())
@ -603,7 +603,7 @@ impl ClusterInfoVoteListener {
if slot == last_vote_slot {
let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let stake = vote_accounts
.get(&vote_pubkey)
.get(vote_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or_default();
let total_stake = epoch_stakes.total_stake();
@ -692,7 +692,7 @@ impl ClusterInfoVoteListener {
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
@ -700,7 +700,7 @@ impl ClusterInfoVoteListener {
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
@ -738,7 +738,7 @@ impl ClusterInfoVoteListener {
Self::track_new_votes_and_notify_confirmations(
vote,
&vote_pubkey,
&vote_tracker,
vote_tracker,
root_bank,
subscriptions,
verified_vote_sender,

View File

@ -192,7 +192,7 @@ fn get_cluster_duplicate_confirmed_hash<'a>(
slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash
);
}
Some(&local_frozen_hash)
Some(local_frozen_hash)
}
(Some(local_frozen_hash), None) => Some(local_frozen_hash),
_ => gossip_duplicate_confirmed_hash,


@ -352,15 +352,15 @@ mod tests {
if *a <= root {
let mut expected = BlockCommitment::default();
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
assert_eq!(*commitment.get(a).unwrap(), expected);
} else if i <= 4 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(2, lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
assert_eq!(*commitment.get(a).unwrap(), expected);
} else if i <= 6 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(1, lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
assert_eq!(*commitment.get(a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));


@ -164,7 +164,7 @@ impl Tower {
bank: &Bank,
path: &Path,
) -> Self {
let path = Self::get_filename(&path, node_pubkey);
let path = Self::get_filename(path, node_pubkey);
let tmp_path = Self::get_tmp_filename(&path);
let mut tower = Self {
node_pubkey: *node_pubkey,
@ -205,8 +205,8 @@ impl Tower {
crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
root_bank.deref(),
bank_forks.frozen_banks().values().cloned().collect(),
&my_pubkey,
&vote_account,
my_pubkey,
vote_account,
);
let root = root_bank.slot();
@ -219,11 +219,11 @@ impl Tower {
.clone();
Self::new(
&my_pubkey,
&vote_account,
my_pubkey,
vote_account,
root,
&heaviest_bank,
&ledger_path,
ledger_path,
)
}
@ -736,7 +736,7 @@ impl Tower {
// finding any lockout intervals in the `lockout_intervals` tree
// for this bank that contain `last_vote`.
let lockout_intervals = &progress
.get(&candidate_slot)
.get(candidate_slot)
.unwrap()
.fork_stats
.lockout_intervals;
@ -1328,7 +1328,7 @@ pub fn reconcile_blockstore_roots_with_tower(
if last_blockstore_root < tower_root {
// Ensure tower_root itself to exist and be marked as rooted in the blockstore
// in addition to its ancestors.
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore)
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore)
.take_while(|current| match current.cmp(&last_blockstore_root) {
Ordering::Greater => true,
Ordering::Equal => false,
@ -1490,7 +1490,7 @@ pub mod test {
tower: &mut Tower,
) -> Vec<HeaviestForkFailures> {
// Try to simulate the vote
let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap();
let my_keypairs = self.validator_keypairs.get(my_pubkey).unwrap();
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
let ancestors = self.bank_forks.read().unwrap().ancestors();
let mut frozen_banks: Vec<_> = self
@ -1503,7 +1503,7 @@ pub mod test {
.collect();
let _ = ReplayStage::compute_bank_stats(
&my_pubkey,
my_pubkey,
&ancestors,
&mut frozen_banks,
tower,
@ -1582,9 +1582,9 @@ pub mod test {
.filter_map(|slot| {
let mut fork_tip_parent = tr(slot - 1);
fork_tip_parent.push_front(tr(slot));
self.fill_bank_forks(fork_tip_parent, &cluster_votes);
self.fill_bank_forks(fork_tip_parent, cluster_votes);
if votes_to_simulate.contains(&slot) {
Some((slot, self.simulate_vote(slot, &my_pubkey, tower)))
Some((slot, self.simulate_vote(slot, my_pubkey, tower)))
} else {
None
}
@ -1627,7 +1627,7 @@ pub mod test {
fork_tip_parent.push_front(tr(start_slot + i));
self.fill_bank_forks(fork_tip_parent, cluster_votes);
if self
.simulate_vote(i + start_slot, &my_pubkey, tower)
.simulate_vote(i + start_slot, my_pubkey, tower)
.is_empty()
{
cluster_votes
@ -2850,7 +2850,7 @@ pub mod test {
tower.save(&identity_keypair).unwrap();
modify_serialized(&tower.path);
let loaded = Tower::restore(&dir.path(), &identity_keypair.pubkey());
let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey());
(tower, loaded)
}


@ -82,7 +82,7 @@ impl CostModel {
&non_signed_writable_accounts,
&non_signed_readonly_accounts,
),
execution_cost: self.find_transaction_cost(&transaction),
execution_cost: self.find_transaction_cost(transaction),
};
cost.writable_accounts.extend(&signed_writable_accounts);
cost.writable_accounts.extend(&non_signed_writable_accounts);
@ -109,7 +109,7 @@ impl CostModel {
}
fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 {
match self.instruction_execution_cost_table.get_cost(&program_key) {
match self.instruction_execution_cost_table.get_cost(program_key) {
Some(cost) => *cost,
None => {
let default_value = self.instruction_execution_cost_table.get_mode();


@ -55,7 +55,7 @@ impl CostTracker {
// check each account against account_cost_limit,
for account_key in keys.iter() {
match self.cost_by_writable_accounts.get(&account_key) {
match self.cost_by_writable_accounts.get(account_key) {
Some(chained_cost) => {
if chained_cost + cost > self.account_cost_limit {
return Err("would exceed account cost limit");
@ -143,7 +143,7 @@ mod tests {
) -> (Transaction, Vec<Pubkey>, u64) {
let keypair = Keypair::new();
let simple_transaction =
system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, *start_hash);
system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash);
(simple_transaction, vec![mint_keypair.pubkey()], 5)
}


@ -67,7 +67,7 @@ impl ExecuteCostTable {
.map(|(key, _)| key)
.expect("cannot find mode from cost table");
*self.table.get(&key).unwrap()
*self.table.get(key).unwrap()
}
}
@ -75,11 +75,11 @@ impl ExecuteCostTable {
// client is advised to call `get_average()` or `get_mode()` to
// assign a 'default' value for new program.
pub fn get_cost(&self, key: &Pubkey) -> Option<&u64> {
self.table.get(&key)
self.table.get(key)
}
pub fn upsert(&mut self, key: &Pubkey, value: &u64) {
let need_to_add = self.table.get(&key).is_none();
let need_to_add = self.table.get(key).is_none();
let current_size = self.get_count();
if current_size == self.capacity && need_to_add {
self.prune_to(&((current_size as f64 * PRUNE_RATIO) as usize));

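The `.get(&key)` cleanups above, and the many like them later in the diff, follow from `HashMap::get` taking a `&Q` where the key type implements `Borrow<Q>`: when the caller already holds a reference to the key, the extra `&` only builds a reference that is peeled straight back off. A hedged sketch with `u64` keys standing in for `Pubkey`:

    use std::collections::HashMap;

    fn lookup(table: &HashMap<u64, u64>, key: &u64) -> u64 {
        // before the fix: *table.get(&key).unwrap_or(&0)
        *table.get(key).unwrap_or(&0)
    }

    fn main() {
        let mut table = HashMap::new();
        table.insert(42u64, 7u64);
        assert_eq!(lookup(&table, &42), 7);
        assert_eq!(lookup(&table, &1), 0);
    }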

@ -34,7 +34,7 @@ impl FetchStage {
tpu_forwards_sockets,
exit,
&sender,
&poh_recorder,
poh_recorder,
coalesce_ms,
),
receiver,
@ -54,8 +54,8 @@ impl FetchStage {
tx_sockets,
tpu_forwards_sockets,
exit,
&sender,
&poh_recorder,
sender,
poh_recorder,
coalesce_ms,
)
}
@ -108,7 +108,7 @@ impl FetchStage {
let tpu_threads = sockets.into_iter().map(|socket| {
streamer::receiver(
socket,
&exit,
exit,
sender.clone(),
recycler.clone(),
"fetch_stage",
@ -121,7 +121,7 @@ impl FetchStage {
let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| {
streamer::receiver(
socket,
&exit,
exit,
forward_sender.clone(),
recycler.clone(),
"fetch_forward_stage",


@ -457,7 +457,7 @@ impl HeaviestSubtreeForkChoice {
pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
self.fork_infos
.get(&slot_hash_key)
.get(slot_hash_key)
.map(|fork_info| fork_info.is_duplicate_confirmed())
}
@ -472,7 +472,7 @@ impl HeaviestSubtreeForkChoice {
/// Returns false if the node or any of its ancestors have been marked as duplicate
pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
self.fork_infos
.get(&slot_hash_key)
.get(slot_hash_key)
.map(|fork_info| fork_info.is_candidate())
}
@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice {
for child_key in &fork_info.children {
let child_fork_info = self
.fork_infos
.get(&child_key)
.get(child_key)
.expect("Child must exist in fork_info map");
let child_stake_voted_subtree = child_fork_info.stake_voted_subtree;
is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed;
@ -770,7 +770,7 @@ impl HeaviestSubtreeForkChoice {
let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0);
let stake_update = epoch_stakes
.get(&epoch)
.map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey))
.map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey))
.unwrap_or(0);
update_operations
@ -896,7 +896,7 @@ impl TreeDiff for HeaviestSubtreeForkChoice {
fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> {
self.fork_infos
.get(&slot_hash_key)
.get(slot_hash_key)
.map(|fork_info| &fork_info.children[..])
}
}
@ -1497,7 +1497,7 @@ mod test {
.chain(std::iter::once(&duplicate_leaves_descended_from_4[1]))
{
assert!(heaviest_subtree_fork_choice
.children(&duplicate_leaf)
.children(duplicate_leaf)
.unwrap()
.is_empty(),);
}
@ -3116,11 +3116,11 @@ mod test {
let slot = slot_hash_key.0;
if slot <= duplicate_confirmed_slot {
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
} else {
assert!(!heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
}
assert!(heaviest_subtree_fork_choice
@ -3139,7 +3139,7 @@ mod test {
// 1) Be duplicate confirmed
// 2) Have no invalid ancestors
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(slot_hash_key)
@ -3149,7 +3149,7 @@ mod test {
// 1) Not be duplicate confirmed
// 2) Should have an invalid ancestor == `invalid_descendant_slot`
assert!(!heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert_eq!(
heaviest_subtree_fork_choice
@ -3162,7 +3162,7 @@ mod test {
// 1) Not be duplicate confirmed
// 2) Should not have an invalid ancestor
assert!(!heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(slot_hash_key)
@ -3186,7 +3186,7 @@ mod test {
// 1) Be duplicate confirmed
// 2) Have no invalid ancestors
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(slot_hash_key)
@ -3196,7 +3196,7 @@ mod test {
// 1) Not be duplicate confirmed
// 2) Should have an invalid ancestor == `invalid_descendant_slot`
assert!(!heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert_eq!(
heaviest_subtree_fork_choice
@ -3209,7 +3209,7 @@ mod test {
// 1) Not be duplicate confirmed
// 2) Should not have an invalid ancestor
assert!(!heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(slot_hash_key)
@ -3223,7 +3223,7 @@ mod test {
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key);
for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&slot_hash_key)
.is_duplicate_confirmed(slot_hash_key)
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(slot_hash_key)


@ -187,7 +187,7 @@ impl LedgerCleanupService {
*last_purge_slot = root;
let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if slots_to_clean {
let purge_complete = Arc::new(AtomicBool::new(false));


@ -36,7 +36,7 @@ impl OptimisticConfirmationVerifier {
.into_iter()
.filter(|(optimistic_slot, optimistic_hash)| {
(*optimistic_slot == root && *optimistic_hash != root_bank.hash())
|| (!root_ancestors.contains_key(&optimistic_slot) &&
|| (!root_ancestors.contains_key(optimistic_slot) &&
// In this second part of the `and`, we account for the possibility that
// there was some other root `rootX` set in BankForks where:
//


@ -292,7 +292,7 @@ impl PropagatedStats {
pub fn add_node_pubkey(&mut self, node_pubkey: &Pubkey, bank: &Bank) {
if !self.propagated_node_ids.contains(node_pubkey) {
let node_vote_accounts = bank
.epoch_vote_accounts_for_node_id(&node_pubkey)
.epoch_vote_accounts_for_node_id(node_pubkey)
.map(|v| &v.vote_accounts);
if let Some(node_vote_accounts) = node_vote_accounts {


@ -229,7 +229,7 @@ impl RepairService {
add_votes_elapsed = Measure::start("add_votes");
repair_weight.add_votes(
&blockstore,
blockstore,
slot_to_vote_pubkeys.into_iter(),
root_bank.epoch_stakes_map(),
root_bank.epoch_schedule(),
@ -277,7 +277,7 @@ impl RepairService {
let mut outstanding_requests = outstanding_requests.write().unwrap();
repairs.into_iter().for_each(|repair_request| {
if let Ok((to, req)) = serve_repair.repair_request(
&cluster_slots,
cluster_slots,
repair_request,
&mut cache,
&mut repair_stats,
@ -493,7 +493,7 @@ impl RepairService {
repair_validators,
);
if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr {
let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot);
let repairs = Self::generate_duplicate_repairs_for_slot(blockstore, *slot);
if let Some(repairs) = repairs {
let mut outstanding_requests = outstanding_requests.write().unwrap();
@ -535,7 +535,7 @@ impl RepairService {
nonce: Nonce,
) -> Result<()> {
let req =
serve_repair.map_repair_request(&repair_type, repair_pubkey, repair_stats, nonce)?;
serve_repair.map_repair_request(repair_type, repair_pubkey, repair_stats, nonce)?;
repair_socket.send_to(&req, to)?;
Ok(())
}


@ -495,7 +495,7 @@ impl RepairWeight {
for ((slot, _), _) in all_slots {
*self
.slot_to_tree
.get_mut(&slot)
.get_mut(slot)
.expect("Nodes in tree must exist in `self.slot_to_tree`") = root2;
}
}
@ -521,9 +521,9 @@ impl RepairWeight {
fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) {
slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| {
if stake_voted == stake_voted_ {
slot.cmp(&slot_)
slot.cmp(slot_)
} else {
stake_voted.cmp(&stake_voted_).reverse()
stake_voted.cmp(stake_voted_).reverse()
}
});
}
@ -757,7 +757,7 @@ mod test {
);
for slot in &[8, 10, 11] {
assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 8);
assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 8);
}
for slot in 0..=1 {
assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0);
@ -772,7 +772,7 @@ mod test {
);
for slot in &[8, 10, 11] {
assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0);
assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 0);
}
assert_eq!(repair_weight.trees.len(), 1);
assert!(repair_weight.trees.contains_key(&0));
@ -1088,10 +1088,10 @@ mod test {
let purged_slots = vec![0, 1, 2, 4, 8, 10];
let mut expected_unrooted_len = 0;
for purged_slot in &purged_slots {
assert!(!repair_weight.slot_to_tree.contains_key(&purged_slot));
assert!(!repair_weight.trees.contains_key(&purged_slot));
assert!(!repair_weight.slot_to_tree.contains_key(purged_slot));
assert!(!repair_weight.trees.contains_key(purged_slot));
if *purged_slot > 3 {
assert!(repair_weight.unrooted_slots.contains(&purged_slot));
assert!(repair_weight.unrooted_slots.contains(purged_slot));
expected_unrooted_len += 1;
}
}


@ -101,7 +101,7 @@ pub fn get_best_repair_shreds<'a>(
let new_repairs = RepairService::generate_repairs_for_slot(
blockstore,
slot,
&slot_meta,
slot_meta,
max_repairs - repairs.len(),
);
repairs.extend(new_repairs);


@ -563,7 +563,7 @@ impl ReplayStage {
}
Self::handle_votable_bank(
&vote_bank,
vote_bank,
&poh_recorder,
switch_fork_decision,
&bank_forks,
@ -757,8 +757,8 @@ impl ReplayStage {
Self::initialize_progress_and_fork_choice(
&root_bank,
frozen_banks,
&my_pubkey,
&vote_account,
my_pubkey,
vote_account,
)
}
@ -779,8 +779,8 @@ impl ReplayStage {
bank.slot(),
ForkProgress::new_from_bank(
bank,
&my_pubkey,
&vote_account,
my_pubkey,
vote_account,
prev_leader_slot,
0,
0,
@ -875,7 +875,7 @@ impl ReplayStage {
.expect("must exist based on earlier check")
{
descendants
.get_mut(&a)
.get_mut(a)
.expect("If exists in ancestor map must exist in descendants map")
.retain(|d| *d != slot && !slot_descendants.contains(d));
}
@ -885,9 +885,9 @@ impl ReplayStage {
// Purge all the descendants of this slot from both maps
for descendant in slot_descendants {
ancestors.remove(&descendant).expect("must exist");
ancestors.remove(descendant).expect("must exist");
descendants
.remove(&descendant)
.remove(descendant)
.expect("must exist based on earlier check");
}
descendants
@ -1345,7 +1345,7 @@ impl ReplayStage {
);
Self::handle_new_root(
new_root,
&bank_forks,
bank_forks,
progress,
accounts_background_request_sender,
highest_confirmed_root,
@ -1451,7 +1451,7 @@ impl ReplayStage {
let vote_ix = switch_fork_decision
.to_vote_instruction(
vote,
&vote_account_pubkey,
vote_account_pubkey,
&authorized_voter_keypair.pubkey(),
)
.expect("Switch threshold failure should not lead to voting");
@ -1603,9 +1603,9 @@ impl ReplayStage {
leader_schedule_cache: &LeaderScheduleCache,
) {
let next_leader_slot = leader_schedule_cache.next_leader_slot(
&my_pubkey,
my_pubkey,
bank.slot(),
&bank,
bank,
Some(blockstore),
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
);
@ -1683,7 +1683,7 @@ impl ReplayStage {
let bank_progress = &mut progress.entry(bank.slot()).or_insert_with(|| {
ForkProgress::new_from_bank(
&bank,
&my_pubkey,
my_pubkey,
vote_account,
prev_leader_slot,
num_blocks_on_fork,
@ -1694,13 +1694,13 @@ impl ReplayStage {
let root_slot = bank_forks.read().unwrap().root();
let replay_result = Self::replay_blockstore_into_bank(
&bank,
&blockstore,
blockstore,
bank_progress,
transaction_status_sender,
replay_vote_sender,
verify_recyclers,
);
Self::update_cost_model(&cost_model, &bank_progress.replay_stats.execute_timings);
Self::update_cost_model(cost_model, &bank_progress.replay_stats.execute_timings);
debug!(
"after replayed into bank, updated cost model instruction cost table, current values: {:?}",
cost_model.read().unwrap().get_instruction_cost_table()
@ -1779,7 +1779,7 @@ impl ReplayStage {
);
}
}
Self::record_rewards(&bank, &rewards_recorder_sender);
Self::record_rewards(&bank, rewards_recorder_sender);
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",
@ -1823,14 +1823,14 @@ impl ReplayStage {
my_vote_pubkey,
bank_slot,
bank.vote_accounts().into_iter(),
&ancestors,
ancestors,
|slot| progress.get_hash(slot),
latest_validator_votes_for_frozen_banks,
);
// Notify any listeners of the votes found in this newly computed
// bank
heaviest_subtree_fork_choice.compute_bank_stats(
&bank,
bank,
tower,
latest_validator_votes_for_frozen_banks,
);
@ -1899,7 +1899,7 @@ impl ReplayStage {
let mut cost_model_mutable = cost_model.write().unwrap();
for (program_id, stats) in &execute_timings.details.per_program_timings {
let cost = stats.0 / stats.1 as u64;
match cost_model_mutable.upsert_instruction_cost(&program_id, &cost) {
match cost_model_mutable.upsert_instruction_cost(program_id, &cost) {
Ok(c) => {
debug!(
"after replayed into bank, instruction {:?} has averaged cost {}",
@ -2013,9 +2013,9 @@ impl ReplayStage {
let selected_fork = {
let switch_fork_decision = tower.check_switch_threshold(
heaviest_bank.slot(),
&ancestors,
&descendants,
&progress,
ancestors,
descendants,
progress,
heaviest_bank.total_epoch_stake(),
heaviest_bank
.epoch_vote_accounts(heaviest_bank.epoch())
@ -2261,7 +2261,7 @@ impl ReplayStage {
.contains(vote_pubkey);
leader_propagated_stats.add_vote_pubkey(
*vote_pubkey,
leader_bank.epoch_vote_account_stake(&vote_pubkey),
leader_bank.epoch_vote_account_stake(vote_pubkey),
);
!exists
});
@ -2733,7 +2733,7 @@ mod tests {
&bank1,
bank1.collector_id(),
validator_node_to_vote_keys
.get(&bank1.collector_id())
.get(bank1.collector_id())
.unwrap(),
Some(0),
0,
@ -2990,7 +2990,7 @@ mod tests {
&bad_hash,
hashes_per_tick.saturating_sub(1),
vec![system_transaction::transfer(
&genesis_keypair,
genesis_keypair,
&keypair2.pubkey(),
2,
blockhash,
@ -3108,7 +3108,7 @@ mod tests {
entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash);
let last_entry_hash = entries.last().unwrap().hash;
let tx =
system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash);
system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash);
let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]);
entries.push(trailing_entry);
entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0)
@ -3188,7 +3188,7 @@ mod tests {
&mut bank0_progress,
None,
&replay_vote_sender,
&&VerifyRecyclers::default(),
&VerifyRecyclers::default(),
);
let rpc_subscriptions = Arc::new(RpcSubscriptions::new(
@ -3228,12 +3228,12 @@ mod tests {
#[test]
fn test_replay_commitment_cache() {
fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) {
let mut leader_vote_account = bank.get_account(&pubkey).unwrap();
let mut leader_vote_account = bank.get_account(pubkey).unwrap();
let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
vote_state.process_slot_vote_unchecked(vote_slot);
let versioned = VoteStateVersions::new_current(vote_state);
VoteState::to(&versioned, &mut leader_vote_account).unwrap();
bank.store_account(&pubkey, &leader_vote_account);
bank.store_account(pubkey, &leader_vote_account);
}
let leader_pubkey = solana_sdk::pubkey::new_rand();
@ -3773,7 +3773,7 @@ mod tests {
success_index: usize,
) {
let stake = 10_000;
let (bank_forks, _, _) = initialize_state(&all_keypairs, stake);
let (bank_forks, _, _) = initialize_state(all_keypairs, stake);
let root_bank = bank_forks.root_bank();
let mut propagated_stats = PropagatedStats {
total_epoch_stake: stake * all_keypairs.len() as u64,
@ -4407,7 +4407,7 @@ mod tests {
));
assert!(check_map_eq(
&descendants,
&bank_forks.read().unwrap().descendants()
bank_forks.read().unwrap().descendants()
));
// Try to purge the root
@ -4546,7 +4546,7 @@ mod tests {
// Record the vote for 4
tower.record_bank_vote(
&bank_forks.read().unwrap().get(4).unwrap(),
bank_forks.read().unwrap().get(4).unwrap(),
&Pubkey::default(),
);
@ -4746,7 +4746,7 @@ mod tests {
&cluster_info,
refresh_bank,
&poh_recorder,
Tower::last_voted_slot_in_bank(&refresh_bank, &my_vote_pubkey).unwrap(),
Tower::last_voted_slot_in_bank(refresh_bank, &my_vote_pubkey).unwrap(),
&my_vote_pubkey,
&my_vote_keypair,
&mut voted_signatures,
@ -5010,12 +5010,12 @@ mod tests {
progress,
&VoteTracker::default(),
&ClusterSlots::default(),
&bank_forks,
bank_forks,
heaviest_subtree_fork_choice,
latest_validator_votes_for_frozen_banks,
);
let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice
.select_forks(&frozen_banks, &tower, &progress, &ancestors, bank_forks);
.select_forks(&frozen_banks, tower, progress, ancestors, bank_forks);
assert!(heaviest_bank_on_same_fork.is_none());
let SelectVoteAndResetForkResult {
vote_bank,
@ -5024,8 +5024,8 @@ mod tests {
} = ReplayStage::select_vote_and_reset_forks(
&heaviest_bank,
heaviest_bank_on_same_fork.as_ref(),
&ancestors,
&descendants,
ancestors,
descendants,
progress,
tower,
latest_validator_votes_for_frozen_banks,

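One variant in the hunks above is `&&VerifyRecyclers::default()` becoming `&VerifyRecyclers::default()`: the call site borrowed a reference to a temporary, handing the callee a `&&T` where it only needs `&T`. A small sketch, with a hypothetical `Recyclers` type in place of `VerifyRecyclers`:

    #[derive(Default)]
    struct Recyclers {
        buffers: usize,
    }

    fn replay(recyclers: &Recyclers) -> usize {
        recyclers.buffers
    }

    fn main() {
        // before the fix: replay(&&Recyclers::default())
        assert_eq!(replay(&Recyclers::default()), 0);
    }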

@ -171,7 +171,7 @@ impl ServeRepair {
Self::run_window_request(
recycler,
from,
&from_addr,
from_addr,
blockstore,
&me.read().unwrap().my_info,
*slot,
@ -186,7 +186,7 @@ impl ServeRepair {
(
Self::run_highest_window_request(
recycler,
&from_addr,
from_addr,
blockstore,
*slot,
*highest_index,
@ -200,7 +200,7 @@ impl ServeRepair {
(
Self::run_orphan(
recycler,
&from_addr,
from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
@ -256,7 +256,7 @@ impl ServeRepair {
let mut time = Measure::start("repair::handle_packets");
for reqs in reqs_v {
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats);
}
time.stop();
if total_packets >= *max_packets {
@ -411,7 +411,7 @@ impl ServeRepair {
let (repair_peers, weighted_index) = match cache.entry(slot) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let repair_peers = self.repair_peers(&repair_validators, slot);
let repair_peers = self.repair_peers(repair_validators, slot);
if repair_peers.is_empty() {
return Err(Error::from(ClusterInfoError::NoPeers));
}


@ -28,7 +28,7 @@ impl ServeRepairService {
);
let t_receiver = streamer::receiver(
serve_repair_socket.clone(),
&exit,
exit,
request_sender,
Recycler::default(),
"serve_repair_receiver",


@ -145,7 +145,7 @@ impl ShredFetchStage {
.map(|s| {
streamer::receiver(
s,
&exit,
exit,
packet_sender.clone(),
recycler.clone(),
"packet_modifier",
@ -174,7 +174,7 @@ impl ShredFetchStage {
let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
sockets,
&exit,
exit,
sender.clone(),
recycler.clone(),
bank_forks.clone(),
@ -184,7 +184,7 @@ impl ShredFetchStage {
let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier(
forward_sockets,
&exit,
exit,
sender.clone(),
recycler.clone(),
bank_forks.clone(),
@ -194,7 +194,7 @@ impl ShredFetchStage {
let (repair_receiver, repair_handler) = Self::packet_modifier(
vec![repair_socket],
&exit,
exit,
sender.clone(),
recycler,
bank_forks,


@ -76,9 +76,9 @@ impl Tpu {
let fetch_stage = FetchStage::new_with_sender(
transactions_sockets,
tpu_forwards_sockets,
&exit,
exit,
&packet_sender,
&poh_recorder,
poh_recorder,
tpu_coalesce_ms,
);
let (verified_sender, verified_receiver) = unbounded();
@ -90,10 +90,10 @@ impl Tpu {
let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
let cluster_info_vote_listener = ClusterInfoVoteListener::new(
&exit,
exit,
cluster_info.clone(),
verified_vote_packets_sender,
&poh_recorder,
poh_recorder,
vote_tracker,
bank_forks,
subscriptions.clone(),
@ -106,7 +106,7 @@ impl Tpu {
);
let banking_stage = BankingStage::new(
&cluster_info,
cluster_info,
poh_recorder,
verified_receiver,
verified_vote_packets_receiver,
@ -120,7 +120,7 @@ impl Tpu {
cluster_info.clone(),
entry_receiver,
retransmit_slots_receiver,
&exit,
exit,
blockstore,
shred_version,
);


@ -152,7 +152,7 @@ impl Tvu {
repair_socket.clone(),
&fetch_sender,
Some(bank_forks.clone()),
&exit,
exit,
);
let (verified_sender, verified_receiver) = unbounded();
@ -172,11 +172,11 @@ impl Tvu {
bank_forks.clone(),
leader_schedule_cache,
blockstore.clone(),
&cluster_info,
cluster_info,
Arc::new(retransmit_sockets),
repair_socket,
verified_receiver,
&exit,
exit,
cluster_slots_update_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
cfg,
@ -211,7 +211,7 @@ impl Tvu {
accounts_hash_receiver,
pending_snapshot_package,
exit,
&cluster_info,
cluster_info,
tvu_config.trusted_validators.clone(),
tvu_config.halt_on_trusted_validators_accounts_hash_mismatch,
tvu_config.accounts_hash_fault_injection_slots,
@ -300,7 +300,7 @@ impl Tvu {
ledger_cleanup_slot_receiver,
blockstore.clone(),
max_ledger_shreds,
&exit,
exit,
compaction_interval,
max_compaction_jitter,
)
@ -308,7 +308,7 @@ impl Tvu {
let accounts_background_service = AccountsBackgroundService::new(
bank_forks.clone(),
&exit,
exit,
accounts_background_request_handler,
tvu_config.accounts_db_caching_enabled,
tvu_config.test_hash_calculation,


@ -116,7 +116,7 @@ mod tests {
if *unfrozen_vote_slot >= frozen_vote_slot {
let vote_hashes_map = unfrozen_gossip_verified_vote_hashes
.votes_per_slot
.get(&unfrozen_vote_slot)
.get(unfrozen_vote_slot)
.unwrap();
assert_eq!(vote_hashes_map.len(), num_duplicate_hashes);
for pubkey_votes in vote_hashes_map.values() {


@ -977,7 +977,7 @@ fn post_process_restored_tower(
})
.unwrap_or_else(|err| {
let voting_has_been_active =
active_vote_account_exists_in_bank(&bank_forks.working_bank(), &vote_account);
active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account);
if !err.is_file_missing() {
datapoint_error!(
"tower_error",
@ -1010,10 +1010,10 @@ fn post_process_restored_tower(
}
Tower::new_from_bankforks(
&bank_forks,
bank_forks,
tower_path,
&validator_identity,
&vote_account,
validator_identity,
vote_account,
)
})
}
@ -1081,9 +1081,9 @@ fn new_banks_from_ledger(
let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path);
let restored_tower = Tower::restore(tower_path, &validator_identity);
let restored_tower = Tower::restore(tower_path, validator_identity);
if let Ok(tower) = &restored_tower {
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| {
reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| {
error!("Failed to reconcile blockstore with tower: {:?}", err);
abort()
});
@ -1185,7 +1185,7 @@ fn new_banks_from_ledger(
None,
&snapshot_config.snapshot_package_output_path,
snapshot_config.archive_format,
Some(&bank_forks.root_bank().get_thread_pool()),
Some(bank_forks.root_bank().get_thread_pool()),
snapshot_config.maximum_snapshots_to_retain,
)
.unwrap_or_else(|err| {
@ -1197,9 +1197,9 @@ fn new_banks_from_ledger(
let tower = post_process_restored_tower(
restored_tower,
&validator_identity,
&vote_account,
&config,
validator_identity,
vote_account,
config,
tower_path,
&bank_forks,
);
@ -1404,7 +1404,7 @@ fn wait_for_supermajority(
);
}
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0);
let gossip_stake_percent = get_stake_percent_in_gossip(bank, cluster_info, i % 10 == 0);
if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
break;


@ -134,7 +134,7 @@ fn verify_repair(
.map(|repair_meta| {
outstanding_requests.register_response(
repair_meta.nonce,
&shred,
shred,
solana_sdk::timing::timestamp(),
)
})
@ -153,7 +153,7 @@ fn prune_shreds_invalid_repair(
let mut outstanding_requests = outstanding_requests.write().unwrap();
shreds.retain(|shred| {
let should_keep = (
verify_repair(&mut outstanding_requests, &shred, &repair_infos[i]),
verify_repair(&mut outstanding_requests, shred, &repair_infos[i]),
i += 1,
)
.0;
@ -630,7 +630,7 @@ mod test {
keypair: &Arc<Keypair>,
) -> Vec<Shred> {
let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap();
shredder.entries_to_shreds(&entries, true, 0).0
shredder.entries_to_shreds(entries, true, 0).0
}
#[test]


@ -188,7 +188,7 @@ impl Tower {
.delayed_votes
.iter()
.enumerate()
.map(|(i, v)| (*scores.get(&v).unwrap_or(&0), v.time, i))
.map(|(i, v)| (*scores.get(v).unwrap_or(&0), v.time, i))
.collect();
// highest score, latest vote first
best.sort_unstable();
@ -542,7 +542,7 @@ fn test_with_partitions(
let mut scores: HashMap<Vote, usize> = HashMap::new();
towers.iter().for_each(|n| {
n.delayed_votes.iter().for_each(|v| {
*scores.entry(v.clone()).or_insert(0) += n.score(&v, &fork_tree);
*scores.entry(v.clone()).or_insert(0) += n.score(v, &fork_tree);
})
});
for tower in towers.iter_mut() {


@ -149,7 +149,7 @@ mod tests {
let check_hash_calculation = false;
let (deserialized_bank, _timing) = snapshot_utils::bank_from_archive(
&account_paths,
account_paths,
&[],
&old_bank_forks
.snapshot_config
@ -216,7 +216,7 @@ mod tests {
};
for slot in 0..last_slot {
let mut bank = Bank::new_from_parent(&bank_forks[slot], &Pubkey::default(), slot + 1);
f(&mut bank, &mint_keypair);
f(&mut bank, mint_keypair);
let bank = bank_forks.insert(bank);
// Set root to make sure we don't end up with too many account storage entries
// and to allow snapshotting of bank and the purging logic on status_cache to
@ -250,7 +250,7 @@ mod tests {
.unwrap();
let snapshot_package = snapshot_utils::process_accounts_package_pre(
snapshot_package,
Some(&last_bank.get_thread_pool()),
Some(last_bank.get_thread_pool()),
);
snapshot_utils::archive_snapshot_package(
&snapshot_package,
@ -277,12 +277,12 @@ mod tests {
|bank, mint_keypair| {
let key1 = Keypair::new().pubkey();
let tx =
system_transaction::transfer(&mint_keypair, &key1, 1, bank.last_blockhash());
system_transaction::transfer(mint_keypair, &key1, 1, bank.last_blockhash());
assert_eq!(bank.process_transaction(&tx), Ok(()));
let key2 = Keypair::new().pubkey();
let tx =
system_transaction::transfer(&mint_keypair, &key2, 0, bank.last_blockhash());
system_transaction::transfer(mint_keypair, &key2, 0, bank.last_blockhash());
assert_eq!(bank.process_transaction(&tx), Ok(()));
bank.freeze();
@ -294,7 +294,7 @@ mod tests {
fn goto_end_of_slot(bank: &mut Bank) {
let mut tick_hash = bank.last_blockhash();
loop {
tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]);
tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
bank.register_tick(&tick_hash);
if tick_hash == bank.last_blockhash() {
bank.freeze();
@ -349,7 +349,7 @@ mod tests {
);
let slot = bank.slot();
let key1 = Keypair::new().pubkey();
let tx = system_transaction::transfer(&mint_keypair, &key1, 1, genesis_config.hash());
let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash());
assert_eq!(bank.process_transaction(&tx), Ok(()));
bank.squash();
let accounts_hash = bank.update_accounts_hash();
@ -368,9 +368,9 @@ mod tests {
snapshot_utils::snapshot_bank(
&bank,
vec![],
&package_sender,
&snapshot_path,
&snapshot_package_output_path,
package_sender,
snapshot_path,
snapshot_package_output_path,
snapshot_config.snapshot_version,
&snapshot_config.archive_format,
None,
@ -428,7 +428,7 @@ mod tests {
// Purge all the outdated snapshots, including the ones needed to generate the package
// currently sitting in the channel
snapshot_utils::purge_old_snapshots(&snapshot_path);
snapshot_utils::purge_old_snapshots(snapshot_path);
assert!(snapshot_utils::get_snapshot_paths(&snapshots_dir)
.into_iter()
.map(|path| path.slot)
@ -575,14 +575,14 @@ mod tests {
(MAX_CACHE_ENTRIES * 2 + 1) as u64,
|bank, mint_keypair| {
let tx = system_transaction::transfer(
&mint_keypair,
mint_keypair,
&key1,
1,
bank.parent().unwrap().last_blockhash(),
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
let tx = system_transaction::transfer(
&mint_keypair,
mint_keypair,
&key2,
1,
bank.parent().unwrap().last_blockhash(),


@ -96,14 +96,14 @@ fn run_dos(
let res = rpc_client
.as_ref()
.unwrap()
.get_account(&Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap());
.get_account(&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap());
if res.is_err() {
error_count += 1;
}
}
"get_program_accounts" => {
let res = rpc_client.as_ref().unwrap().get_program_accounts(
&Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap(),
&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap(),
);
if res.is_err() {
error_count += 1;


@ -654,7 +654,7 @@ mod tests {
#[test]
fn test_process_faucet_request() {
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::new(&to.as_ref());
let blockhash = Hash::new(to.as_ref());
let lamports = 50;
let req = FaucetRequest::GetAirdrop {
lamports,
@ -679,6 +679,6 @@ mod tests {
assert_eq!(expected_vec_with_length, response_vec);
let bad_bytes = "bad bytes".as_bytes();
assert!(faucet.process_faucet_request(&bad_bytes, ip).is_err());
assert!(faucet.process_faucet_request(bad_bytes, ip).is_err());
}
}


@ -12,7 +12,7 @@ fn test_local_faucet() {
let keypair = Keypair::new();
let to = solana_sdk::pubkey::new_rand();
let lamports = 50;
let blockhash = Hash::new(&to.as_ref());
let blockhash = Hash::new(to.as_ref());
let create_instruction = system_instruction::transfer(&keypair.pubkey(), &to, lamports);
let message = Message::new(&[create_instruction], Some(&keypair.pubkey()));
let expected_tx = Transaction::new(&[&keypair], message, blockhash);

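The `Hash::new(&to.as_ref())` changes in the faucet tests drop a borrow of a value that `as_ref()` already returns by reference. A sketch under that assumption, with a hypothetical `checksum` function in place of `Hash::new`:

    fn checksum(bytes: &[u8]) -> u64 {
        bytes.iter().map(|&b| u64::from(b)).sum()
    }

    fn main() {
        let to = [1u8, 2, 3];
        // before the fix: checksum(&to.as_ref()), i.e. a `&&[u8]` argument
        let digest = checksum(to.as_ref()); // `as_ref()` on the array already yields `&[u8]`
        assert_eq!(digest, 6);
    }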

@ -224,7 +224,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream {
if filter_serde_attrs(&variant.attrs) {
continue;
};
let sample_variant = quote_sample_variant(&type_name, &ty_generics, &variant);
let sample_variant = quote_sample_variant(type_name, &ty_generics, variant);
variant_count = if let Some(variant_count) = variant_count.checked_add(1) {
variant_count
} else {
@ -319,7 +319,7 @@ fn test_mod_name(type_name: &Ident) -> Ident {
#[cfg(RUSTC_WITH_SPECIALIZATION)]
fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream {
let type_name = &input.ident;
let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest);
let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest);
let result = quote! {
#input
#test
@ -330,7 +330,7 @@ fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream
#[cfg(RUSTC_WITH_SPECIALIZATION)]
fn frozen_abi_struct_type(input: ItemStruct, expected_digest: &str) -> TokenStream {
let type_name = &input.ident;
let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest);
let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest);
let result = quote! {
#input
#test
@ -387,7 +387,7 @@ fn quote_sample_variant(
#[cfg(RUSTC_WITH_SPECIALIZATION)]
fn frozen_abi_enum_type(input: ItemEnum, expected_digest: &str) -> TokenStream {
let type_name = &input.ident;
let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest);
let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest);
let result = quote! {
#input
#test


@ -468,7 +468,7 @@ impl<T: Serialize + ?Sized + AbiEnumVisitor> AbiEnumVisitor for &T {
default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult {
info!("AbiEnumVisitor for (&default): {}", type_name::<T>());
// Don't call self.visit_for_abi(...) to avoid the infinite recursion!
T::visit_for_abi(&self, digester)
T::visit_for_abi(self, digester)
}
}


@ -28,7 +28,7 @@ fn load_local_genesis(
ledger_path: &std::path::Path,
expected_genesis_hash: Option<Hash>,
) -> Result<GenesisConfig, String> {
let existing_genesis = GenesisConfig::load(&ledger_path)
let existing_genesis = GenesisConfig::load(ledger_path)
.map_err(|err| format!("Failed to load genesis config: {}", err))?;
check_genesis_hash(&existing_genesis, expected_genesis_hash)?;
@ -54,12 +54,12 @@ pub fn download_then_check_genesis_hash(
{
unpack_genesis_archive(
&tmp_genesis_package,
&ledger_path,
ledger_path,
max_genesis_archive_unpacked_size,
)
.map_err(|err| format!("Failed to unpack downloaded genesis config: {}", err))?;
let downloaded_genesis = GenesisConfig::load(&ledger_path)
let downloaded_genesis = GenesisConfig::load(ledger_path)
.map_err(|err| format!("Failed to load downloaded genesis config: {}", err))?;
check_genesis_hash(&downloaded_genesis, expected_genesis_hash)?;


@ -231,20 +231,20 @@ pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lampo
issued_lamports += add_stakes(
genesis_config,
&CREATOR_STAKER_INFOS,
CREATOR_STAKER_INFOS,
&UNLOCKS_HALF_AT_9_MONTHS,
) + add_stakes(
genesis_config,
&SERVICE_STAKER_INFOS,
SERVICE_STAKER_INFOS,
&UNLOCKS_ALL_AT_9_MONTHS,
) + add_stakes(
genesis_config,
&FOUNDATION_STAKER_INFOS,
FOUNDATION_STAKER_INFOS,
&UNLOCKS_ALL_DAY_ZERO,
) + add_stakes(genesis_config, &GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO)
) + add_stakes(genesis_config, GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO)
+ add_stakes(
genesis_config,
&COMMUNITY_STAKER_INFOS,
COMMUNITY_STAKER_INFOS,
&UNLOCKS_ALL_DAY_ZERO,
);


@ -534,9 +534,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
);
let vote_account = vote_state::create_account_with_authorized(
&identity_pubkey,
&identity_pubkey,
&identity_pubkey,
identity_pubkey,
identity_pubkey,
identity_pubkey,
commission,
VoteState::get_rent_exempt_reserve(&rent).max(1),
);
@ -546,8 +546,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
stake_state::create_account(
bootstrap_stake_authorized_pubkey
.as_ref()
.unwrap_or(&identity_pubkey),
&vote_pubkey,
.unwrap_or(identity_pubkey),
vote_pubkey,
&vote_account,
&rent,
bootstrap_validator_stake_lamports,
@ -782,7 +782,7 @@ mod tests {
let pubkey = &pubkey_str.parse().unwrap();
assert_eq!(
b64_account.balance,
genesis_config.accounts[&pubkey].lamports,
genesis_config.accounts[pubkey].lamports,
);
}


@ -265,7 +265,7 @@ impl PruneData {
destination: Pubkey::new_unique(),
wallclock,
};
prune_data.sign(&self_keypair);
prune_data.sign(self_keypair);
prune_data
}
}
@ -1325,7 +1325,7 @@ impl ClusterInfo {
if r_stake == l_stake {
peers[*r_info].id.cmp(&peers[*l_info].id)
} else {
r_stake.cmp(&l_stake)
r_stake.cmp(l_stake)
}
})
.collect();
@ -1638,7 +1638,7 @@ impl ClusterInfo {
generate_pull_requests: bool,
require_stake_for_gossip: bool,
) -> Vec<(SocketAddr, Protocol)> {
self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, &stakes);
self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, stakes);
// This will flush local pending push messages before generating
// pull-request bloom filters, preventing pull responses to return the
// same values back to the node itself. Note that packets will arrive
@ -1649,7 +1649,7 @@ impl ClusterInfo {
.add_relaxed(out.len() as u64);
if generate_pull_requests {
let (pings, pull_requests) =
self.new_pull_requests(&thread_pool, gossip_validators, stakes);
self.new_pull_requests(thread_pool, gossip_validators, stakes);
self.stats
.packets_sent_pull_requests_count
.add_relaxed(pull_requests.len() as u64);
@ -2193,7 +2193,7 @@ impl ClusterInfo {
if !responses.is_empty() {
let timeouts = {
let gossip = self.gossip.read().unwrap();
gossip.make_timeouts(&stakes, epoch_duration)
gossip.make_timeouts(stakes, epoch_duration)
};
for (from, data) in responses {
self.handle_pull_response(&from, data, &timeouts);


@ -143,14 +143,14 @@ impl ContactInfo {
}
let tpu = *bind_addr;
let gossip = next_port(&bind_addr, 1);
let tvu = next_port(&bind_addr, 2);
let tpu_forwards = next_port(&bind_addr, 3);
let tvu_forwards = next_port(&bind_addr, 4);
let repair = next_port(&bind_addr, 5);
let gossip = next_port(bind_addr, 1);
let tvu = next_port(bind_addr, 2);
let tpu_forwards = next_port(bind_addr, 3);
let tvu_forwards = next_port(bind_addr, 4);
let repair = next_port(bind_addr, 5);
let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
let serve_repair = next_port(&bind_addr, 6);
let serve_repair = next_port(bind_addr, 6);
Self {
id: *pubkey,
gossip,


@ -325,7 +325,7 @@ impl CrdsGossip {
assert!(timeouts.contains_key(&Pubkey::default()));
rv = self
.pull
.purge_active(thread_pool, &mut self.crds, now, &timeouts);
.purge_active(thread_pool, &mut self.crds, now, timeouts);
}
self.crds
.trim_purged(now.saturating_sub(5 * self.pull.crds_timeout));


@ -277,7 +277,7 @@ impl CrdsGossipPush {
let (weights, peers): (Vec<_>, Vec<_>) = self
.push_options(
crds,
&self_id,
self_id,
self_shred_version,
stakes,
gossip_validators,


@ -71,7 +71,7 @@ impl Signable for CrdsValue {
fn verify(&self) -> bool {
self.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow())
.verify(self.pubkey().as_ref(), self.signable_data().borrow())
}
}
@ -853,9 +853,9 @@ mod test {
wrong_keypair: &Keypair,
) {
assert!(!value.verify());
value.sign(&correct_keypair);
value.sign(correct_keypair);
assert!(value.verify());
value.sign(&wrong_keypair);
value.sign(wrong_keypair);
assert!(!value.verify());
serialize_deserialize_value(value, correct_keypair);
}


@ -49,7 +49,7 @@ impl GossipService {
);
let t_receiver = streamer::receiver(
gossip_socket.clone(),
&exit,
exit,
request_sender,
Recycler::default(),
"gossip_receiver",
@ -319,7 +319,7 @@ fn make_gossip_node(
gossip_socket,
None,
should_check_duplicate_instance,
&exit,
exit,
);
(gossip_service, ip_echo, cluster_info)
}


@ -225,7 +225,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
.value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let identity_keypair = keypair_of(&matches, "identity").map(Arc::new);
let identity_keypair = keypair_of(matches, "identity").map(Arc::new);
let entrypoint_addr = parse_entrypoint(matches);
@ -270,7 +270,7 @@ fn parse_entrypoint(matches: &ArgMatches) -> Option<SocketAddr> {
fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
let any = matches.is_present("any");
let all = matches.is_present("all");
let entrypoint_addr = parse_entrypoint(&matches);
let entrypoint_addr = parse_entrypoint(matches);
let timeout = value_t_or_exit!(matches, "timeout", u64);
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let (_all_peers, validators) = discover(


@ -240,7 +240,7 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network) {
let num = network.len();
let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, num * 2, 0.9);
let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, num * 2, 0.9);
trace!(
"network_simulator_pull_{}: converged: {} total_bytes: {}",
num,
@ -253,7 +253,7 @@ fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network)
fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_convergance: f64) {
let num = network.len();
// run for a small amount of time
let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, 10, 1.0);
let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, 10, 1.0);
trace!("network_simulator_push_{}: converged: {}", num, converged);
// make sure there is someone in the active set
let network_values: Vec<Node> = network.values().cloned().collect();
@ -292,7 +292,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
bytes_tx
);
// pull for a bit
let (converged, bytes_tx) = network_run_pull(&thread_pool, network, start, end, 1.0);
let (converged, bytes_tx) = network_run_pull(thread_pool, network, start, end, 1.0);
total_bytes += bytes_tx;
trace!(
"network_simulator_push_{}: converged: {} bytes: {} total_bytes: {}",
@ -466,7 +466,7 @@ fn network_run_pull(
.lock()
.unwrap()
.new_pull_request(
&thread_pool,
thread_pool,
from.keypair.deref(),
now,
None,


@ -548,7 +548,7 @@ pub fn init(
init_or_update(config_file, true, false)?;
let path_modified = if !no_modify_path {
add_to_path(&config.active_release_bin_dir().to_str().unwrap())
add_to_path(config.active_release_bin_dir().to_str().unwrap())
} else {
false
};
@ -613,10 +613,10 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(),
return Ok(());
}
println_name_value("Configuration:", &config_file);
println_name_value("Configuration:", config_file);
println_name_value(
"Active release directory:",
&config.active_release_dir().to_str().unwrap_or("?"),
config.active_release_dir().to_str().unwrap_or("?"),
);
fn print_release_version(config: &Config) {
@ -633,14 +633,14 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(),
if let Some(explicit_release) = &config.explicit_release {
match explicit_release {
ExplicitRelease::Semver(release_semver) => {
println_name_value(&format!("{}Release version:", BULLET), &release_semver);
println_name_value(&format!("{}Release version:", BULLET), release_semver);
println_name_value(
&format!("{}Release URL:", BULLET),
&github_release_download_url(release_semver),
);
}
ExplicitRelease::Channel(release_channel) => {
println_name_value(&format!("{}Release channel:", BULLET), &release_channel);
println_name_value(&format!("{}Release channel:", BULLET), release_channel);
println_name_value(
&format!("{}Release URL:", BULLET),
&release_channel_download_url(release_channel),
@ -659,7 +659,7 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(),
Some(ref update_manifest) => {
println_name_value("Installed version:", "");
print_release_version(&config);
print_update_manifest(&update_manifest);
print_update_manifest(update_manifest);
}
None => {
println_name_value("Installed version:", "None");


@ -18,7 +18,7 @@ mod stop_process;
mod update_manifest;
pub fn is_semver(semver: &str) -> Result<(), String> {
match semver::Version::parse(&semver) {
match semver::Version::parse(semver) {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
@ -60,10 +60,10 @@ pub fn explicit_release_of(
fn handle_init(matches: &ArgMatches<'_>, config_file: &str) -> Result<(), String> {
let json_rpc_url = matches.value_of("json_rpc_url").unwrap();
let update_manifest_pubkey = pubkey_of(&matches, "update_manifest_pubkey");
let update_manifest_pubkey = pubkey_of(matches, "update_manifest_pubkey");
let data_dir = matches.value_of("data_dir").unwrap();
let no_modify_path = matches.is_present("no_modify_path");
let explicit_release = explicit_release_of(&matches, "explicit_release");
let explicit_release = explicit_release_of(matches, "explicit_release");
if update_manifest_pubkey.is_none() && explicit_release.is_none() {
Err(format!(
@ -98,7 +98,7 @@ pub fn main() -> Result<(), String> {
.global(true)
.help("Configuration file to use");
match *defaults::CONFIG_FILE {
Some(ref config_file) => arg.default_value(&config_file),
Some(ref config_file) => arg.default_value(config_file),
None => arg.required(true),
}
})
@ -115,7 +115,7 @@ pub fn main() -> Result<(), String> {
.required(true)
.help("Directory to store install data");
match *defaults::DATA_DIR {
Some(ref data_dir) => arg.default_value(&data_dir),
Some(ref data_dir) => arg.default_value(data_dir),
None => arg,
}
})
@ -181,7 +181,7 @@ pub fn main() -> Result<(), String> {
.required(true)
.help("Keypair file of the account that funds the deployment");
match *defaults::USER_KEYPAIR {
Some(ref config_file) => arg.default_value(&config_file),
Some(ref config_file) => arg.default_value(config_file),
None => arg,
}
})
@ -242,7 +242,7 @@ pub fn main() -> Result<(), String> {
let config_file = matches.value_of("config_file").unwrap();
match matches.subcommand() {
("init", Some(matches)) => handle_init(&matches, &config_file),
("init", Some(matches)) => handle_init(matches, config_file),
("info", Some(matches)) => {
let local_info_only = matches.is_present("local_info_only");
let eval = matches.is_present("eval");
@ -290,7 +290,7 @@ pub fn main_init() -> Result<(), String> {
.takes_value(true)
.help("Configuration file to use");
match *defaults::CONFIG_FILE {
Some(ref config_file) => arg.default_value(&config_file),
Some(ref config_file) => arg.default_value(config_file),
None => arg.required(true),
}
})
@ -303,7 +303,7 @@ pub fn main_init() -> Result<(), String> {
.required(true)
.help("Directory to store install data");
match *defaults::DATA_DIR {
Some(ref data_dir) => arg.default_value(&data_dir),
Some(ref data_dir) => arg.default_value(data_dir),
None => arg,
}
})
@ -342,5 +342,5 @@ pub fn main_init() -> Result<(), String> {
.get_matches();
let config_file = matches.value_of("config_file").unwrap();
handle_init(&matches, &config_file)
handle_init(&matches, config_file)
}


@ -153,9 +153,9 @@ fn output_keypair(
) -> Result<(), Box<dyn error::Error>> {
if outfile == STDOUT_OUTFILE_TOKEN {
let mut stdout = std::io::stdout();
write_keypair(&keypair, &mut stdout)?;
write_keypair(keypair, &mut stdout)?;
} else {
write_keypair_file(&keypair, outfile)?;
write_keypair_file(keypair, outfile)?;
println!("Wrote {} keypair to {}", source, outfile);
}
Ok(())
@ -342,7 +342,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *CONFIG_FILE {
arg.default_value(&config_file)
arg.default_value(config_file)
} else {
arg
}
@ -539,7 +539,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
if matches.is_present("outfile") {
let outfile = matches.value_of("outfile").unwrap();
check_for_overwrite(&outfile, &matches);
check_for_overwrite(outfile, matches);
write_pubkey_file(outfile, pubkey)?;
} else {
println!("{}", pubkey);
@ -558,7 +558,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
match outfile {
Some(STDOUT_OUTFILE_TOKEN) => (),
Some(outfile) => check_for_overwrite(&outfile, &matches),
Some(outfile) => check_for_overwrite(outfile, matches),
None => (),
}
@ -577,7 +577,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
let keypair = keypair_from_seed(seed.as_bytes())?;
if let Some(outfile) = outfile {
output_keypair(&keypair, &outfile, "new")
output_keypair(&keypair, outfile, "new")
.map_err(|err| format!("Unable to write {}: {}", outfile, err))?;
}
@ -600,7 +600,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
};
if outfile != STDOUT_OUTFILE_TOKEN {
check_for_overwrite(&outfile, &matches);
check_for_overwrite(outfile, matches);
}
let keypair_name = "recover";
@ -610,7 +610,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)?
};
output_keypair(&keypair, &outfile, "recovered")?;
output_keypair(&keypair, outfile, "recovered")?;
}
("grind", Some(matches)) => {
let ignore_case = matches.is_present("ignore_case");


@ -405,7 +405,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata");
let force_reupload = arg_matches.is_present("force_reupload");
let blockstore =
crate::open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
crate::open_blockstore(ledger_path, AccessType::TryPrimaryThenSecondary, None);
runtime.block_on(upload(
blockstore,


@ -136,7 +136,7 @@ fn output_entry(
.map(|transaction_status| transaction_status.into());
solana_cli_output::display::println_transaction(
&transaction,
transaction,
&transaction_status,
" ",
None,
@ -455,7 +455,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
let mut lowest_total_stake = 0;
for (node_pubkey, (last_vote_slot, vote_state, stake, total_stake)) in &last_votes {
all_votes.entry(*node_pubkey).and_modify(|validator_votes| {
validator_votes.remove(&last_vote_slot);
validator_votes.remove(last_vote_slot);
});
dot.push(format!(
@ -475,7 +475,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
dot.push(format!(
r#" "last vote {}" -> "{}" [style=dashed,label="latest vote"];"#,
node_pubkey,
if styled_slots.contains(&last_vote_slot) {
if styled_slots.contains(last_vote_slot) {
last_vote_slot.to_string()
} else {
if *last_vote_slot < lowest_last_vote_slot {
@ -522,7 +522,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
r#" "{} vote {}" -> "{}" [style=dotted,label="vote"];"#,
node_pubkey,
vote_slot,
if styled_slots.contains(&vote_slot) {
if styled_slots.contains(vote_slot) {
vote_slot.to_string()
} else {
"...".to_string()
@ -714,8 +714,8 @@ fn load_bank_forks(
};
bank_forks_utils::load(
&genesis_config,
&blockstore,
genesis_config,
blockstore,
account_paths,
None,
snapshot_config.as_ref(),
@ -747,7 +747,7 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String>
transactions += entry.transactions.len();
for transaction in &entry.transactions {
programs += transaction.message().instructions.len();
let tx_cost = cost_model.calculate_cost(&transaction);
let tx_cost = cost_model.calculate_cost(transaction);
if cost_tracker.try_add(tx_cost).is_err() {
println!(
"Slot: {}, CostModel rejected transaction {:?}, stats {:?}!",
@ -887,7 +887,7 @@ fn main() {
.long("maximum-snapshots-to-retain")
.value_name("NUMBER")
.takes_value(true)
.default_value(&default_max_snapshot_to_retain)
.default_value(default_max_snapshot_to_retain)
.help("Maximum number of snapshots to hold on to during snapshot purge");
let rent = Rent::default();
@ -1927,14 +1927,14 @@ fn main() {
let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts");
let new_hard_forks = hardforks_of(arg_matches, "hard_forks");
let faucet_pubkey = pubkey_of(&arg_matches, "faucet_pubkey");
let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey");
let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0);
let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8);
let hashes_per_tick = arg_matches.value_of("hashes_per_tick");
let bootstrap_stake_authorized_pubkey =
pubkey_of(&arg_matches, "bootstrap_stake_authorized_pubkey");
pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey");
let bootstrap_validator_lamports =
value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64);
let bootstrap_validator_stake_lamports =
@ -1948,9 +1948,9 @@ fn main() {
);
exit(1);
}
let bootstrap_validator_pubkeys = pubkeys_of(&arg_matches, "bootstrap_validator");
let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator");
let accounts_to_remove =
pubkeys_of(&arg_matches, "accounts_to_remove").unwrap_or_default();
pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default();
let snapshot_version =
arg_matches
.value_of("snapshot_version")
@ -2105,9 +2105,9 @@ fn main() {
);
let vote_account = vote_state::create_account_with_authorized(
&identity_pubkey,
&identity_pubkey,
&identity_pubkey,
identity_pubkey,
identity_pubkey,
identity_pubkey,
100,
VoteState::get_rent_exempt_reserve(&rent).max(1),
);
@ -2117,8 +2117,8 @@ fn main() {
&stake_state::create_account(
bootstrap_stake_authorized_pubkey
.as_ref()
.unwrap_or(&identity_pubkey),
&vote_pubkey,
.unwrap_or(identity_pubkey),
vote_pubkey,
&vote_account,
&rent,
bootstrap_validator_stake_lamports,
@ -2544,7 +2544,7 @@ fn main() {
}
};
let warped_bank = Bank::new_from_parent_with_tracer(
&base_bank,
base_bank,
base_bank.collector_id(),
next_epoch,
tracer,
@ -2561,7 +2561,7 @@ fn main() {
println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot());
println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch());
assert_capitalization(&base_bank);
assert_capitalization(base_bank);
assert_capitalization(&warped_bank);
let interest_per_epoch = ((warped_bank.capitalization() as f64)
/ (base_bank.capitalization() as f64)
@ -2589,7 +2589,7 @@ fn main() {
pubkey,
account,
base_bank
.get_account(&pubkey)
.get_account(pubkey)
.map(|a| a.lamports())
.unwrap_or_default(),
)
@ -2788,7 +2788,7 @@ fn main() {
);
}
assert_capitalization(&bank);
assert_capitalization(bank);
println!("Inflation: {:?}", bank.inflation());
println!("RentCollector: {:?}", bank.rent_collector());
println!("Capitalization: {}", Sol(bank.capitalization()));

View File

@ -39,11 +39,11 @@ fn nominal() {
let ledger_path = ledger_path.to_str().unwrap();
// Basic validation
let output = run_ledger_tool(&["-l", &ledger_path, "verify"]);
let output = run_ledger_tool(&["-l", ledger_path, "verify"]);
assert!(output.status.success());
// Print everything
let output = run_ledger_tool(&["-l", &ledger_path, "print", "-vvv"]);
let output = run_ledger_tool(&["-l", ledger_path, "print", "-vvv"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks + meta_lines);
}

View File

@ -58,8 +58,8 @@ pub fn load(
)
{
return load_from_snapshot(
&genesis_config,
&blockstore,
genesis_config,
blockstore,
account_paths,
shrink_paths,
snapshot_config,
@ -79,8 +79,8 @@ pub fn load(
}
load_from_genesis(
&genesis_config,
&blockstore,
genesis_config,
blockstore,
account_paths,
process_options,
cache_block_meta_sender,
@ -97,8 +97,8 @@ fn load_from_genesis(
info!("Processing ledger from genesis");
to_loadresult(
blockstore_processor::process_blockstore(
&genesis_config,
&blockstore,
genesis_config,
blockstore,
account_paths,
process_options,
cache_block_meta_sender,

View File

@ -705,7 +705,7 @@ impl Blockstore {
for (&(slot, set_index), erasure_meta) in erasure_metas.iter() {
let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
let index = &mut index_meta_entry.index;
match erasure_meta.status(&index) {
match erasure_meta.status(index) {
ErasureMetaStatus::CanRecover => {
Self::recover_shreds(
index,
@ -838,7 +838,7 @@ impl Blockstore {
let mut num_recovered_exists = 0;
if let Some(leader_schedule_cache) = leader_schedule {
let recovered_data = Self::try_shred_recovery(
&db,
db,
&erasure_metas,
&mut index_working_set,
&mut just_inserted_data_shreds,
@ -1135,14 +1135,14 @@ impl Blockstore {
let maybe_shred = self.get_coding_shred(slot, coding_index);
if let Ok(Some(shred_data)) = maybe_shred {
let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
if Self::erasure_mismatch(&potential_shred, &shred) {
if Self::erasure_mismatch(&potential_shred, shred) {
conflicting_shred = Some(potential_shred.payload);
}
break;
} else if let Some(potential_shred) =
just_received_coding_shreds.get(&(slot, coding_index))
{
if Self::erasure_mismatch(&potential_shred, &shred) {
if Self::erasure_mismatch(potential_shred, shred) {
conflicting_shred = Some(potential_shred.payload.clone());
}
break;
@ -1183,7 +1183,7 @@ impl Blockstore {
let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();
if !is_trusted {
if Self::is_data_shred_present(&shred, slot_meta, &index_meta.data()) {
if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
handle_duplicate(shred);
return Err(InsertDataShredError::Exists);
}
@ -1474,7 +1474,7 @@ impl Blockstore {
index as u32,
new_consumed,
shred.reference_tick(),
&data_index,
data_index,
);
if slot_meta.is_full() {
datapoint_info!(
@ -1689,7 +1689,7 @@ impl Blockstore {
}
break;
}
let (current_slot, index) = C::index(&db_iterator.key().expect("Expect a valid key"));
let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key"));
let current_index = {
if current_slot > slot {
@ -1702,7 +1702,7 @@ impl Blockstore {
let upper_index = cmp::min(current_index, end_index);
// the tick that will be used to figure out the timeout for this hole
let reference_tick = u64::from(Shred::reference_tick_from_data(
&db_iterator.value().expect("couldn't read value"),
db_iterator.value().expect("couldn't read value"),
));
if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS {
@ -2437,7 +2437,7 @@ impl Blockstore {
address_signatures.extend(
signatures
.into_iter()
.filter(|(_, signature)| !excluded_signatures.contains(&signature)),
.filter(|(_, signature)| !excluded_signatures.contains(signature)),
)
} else {
address_signatures.append(&mut signatures);
@ -2520,7 +2520,7 @@ impl Blockstore {
next_primary_index_iter_timer.stop();
let mut address_signatures: Vec<(Slot, Signature)> = address_signatures
.into_iter()
.filter(|(_, signature)| !until_excluded_signatures.contains(&signature))
.filter(|(_, signature)| !until_excluded_signatures.contains(signature))
.collect();
address_signatures.truncate(limit);
@ -2993,7 +2993,7 @@ impl Blockstore {
}
pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> {
let ancestor_iterator = AncestorIterator::new(self.last_root(), &self)
let ancestor_iterator = AncestorIterator::new(self.last_root(), self)
.take_while(|&slot| slot >= self.lowest_cleanup_slot());
let mut find_missing_roots = Measure::start("find_missing_roots");
@ -3278,8 +3278,8 @@ fn commit_slot_meta_working_set(
}
// Check if the working copy of the metadata has changed
if Some(meta) != meta_backup.as_ref() {
should_signal = should_signal || slot_has_updates(meta, &meta_backup);
write_batch.put::<cf::SlotMeta>(*slot, &meta)?;
should_signal = should_signal || slot_has_updates(meta, meta_backup);
write_batch.put::<cf::SlotMeta>(*slot, meta)?;
}
}
@ -3430,7 +3430,7 @@ fn handle_chaining_for_slot(
traverse_children_mut(
db,
slot,
&meta,
meta,
working_set,
new_chained_slots,
slot_function,
@ -3520,7 +3520,7 @@ pub fn create_new_ledger(
access_type: AccessType,
) -> Result<Hash> {
Blockstore::destroy(ledger_path)?;
genesis_config.write(&ledger_path)?;
genesis_config.write(ledger_path)?;
// Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?;

View File

@ -112,7 +112,7 @@ fn execute_batch(
let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
let pre_token_balances = if record_token_balances {
collect_token_balances(&bank, &batch, &mut mint_decimals)
collect_token_balances(bank, batch, &mut mint_decimals)
} else {
vec![]
};
@ -139,7 +139,7 @@ fn execute_batch(
if let Some(transaction_status_sender) = transaction_status_sender {
let txs = batch.transactions_iter().cloned().collect();
let post_token_balances = if record_token_balances {
collect_token_balances(&bank, &batch, &mut mint_decimals)
collect_token_balances(bank, batch, &mut mint_decimals)
} else {
vec![]
};
@ -327,7 +327,7 @@ fn process_entries_with_callback(
timings,
)?;
for hash in tick_hashes {
bank.register_tick(&hash);
bank.register_tick(hash);
}
Ok(())
}
@ -396,7 +396,7 @@ pub fn process_blockstore(
// Setup bank for slot 0
let bank0 = Bank::new_with_paths(
&genesis_config,
genesis_config,
account_paths,
&opts.frozen_accounts,
opts.debug_keys.clone(),
@ -896,9 +896,9 @@ fn process_next_slots(
// handles any partials
if next_meta.is_full() {
let next_bank = Arc::new(Bank::new_from_parent(
&bank,
bank,
&leader_schedule_cache
.slot_leader_at(*next_slot, Some(&bank))
.slot_leader_at(*next_slot, Some(bank))
.unwrap(),
*next_slot,
));
@ -1048,7 +1048,7 @@ fn load_frozen_forks(
*root = new_root_bank.slot();
last_root = new_root_bank.slot();
leader_schedule_cache.set_root(&new_root_bank);
leader_schedule_cache.set_root(new_root_bank);
new_root_bank.squash();
if last_free.elapsed() > Duration::from_secs(10) {
@ -3093,7 +3093,7 @@ pub mod tests {
account_paths: Vec<PathBuf>,
) -> EpochSchedule {
let bank = Bank::new_with_paths(
&genesis_config,
genesis_config,
account_paths,
&[],
None,
@ -3274,7 +3274,7 @@ pub mod tests {
slot_leader_keypair: &Arc<Keypair>,
) {
// Add votes to `last_slot` so that `root` will be confirmed
let vote_entry = next_entry(&parent_blockhash, 1, vec![vote_tx]);
let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
entries.insert(0, vote_entry);
blockstore
@ -3285,7 +3285,7 @@ pub mod tests {
ticks_per_slot,
Some(parent_slot),
true,
&slot_leader_keypair,
slot_leader_keypair,
entries,
0,
)

View File

@ -682,7 +682,7 @@ impl EntrySlice for [Entry] {
}
pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
let entry = Entry::new(&start, num_hashes, transactions);
let entry = Entry::new(start, num_hashes, transactions);
*start = entry.hash;
entry
}
@ -737,7 +737,7 @@ mod tests {
#[test]
fn test_entry_verify() {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let one = hash(zero.as_ref());
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
@ -826,7 +826,7 @@ mod tests {
fn test_verify_slice1() {
solana_logger::setup();
let zero = Hash::default();
let one = hash(&zero.as_ref());
let one = hash(zero.as_ref());
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
@ -841,8 +841,8 @@ mod tests {
fn test_verify_slice_with_hashes1() {
solana_logger::setup();
let zero = Hash::default();
let one = hash(&zero.as_ref());
let two = hash(&one.as_ref());
let one = hash(zero.as_ref());
let two = hash(one.as_ref());
assert!(vec![][..].verify(&one)); // base case
assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1
assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad
@ -861,8 +861,8 @@ mod tests {
fn test_verify_slice_with_hashes_and_transactions() {
solana_logger::setup();
let zero = Hash::default();
let one = hash(&zero.as_ref());
let two = hash(&one.as_ref());
let one = hash(zero.as_ref());
let two = hash(one.as_ref());
let alice_keypair = Keypair::new();
let bob_keypair = Keypair::new();
let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one);

View File

@ -63,9 +63,9 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
// Note: Use unstable sort, because we dedup right after to remove the equal elements.
stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| {
if r_stake == l_stake {
r_pubkey.cmp(&l_pubkey)
r_pubkey.cmp(l_pubkey)
} else {
r_stake.cmp(&l_stake)
r_stake.cmp(l_stake)
}
});

View File

@ -63,7 +63,7 @@ impl Poh {
let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes);
for _ in 0..num_hashes {
self.hash = hash(&self.hash.as_ref());
self.hash = hash(self.hash.as_ref());
}
self.num_hashes += num_hashes;
self.remaining_hashes -= num_hashes;
@ -77,7 +77,7 @@ impl Poh {
return None; // Caller needs to `tick()` first
}
self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]);
self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]);
let num_hashes = self.num_hashes + 1;
self.num_hashes = 0;
self.remaining_hashes -= 1;
@ -89,7 +89,7 @@ impl Poh {
}
pub fn tick(&mut self) -> Option<PohEntry> {
self.hash = hash(&self.hash.as_ref());
self.hash = hash(self.hash.as_ref());
self.num_hashes += 1;
self.remaining_hashes -= 1;
@ -115,7 +115,7 @@ pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 {
let mut v = Hash::default();
let start = Instant::now();
for _ in 0..hashes_sample_size {
v = hash(&v.as_ref());
v = hash(v.as_ref());
}
start.elapsed().as_nanos() as u64
}
@ -139,11 +139,11 @@ mod tests {
assert_ne!(entry.num_hashes, 0);
for _ in 1..entry.num_hashes {
current_hash = hash(&current_hash.as_ref());
current_hash = hash(current_hash.as_ref());
}
current_hash = match mixin {
Some(mixin) => hashv(&[&current_hash.as_ref(), &mixin.as_ref()]),
None => hash(&current_hash.as_ref()),
Some(mixin) => hashv(&[current_hash.as_ref(), mixin.as_ref()]),
None => hash(current_hash.as_ref()),
};
if current_hash != entry.hash {
return false;
@ -192,9 +192,9 @@ mod tests {
#[test]
fn test_poh_verify() {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let two = hash(&one.as_ref());
let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]);
let one = hash(zero.as_ref());
let two = hash(one.as_ref());
let one_with_zero = hashv(&[zero.as_ref(), zero.as_ref()]);
let mut poh = Poh::new(zero, None);
assert!(verify(
@ -262,7 +262,7 @@ mod tests {
(
PohEntry {
num_hashes: 1,
hash: hash(&one_with_zero.as_ref()),
hash: hash(one_with_zero.as_ref()),
},
None
)
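
The entry and Poh hunks above all drop a borrow of an `as_ref()` result: the hash value's `as_ref()` already yields a `&[u8]`, so `hash(&h.as_ref())` handed the hasher a `&&[u8]`. A hedged, stand-alone sketch of the same shape; `digest` is an illustrative stand-in, not the Solana hashing API:

fn digest(data: &[u8]) -> u64 {
    data.iter()
        .fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(u64::from(b)))
}

fn main() {
    let seed = [7u8; 32];
    // Before: `digest(&seed.as_ref())` passed a `&&[u8]` (clippy::needless_borrow);
    // after the fix the slice returned by `as_ref()` is passed directly.
    let value = digest(seed.as_ref());
    println!("{}", value);
}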

View File

@ -840,7 +840,7 @@ impl Shredder {
first_index: usize,
slot: Slot,
) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?;
let mut recovered_data = vec![];
let fec_set_size = num_data + num_coding;
@ -933,7 +933,7 @@ impl Shredder {
pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
use reed_solomon_erasure::Error::TooFewDataShards;
const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
Self::verify_consistent_shred_payload_sizes("deshred()", shreds)?;
let index = shreds.first().ok_or(TooFewDataShards)?.index();
let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i);
let data_complete = {

View File

@ -312,7 +312,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) {
);
let signature = keypair.sign_message(&packet.data[msg_start..msg_end]);
trace!("signature {:?}", signature);
packet.data[0..sig_end].copy_from_slice(&signature.as_ref());
packet.data[0..sig_end].copy_from_slice(signature.as_ref());
}
pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) {
@ -364,7 +364,7 @@ pub fn sign_shreds_gpu(
let mut elems = Vec::new();
let offset: usize = pinned_keypair.len();
let num_keypair_packets = vec_size_in_packets(&pinned_keypair);
let num_keypair_packets = vec_size_in_packets(pinned_keypair);
let mut num_packets = num_keypair_packets;
//should be zero

View File

@ -63,10 +63,10 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
.get_recent_blockhash_with_commitment(CommitmentConfig::confirmed())
.unwrap();
let mut transaction =
system_transaction::transfer(&funding_keypair, &random_keypair.pubkey(), 1, blockhash);
system_transaction::transfer(funding_keypair, &random_keypair.pubkey(), 1, blockhash);
let confs = VOTE_THRESHOLD_DEPTH + 1;
let sig = client
.retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 10, confs)
.retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs)
.unwrap();
for validator in &cluster_nodes {
if ignore_nodes.contains(&validator.id) {
@ -114,14 +114,14 @@ pub fn send_many_transactions(
let transfer_amount = thread_rng().gen_range(1, max_tokens_per_transfer);
let mut transaction = system_transaction::transfer(
&funding_keypair,
funding_keypair,
&random_keypair.pubkey(),
transfer_amount,
blockhash,
);
client
.retry_transfer(&funding_keypair, &mut transaction, 5)
.retry_transfer(funding_keypair, &mut transaction, 5)
.unwrap();
expected_balances.insert(random_keypair.pubkey(), transfer_amount);
@ -236,7 +236,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.unwrap();
let mut transaction = system_transaction::transfer(
&funding_keypair,
funding_keypair,
&random_keypair.pubkey(),
1,
blockhash,
@ -245,7 +245,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
let confs = VOTE_THRESHOLD_DEPTH + 1;
let sig = {
let sig = client.retry_transfer_until_confirmed(
&funding_keypair,
funding_keypair,
&mut transaction,
5,
confs,
@ -260,7 +260,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
}
};
info!("poll_all_nodes_for_signature()");
match poll_all_nodes_for_signature(&entry_point_info, &cluster_nodes, &sig, confs) {
match poll_all_nodes_for_signature(entry_point_info, &cluster_nodes, &sig, confs) {
Err(e) => {
info!("poll_all_nodes_for_signature() failed {:?}", e);
result = Err(e);
@ -377,7 +377,7 @@ fn poll_all_nodes_for_signature(
continue;
}
let client = create_client(validator.client_facing_addr(), VALIDATOR_PORT_RANGE);
client.poll_for_signature_confirmation(&sig, confs)?;
client.poll_for_signature_confirmation(sig, confs)?;
}
Ok(())

View File

@ -449,7 +449,7 @@ impl LocalCluster {
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())
.unwrap();
let mut tx =
system_transaction::transfer(&source_keypair, dest_pubkey, lamports, blockhash);
system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash);
info!(
"executing transfer of {} from {} to {}",
lamports,
@ -457,7 +457,7 @@ impl LocalCluster {
*dest_pubkey
);
client
.retry_transfer(&source_keypair, &mut tx, 10)
.retry_transfer(source_keypair, &mut tx, 10)
.expect("client transfer");
client
.wait_for_balance_with_commitment(
@ -512,7 +512,7 @@ impl LocalCluster {
.0,
);
client
.retry_transfer(&from_account, &mut transaction, 10)
.retry_transfer(from_account, &mut transaction, 10)
.expect("fund vote");
client
.wait_for_balance_with_commitment(
@ -616,7 +616,7 @@ impl Cluster for LocalCluster {
}
fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo {
let mut node = self.validators.remove(&pubkey).unwrap();
let mut node = self.validators.remove(pubkey).unwrap();
// Shut down the validator
let mut validator = node.validator.take().expect("Validator must be running");
@ -631,7 +631,7 @@ impl Cluster for LocalCluster {
cluster_validator_info: &mut ClusterValidatorInfo,
) -> (Node, Option<ContactInfo>) {
// Update the stored ContactInfo for this node
let node = Node::new_localhost_with_pubkey(&pubkey);
let node = Node::new_localhost_with_pubkey(pubkey);
cluster_validator_info.info.contact_info = node.info.clone();
cluster_validator_info.config.rpc_addrs = Some((node.info.rpc, node.info.rpc_pubsub));

View File

@ -425,7 +425,7 @@ fn run_cluster_partition<C>(
fn test_cluster_partition_1_2() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, &"PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST");
};
run_cluster_partition(
&[vec![1], vec![1, 1]],
@ -445,7 +445,7 @@ fn test_cluster_partition_1_2() {
fn test_cluster_partition_1_1() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, &"PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST");
};
run_cluster_partition(
&[vec![1], vec![1]],
@ -465,7 +465,7 @@ fn test_cluster_partition_1_1() {
fn test_cluster_partition_1_1_1() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, &"PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST");
};
run_cluster_partition(
&[vec![1], vec![1], vec![1]],
@ -525,7 +525,7 @@ fn test_kill_heaviest_partition() {
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
info!("Killing validator with id: {}", validator_to_kill);
cluster.exit_node(&validator_to_kill);
cluster.check_for_new_roots(16, &"PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST");
};
run_cluster_partition(
&partitions,
@ -594,7 +594,7 @@ fn run_kill_partition_switch_threshold<C>(
.iter()
.map(|validator_to_kill| {
info!("Killing validator with id: {}", validator_to_kill);
cluster.exit_node(&validator_to_kill)
cluster.exit_node(validator_to_kill)
})
.collect();
on_partition_start(
@ -622,7 +622,7 @@ fn find_latest_replayed_slot_from_ledger(
mut latest_slot: Slot,
) -> (Slot, HashSet<Slot>) {
loop {
let mut blockstore = open_blockstore(&ledger_path);
let mut blockstore = open_blockstore(ledger_path);
// This is kind of a hack because we can't query for new frozen blocks over RPC
// since the validator is not voting.
let new_latest_slots: Vec<Slot> = blockstore
@ -644,7 +644,7 @@ fn find_latest_replayed_slot_from_ledger(
break;
} else {
sleep(Duration::from_millis(50));
blockstore = open_blockstore(&ledger_path);
blockstore = open_blockstore(ledger_path);
}
}
// Check the slot has been replayed
@ -666,7 +666,7 @@ fn find_latest_replayed_slot_from_ledger(
);
} else {
sleep(Duration::from_millis(50));
blockstore = open_blockstore(&ledger_path);
blockstore = open_blockstore(ledger_path);
}
}
} else {
@ -870,7 +870,7 @@ fn test_switch_threshold_uses_gossip_votes() {
0,
crds_value::Vote::new(node_keypair.pubkey(), vote_tx, timestamp()),
),
&node_keypair,
node_keypair,
)],
context
.dead_validator_info
@ -962,7 +962,7 @@ fn test_kill_partition_switch_threshold_no_progress() {
|_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_no_new_roots(400, &"PARTITION_TEST");
cluster.check_no_new_roots(400, "PARTITION_TEST");
};
// This kills `max_failures_stake`, so no progress should be made
@ -1015,7 +1015,7 @@ fn test_kill_partition_switch_threshold_progress() {
|_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, &"PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST");
};
run_kill_partition_switch_threshold(
&[&[(failures_stake as usize, 16)]],
@ -1246,7 +1246,7 @@ fn test_fork_choice_refresh_old_votes() {
// for lockouts built during partition to resolve and gives validators an opportunity
// to try and switch forks)
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| {
cluster.check_for_new_roots(16, &"PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST");
};
run_kill_partition_switch_threshold(
@ -1320,7 +1320,7 @@ fn test_forwarding() {
.unwrap();
// Confirm that transactions were forwarded to and processed by the leader.
cluster_tests::send_many_transactions(&validator_info, &cluster.funding_keypair, 10, 20);
cluster_tests::send_many_transactions(validator_info, &cluster.funding_keypair, 10, 20);
}
#[test]
@ -1532,7 +1532,7 @@ fn test_frozen_account_from_snapshot() {
trace!("Waiting for snapshot at {:?}", snapshot_package_output_path);
let (archive_filename, _archive_snapshot_hash) =
wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
wait_for_next_snapshot(&cluster, snapshot_package_output_path);
trace!("Found snapshot: {:?}", archive_filename);
@ -1668,7 +1668,7 @@ fn test_snapshot_download() {
trace!("Waiting for snapshot");
let (archive_filename, archive_snapshot_hash) =
wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
wait_for_next_snapshot(&cluster, snapshot_package_output_path);
trace!("found: {:?}", archive_filename);
let validator_archive_path = snapshot_utils::get_snapshot_archive_path(
@ -1743,7 +1743,7 @@ fn test_snapshot_restart_tower() {
.snapshot_package_output_path;
let (archive_filename, archive_snapshot_hash) =
wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
wait_for_next_snapshot(&cluster, snapshot_package_output_path);
// Copy archive to validator's snapshot output directory
let validator_archive_path = snapshot_utils::get_snapshot_archive_path(
@ -1765,7 +1765,7 @@ fn test_snapshot_restart_tower() {
// validator's ContactInfo
let restarted_node_info = cluster.get_contact_info(&validator_id).unwrap();
cluster_tests::spend_and_verify_all_nodes(
&restarted_node_info,
restarted_node_info,
&cluster.funding_keypair,
1,
HashSet::new(),
@ -1926,7 +1926,7 @@ fn test_snapshots_restart_validity() {
expected_balances.extend(new_balances);
wait_for_next_snapshot(&cluster, &snapshot_package_output_path);
wait_for_next_snapshot(&cluster, snapshot_package_output_path);
// Create new account paths since validator exit is not guaranteed to cleanup RPC threads,
// which may delete the old accounts on exit at any point
@ -2019,7 +2019,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
let cluster = LocalCluster::new(&mut cluster_config);
// Check for new roots
cluster.check_for_new_roots(16, &"test_faulty_node");
cluster.check_for_new_roots(16, "test_faulty_node");
}
#[test]
@ -2365,7 +2365,7 @@ fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) {
}
fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
let tower = Tower::restore(&ledger_path, &node_pubkey);
let tower = Tower::restore(ledger_path, node_pubkey);
if let Err(tower_err) = tower {
if tower_err.is_file_missing() {
return None;
@ -2374,7 +2374,7 @@ fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
}
}
// actually saved tower must have at least one vote.
Tower::restore(&ledger_path, &node_pubkey).ok()
Tower::restore(ledger_path, node_pubkey).ok()
}
fn last_vote_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> {
@ -2386,7 +2386,7 @@ fn root_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Slot> {
}
fn remove_tower(ledger_path: &Path, node_pubkey: &Pubkey) {
fs::remove_file(Tower::get_filename(&ledger_path, &node_pubkey)).unwrap();
fs::remove_file(Tower::get_filename(ledger_path, node_pubkey)).unwrap();
}
// A bit convoluted test case; but this roughly follows this test theoretical scenario:
@ -2847,7 +2847,7 @@ fn test_hard_fork_invalidates_tower() {
cluster
.lock()
.unwrap()
.check_for_new_roots(16, &"hard fork");
.check_for_new_roots(16, "hard fork");
}
#[test]
@ -2906,7 +2906,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(20, &"run_test_load_program_accounts_partition");
cluster.check_for_new_roots(20, "run_test_load_program_accounts_partition");
exit.store(true, Ordering::Relaxed);
t_update.join().unwrap();
t_scan.join().unwrap();
@ -3097,7 +3097,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
scan_client_sender.send(scan_client).unwrap();
// Wait for some roots to pass
cluster.check_for_new_roots(40, &"run_test_load_program_accounts");
cluster.check_for_new_roots(40, "run_test_load_program_accounts");
// Exit and ensure no violations of consistency were found
exit.store(true, Ordering::Relaxed);
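
The local-cluster test hunks above also strip borrows of string literals: a literal such as "PARTITION_TEST" is already a &'static str, so &"PARTITION_TEST" was a &&str. A small sketch under the assumption that the callee takes &str; `check_label` is a hypothetical stand-in for methods like check_for_new_roots:

fn check_label(label: &str) {
    println!("checking {}", label);
}

fn main() {
    // Before the fix: `check_label(&"PARTITION_TEST")` passed a `&&str`;
    // clippy::needless_borrow drops the extra `&`.
    check_label("PARTITION_TEST");
}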

View File

@ -216,7 +216,7 @@ mod tests {
{
let some_struct = SomeStruct { x: 42 };
let (result, _measure) = Measure::this(
|(obj, x)| SomeStruct::add_to(&obj, x),
|(obj, x)| SomeStruct::add_to(obj, x),
(&some_struct, 4),
"test",
);

View File

@ -222,7 +222,7 @@ mod tests {
INIT_HOOK.call_once(|| {
ENV_LOCK = Some(RwLock::new(()));
});
&ENV_LOCK.as_ref().unwrap()
ENV_LOCK.as_ref().unwrap()
}
}

View File

@ -66,7 +66,7 @@ impl Packets {
pub fn set_addr(&mut self, addr: &SocketAddr) {
for m in self.packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.set_addr(addr);
}
}

View File

@ -519,11 +519,11 @@ mod tests {
let packet_offsets = sigverify::get_packet_offsets(&packet, 0);
assert_eq!(
memfind(&tx_bytes, &tx.signatures[0].as_ref()),
memfind(&tx_bytes, tx.signatures[0].as_ref()),
Some(SIG_OFFSET)
);
assert_eq!(
memfind(&tx_bytes, &tx.message().account_keys[0].as_ref()),
memfind(&tx_bytes, tx.message().account_keys[0].as_ref()),
Some(packet_offsets.pubkey_start as usize)
);
assert_eq!(
@ -531,7 +531,7 @@ mod tests {
Some(packet_offsets.msg_start as usize)
);
assert_eq!(
memfind(&tx_bytes, &tx.signatures[0].as_ref()),
memfind(&tx_bytes, tx.signatures[0].as_ref()),
Some(packet_offsets.sig_start as usize)
);
assert_eq!(packet_offsets.sig_len, 1);
@ -667,7 +667,7 @@ mod tests {
let tx_bytes = serialize(&tx0).unwrap();
assert!(tx_bytes.len() <= PACKET_DATA_SIZE);
assert_eq!(
memfind(&tx_bytes, &tx0.signatures[0].as_ref()),
memfind(&tx_bytes, tx0.signatures[0].as_ref()),
Some(SIG_OFFSET)
);
let tx1 = deserialize(&tx_bytes).unwrap();

View File

@ -18,7 +18,7 @@ const NUM_ENTRIES: usize = 800;
fn bench_poh_verify_ticks(bencher: &mut Bencher) {
solana_logger::setup();
let zero = Hash::default();
let start_hash = hash(&zero.as_ref());
let start_hash = hash(zero.as_ref());
let mut cur_hash = start_hash;
let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
@ -34,7 +34,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
let zero = Hash::default();
let start_hash = hash(&zero.as_ref());
let start_hash = hash(zero.as_ref());
let mut cur_hash = start_hash;
let keypair1 = Keypair::new();

Some files were not shown because too many files have changed in this diff.