Rust v1.63.0 (#27148)

* Upgrade to Rust v1.63.0

* Add nightly_clippy_allows

* Resolve some new clippy nightly lints

* Increase QUIC packets completion timeout

Co-authored-by: Michael Vines <mvines@gmail.com>
Brennan Watt 2022-08-17 15:48:33 -07:00 committed by GitHub
parent 2fd9a4f373
commit a2e7bdf50a
42 changed files with 78 additions and 78 deletions
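Most of the diffs below apply the same mechanical rewrite for the new nightly lint clippy::unnecessary_lazy_evaluations: `bool::then(|| value)` becomes `bool::then_some(value)` wherever the value is cheap to build. A minimal standalone sketch of the pattern (illustrative code, not taken from the tree):

    // `then` takes a closure and evaluates it lazily; `then_some`
    // (stable since Rust 1.62) takes the value directly, which reads
    // better when the value is cheap and has no side effects.
    fn main() {
        let index = 3_usize;
        let was_executed = true;

        let lazy = was_executed.then(|| index); // old style
        let eager = was_executed.then_some(index); // new style

        assert_eq!(lazy, eager);
        assert_eq!(eager, Some(3));
    }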

View File

@@ -19,7 +19,7 @@ pub fn parse_address_lookup_table(
     })
 }
-#[derive(Debug, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 #[serde(rename_all = "camelCase", tag = "type", content = "info")]
 pub enum LookupTableAccountType {
     Uninitialized,

View File

@@ -153,13 +153,9 @@ fn verify_transaction(
     transaction: &Transaction,
     feature_set: &Arc<FeatureSet>,
 ) -> transaction::Result<()> {
-    if let Err(err) = transaction.verify() {
-        Err(err)
-    } else if let Err(err) = transaction.verify_precompiles(feature_set) {
-        Err(err)
-    } else {
-        Ok(())
-    }
+    transaction.verify()?;
+    transaction.verify_precompiles(feature_set)?;
+    Ok(())
 }
 fn simulate_transaction(

View File

@@ -1,4 +1,4 @@
-FROM solanalabs/rust:1.60.0
+FROM solanalabs/rust:1.63.0
 ARG date
 RUN set -x \

View File

@@ -1,6 +1,6 @@
 # Note: when the rust version is changed also modify
 # ci/rust-version.sh to pick up the new image tag
-FROM rust:1.60.0
+FROM rust:1.63.0
 # Add Google Protocol Buffers for Libra's metrics library.
 ENV PROTOC_VERSION 3.8.0

View File

@@ -18,13 +18,13 @@
 if [[ -n $RUST_STABLE_VERSION ]]; then
   stable_version="$RUST_STABLE_VERSION"
 else
-  stable_version=1.60.0
+  stable_version=1.63.0
 fi
 if [[ -n $RUST_NIGHTLY_VERSION ]]; then
   nightly_version="$RUST_NIGHTLY_VERSION"
 else
-  nightly_version=2022-04-01
+  nightly_version=2022-08-12
 fi

View File

@@ -65,11 +65,25 @@ fi
 _ ci/order-crates-for-publishing.py
+nightly_clippy_allows=(
+  # This lint occurs all over the code base
+  "--allow=clippy::significant_drop_in_scrutinee"
+  # The prost crate, used by solana-storage-proto, generates Rust source that
+  # triggers this lint. Need to resolve upstream in prost
+  "--allow=clippy::derive_partial_eq_without_eq"
+  # This lint seems to incorrectly trigger in
+  # `programs/bpf_loader/src/syscalls/{lib,cpi}.rs`
+  "--allow=clippy::explicit_auto_deref"
+)
 # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
 _ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- \
   --deny=warnings \
   --deny=clippy::integer_arithmetic \
+  "${nightly_clippy_allows[@]}"
 _ scripts/cargo-for-all-lock-files.sh -- nightly sort --workspace --check
 _ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check
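Of the three allowed lints, only clippy::significant_drop_in_scrutinee gets no fix in this commit. A rough illustration of the pattern it flags (simplified, not from this code base): a match scrutinee that produces a lock guard keeps the lock held across every arm.

    use std::sync::Mutex;

    fn main() {
        let state = Mutex::new(0_u64);
        // The temporary MutexGuard created in the scrutinee lives until
        // the end of the match, so the lock stays held across both arms;
        // that is what the lint reports.
        match *state.lock().unwrap() {
            0 => println!("zero"),
            n => println!("{n}"),
        }
    }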

View File

@@ -27,7 +27,7 @@ mod tests {
         let mut all_packets = vec![];
         let now = Instant::now();
         let mut total_packets: usize = 0;
-        while now.elapsed().as_secs() < 5 {
+        while now.elapsed().as_secs() < 10 {
             if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
                 total_packets = total_packets.saturating_add(packets.len());
                 all_packets.push(packets)

View File

@@ -1335,7 +1335,7 @@ impl BankingStage {
         );
         retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map(
-            |(index, execution_result)| execution_result.was_executed().then(|| index),
+            |(index, execution_result)| execution_result.was_executed().then_some(index),
         ));
         return ExecuteAndCommitTransactionsOutput {

View File

@@ -151,7 +151,7 @@ fn get_slot_leaders(
         let leader = leaders.entry(slot).or_insert_with(|| {
             let leader = leader_schedule_cache.slot_leader_at(slot, Some(bank))?;
             // Discard the shred if the slot leader is the node itself.
-            (&leader != self_pubkey).then(|| leader)
+            (&leader != self_pubkey).then_some(leader)
         });
         if leader.is_none() {
             packet.meta.set_discard(true);

View File

@@ -411,7 +411,7 @@ lazy_static! {
 impl AbiExample for &Vec<u8> {
     fn example() -> Self {
         info!("AbiExample for (&Vec<u8>): {}", type_name::<Self>());
-        &*VEC_U8
+        &VEC_U8
     }
 }
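The `&*VEC_U8` to `&VEC_U8` change here (and the similar `&*` removals further down) address clippy::explicit_auto_deref: when the expected type is known, deref coercion inserts the dereference automatically. A standalone sketch, with a Box standing in for the lazy_static value:

    fn sum(bytes: &Vec<u8>) -> u64 {
        bytes.iter().map(|&b| u64::from(b)).sum()
    }

    fn main() {
        let boxed: Box<Vec<u8>> = Box::new(vec![1, 2, 3]);
        let explicit = sum(&*boxed); // manual deref, then re-borrow
        let coerced = sum(&boxed); // &Box<Vec<u8>> coerces to &Vec<u8>
        assert_eq!(explicit, coerced);
    }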

View File

@@ -256,7 +256,7 @@ impl CrdsGossipPull {
                 if let Some(ping) = ping {
                     pings.push((peer.gossip, ping));
                 }
-                check.then(|| (weight, peer))
+                check.then_some((weight, peer))
             })
             .unzip()
     };

View File

@@ -60,7 +60,7 @@ pub async fn upload_confirmed_blocks(
                 starting_slot, err
             )
         })?
-        .map_while(|slot| (slot <= ending_slot).then(|| slot))
+        .map_while(|slot| (slot <= ending_slot).then_some(slot))
         .collect();
     if blockstore_slots.is_empty() {

View File

@@ -3145,7 +3145,7 @@ impl Blockstore {
         }
         .expect("fetch from DuplicateSlots column family failed")?;
         let new_shred = Shred::new_from_serialized_shred(payload).unwrap();
-        (existing_shred != *new_shred.payload()).then(|| existing_shred)
+        (existing_shred != *new_shred.payload()).then_some(existing_shred)
     }
     pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool {

View File

@@ -61,7 +61,7 @@ mod serde_compat {
         D: Deserializer<'de>,
     {
         let val = u64::deserialize(deserializer)?;
-        Ok((val != u64::MAX).then(|| val))
+        Ok((val != u64::MAX).then_some(val))
     }
 }

View File

@@ -613,7 +613,7 @@ pub mod layout {
             merkle::ShredData::get_signed_message_range(proof_size)?
         }
     };
-    (shred.len() <= range.end).then(|| range)
+    (shred.len() <= range.end).then_some(range)
 }
 pub(crate) fn get_reference_tick(shred: &[u8]) -> Result<u8, Error> {

View File

@@ -119,7 +119,7 @@ pub(super) fn erasure_shard_index<T: ShredCodeTrait>(shred: &T) -> Option<usize>
     let position = usize::from(coding_header.position);
     let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?;
     let index = position.checked_add(num_data_shreds)?;
-    (index < fec_set_size).then(|| index)
+    (index < fec_set_size).then_some(index)
 }
 pub(super) fn sanitize<T: ShredCodeTrait>(shred: &T) -> Result<(), Error> {

View File

@@ -319,7 +319,7 @@ impl LocalCluster {
             })
             .collect();
         for (stake, validator_config, (key, _)) in izip!(
-            (&config.node_stakes[1..]).iter(),
+            config.node_stakes[1..].iter(),
             config.validator_configs[1..].iter(),
             validator_keys[1..].iter(),
         ) {

View File

@@ -830,12 +830,7 @@ mod tests {
     pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
         assert!(a.len() >= b.len());
         let end = a.len() - b.len() + 1;
-        for i in 0..end {
-            if a[i..i + b.len()] == b[..] {
-                return Some(i);
-            }
-        }
-        None
+        (0..end).find(|&i| a[i..i + b.len()] == b[..])
     }
     #[test]

View File

@@ -505,7 +505,7 @@ impl PohRecorder {
             start: Arc::new(Instant::now()),
             min_tick_height: bank.tick_height(),
             max_tick_height: bank.max_tick_height(),
-            transaction_index: track_transaction_indexes.then(|| 0),
+            transaction_index: track_transaction_indexes.then_some(0),
         };
         trace!("new working bank");
         assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot());

View File

@@ -129,7 +129,7 @@ fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
 /// Wrapper for rpc return types of methods that provide responses both with and without context.
 /// Main purpose of this is to fix methods that lack context information in their return type,
 /// without breaking backwards compatibility.
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(untagged)]
 pub enum OptionalContext<T> {
     Context(RpcResponse<T>),
@@ -3646,9 +3646,7 @@ pub mod rpc_full {
         }
         if !skip_preflight {
-            if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) {
-                return Err(e);
-            }
+            verify_transaction(&transaction, &preflight_bank.feature_set)?;
             match meta.health.check() {
                 RpcHealthStatus::Ok => (),

View File

@@ -1001,10 +1001,7 @@ impl RpcSubscriptions {
                 let mut slots_to_notify: Vec<_> =
                     (*w_last_unnotified_slot..slot).collect();
                 let ancestors = bank.proper_ancestors_set();
-                slots_to_notify = slots_to_notify
-                    .into_iter()
-                    .filter(|slot| ancestors.contains(slot))
-                    .collect();
+                slots_to_notify.retain(|slot| ancestors.contains(slot));
                 slots_to_notify.push(slot);
                 for s in slots_to_notify {
                     // To avoid skipping a slot that fails this condition,

View File

@@ -104,7 +104,7 @@ pub(crate) fn check_rent_state(
             .get_account_at_index(index)
             .expect(expect_msg)
             .borrow(),
-        include_account_index_in_err.then(|| index),
+        include_account_index_in_err.then_some(index),
         prevent_crediting_accounts_that_end_rent_paying,
     )?;
 }

View File

@@ -440,7 +440,7 @@ impl Accounts {
             payer_account,
             feature_set
                 .is_active(&feature_set::include_account_index_in_rent_error::ID)
-                .then(|| payer_index),
+                .then_some(payer_index),
             feature_set
                 .is_active(&feature_set::prevent_crediting_accounts_that_end_rent_paying::id()),
         )

View File

@@ -2174,7 +2174,7 @@ impl AccountsDb {
         // figure out how many ancient accounts have been reclaimed
         let old_reclaims = reclaims
             .iter()
-            .filter_map(|(slot, _)| (slot < &one_epoch_old).then(|| 1))
+            .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1))
             .sum();
         ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed);
         reclaims
@@ -2392,7 +2392,7 @@ impl AccountsDb {
             .iter()
             .filter_map(|entry| {
                 let slot = *entry.key();
-                (slot <= max_slot).then(|| slot)
+                (slot <= max_slot).then_some(slot)
             })
             .collect()
     }
@@ -3676,7 +3676,7 @@ impl AccountsDb {
     ) -> Option<SnapshotStorage> {
         self.get_storages_for_slot(slot).and_then(|all_storages| {
             self.should_move_to_ancient_append_vec(&all_storages, current_ancient, slot)
-                .then(|| all_storages)
+                .then_some(all_storages)
         })
     }
@@ -5309,7 +5309,7 @@ impl AccountsDb {
                 // with the same slot.
                 let is_being_flushed = !currently_contended_slots.insert(*remove_slot);
                 // If the cache is currently flushing this slot, add it to the list
-                is_being_flushed.then(|| remove_slot)
+                is_being_flushed.then_some(remove_slot)
             })
             .cloned()
             .collect();

View File

@@ -2285,7 +2285,7 @@ impl Bank {
             hash: *self.hash.read().unwrap(),
             parent_hash: self.parent_hash,
             parent_slot: self.parent_slot,
-            hard_forks: &*self.hard_forks,
+            hard_forks: &self.hard_forks,
             transaction_count: self.transaction_count.load(Relaxed),
             tick_height: self.tick_height.load(Relaxed),
             signature_count: self.signature_count.load(Relaxed),
@@ -3293,7 +3293,7 @@ impl Bank {
             let vote_state = account.vote_state();
             let vote_state = vote_state.as_ref().ok()?;
             let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?;
-            (slot_delta <= slots_per_epoch).then(|| {
+            (slot_delta <= slots_per_epoch).then_some({
                 (
                     *pubkey,
                     (
@@ -3963,10 +3963,10 @@ impl Bank {
     }
     /// Prepare a transaction batch without locking accounts for transaction simulation.
-    pub(crate) fn prepare_simulation_batch<'a>(
-        &'a self,
+    pub(crate) fn prepare_simulation_batch(
+        &self,
         transaction: SanitizedTransaction,
-    ) -> TransactionBatch<'a, '_> {
+    ) -> TransactionBatch<'_, '_> {
         let tx_account_lock_limit = self.get_transaction_account_lock_limit();
         let lock_result = transaction
             .get_account_locks(tx_account_lock_limit)
@@ -4367,7 +4367,7 @@ impl Bank {
             self.feature_set.clone(),
             compute_budget,
             timings,
-            &*self.sysvar_cache.read().unwrap(),
+            &self.sysvar_cache.read().unwrap(),
             blockhash,
             lamports_per_signature,
             prev_accounts_data_len,

View File

@@ -684,7 +684,7 @@ pub mod tests {
         );
         assert_eq!(
             result,
-            (!leave_alone).then(|| ExpectedRentCollection {
+            (!leave_alone).then_some(ExpectedRentCollection {
                 partition_from_pubkey,
                 epoch_of_max_storage_slot: rent_collector.epoch,
                 partition_index_from_max_slot: partition_index_max_inclusive,
@@ -712,7 +712,7 @@
         );
         assert_eq!(
             result,
-            (!greater).then(|| ExpectedRentCollection {
+            (!greater).then_some(ExpectedRentCollection {
                 partition_from_pubkey,
                 epoch_of_max_storage_slot: rent_collector.epoch,
                 partition_index_from_max_slot: partition_index_max_inclusive,
@@ -909,7 +909,7 @@
         );
         assert_eq!(
             result,
-            (account_rent_epoch != 0).then(|| ExpectedRentCollection {
+            (account_rent_epoch != 0).then_some(ExpectedRentCollection {
                 partition_from_pubkey,
                 epoch_of_max_storage_slot: rent_collector.epoch + 1,
                 partition_index_from_max_slot: partition_index_max_inclusive,
@@ -1084,7 +1084,7 @@
         };
         assert_eq!(
             result,
-            some_expected.then(|| ExpectedRentCollection {
+            some_expected.then_some(ExpectedRentCollection {
                 partition_from_pubkey,
                 epoch_of_max_storage_slot: rent_collector.epoch,
                 partition_index_from_max_slot,

View File

@@ -384,7 +384,7 @@ where
         .map(|path_buf| path_buf.as_path())
     {
         Some(path) => {
-            accounts_path_processor(*file, path);
+            accounts_path_processor(file, path);
             UnpackPath::Valid(path)
         }
         None => UnpackPath::Invalid,

View File

@@ -1418,6 +1418,8 @@ impl<'a> FlushGuard<'a> {
     #[must_use = "if unused, the `flushing` flag will immediately clear"]
     fn lock(flushing: &'a AtomicBool) -> Option<Self> {
         let already_flushing = flushing.swap(true, Ordering::AcqRel);
+        // Eager evaluation here would result in dropping Self and clearing flushing flag
+        #[allow(clippy::unnecessary_lazy_evaluations)]
         (!already_flushing).then(|| Self { flushing })
     }
 }
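This is the one place where the lazy closure is kept on purpose and the lint silenced instead: `then_some` evaluates its argument eagerly, so the guard would be constructed even on the contended path, and dropping it runs `Drop`, clearing a flag that another flusher still holds. A simplified stand-in (not solana's actual FlushGuard) showing why the laziness matters:

    use std::sync::atomic::{AtomicBool, Ordering};

    struct Guard<'a> {
        flushing: &'a AtomicBool,
    }

    impl Drop for Guard<'_> {
        fn drop(&mut self) {
            // Dropping the guard releases the flag.
            self.flushing.store(false, Ordering::Release);
        }
    }

    fn lock(flushing: &AtomicBool) -> Option<Guard<'_>> {
        let already_flushing = flushing.swap(true, Ordering::AcqRel);
        // Lazy: the guard only exists if this call actually won the flag.
        // An eager `then_some(Guard { flushing })` would build and then drop
        // a guard on the failure path, clearing the other holder's flag.
        (!already_flushing).then(|| Guard { flushing })
    }

    fn main() {
        let flag = AtomicBool::new(false);
        let first = lock(&flag);
        assert!(first.is_some());
        assert!(lock(&flag).is_none()); // contended: flag must stay set
        drop(first);
        assert!(lock(&flag).is_some()); // released: can be taken again
    }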

View File

@@ -65,7 +65,7 @@ pub(crate) enum SerdeStyle {
 const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
-#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)]
+#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq, Eq)]
 pub struct AccountsDbFields<T>(
     HashMap<Slot, Vec<T>>,
     StoredMetaWriteVersion,
@@ -120,7 +120,7 @@ impl<T> SnapshotAccountsDbFields<T> {
         // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot
         incremental_snapshot_storages
             .iter()
-            .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| {
+            .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| {
                 io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!")
             })?;

View File

@@ -201,7 +201,7 @@ impl<'a> TypeContext<'a> for Context {
         (
             SerializableVersionedBank::from(fields),
             SerializableAccountsDb::<'a, Self> {
-                accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db,
+                accounts_db: &serializable_bank.bank.rc.accounts.accounts_db,
                 slot: serializable_bank.bank.rc.slot,
                 account_storage_entries: serializable_bank.snapshot_storages,
                 phantom: std::marker::PhantomData::default(),
@@ -228,7 +228,7 @@ impl<'a> TypeContext<'a> for Context {
         (
             SerializableVersionedBank::from(fields),
             SerializableAccountsDb::<'a, Self> {
-                accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db,
+                accounts_db: &serializable_bank.bank.rc.accounts.accounts_db,
                 slot: serializable_bank.bank.rc.slot,
                 account_storage_entries: serializable_bank.snapshot_storages,
                 phantom: std::marker::PhantomData::default(),

View File

@@ -155,7 +155,7 @@ fn test_accounts_serialize_style(serde_style: SerdeStyle) {
     accountsdb_to_stream(
         serde_style,
         &mut writer,
-        &*accounts.accounts_db,
+        &accounts.accounts_db,
         0,
         &accounts.accounts_db.get_snapshot_storages(0, None, None).0,
     )

View File

@@ -543,7 +543,7 @@ mod tests {
             .accounts
             .iter()
             .filter_map(|(pubkey, account)| {
-                stake::program::check_id(account.owner()).then(|| *pubkey)
+                stake::program::check_id(account.owner()).then_some(*pubkey)
             })
             .collect();
         expected_stake_accounts.push(bootstrap_validator_pubkey);

View File

@@ -1181,7 +1181,7 @@ fn check_are_snapshots_compatible(
     let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap();
     (full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot())
-        .then(|| ())
+        .then_some(())
         .ok_or_else(|| {
             SnapshotError::MismatchedBaseSlot(
                 full_snapshot_archive_info.slot(),

View File

@@ -143,7 +143,7 @@ pub mod tests {
             slot,
             &vec![(&pk, &account, slot), (&pk, &account, slot)][..],
         );
-        assert!(!(&test3).contains_multiple_slots());
+        assert!(!test3.contains_multiple_slots());
         let test3 = (
             slot,
             &vec![(&pk, &account, slot), (&pk, &account, slot + 1)][..],

View File

@@ -1626,7 +1626,7 @@ mod tests {
         .unwrap();
         // super fun time; callback chooses to .clean_accounts(None) or not
-        callback(&*bank);
+        callback(&bank);
         // create a normal account at the same pubkey as the zero-lamports account
         let lamports = genesis_config.rent.minimum_balance(len2);

View File

@@ -80,20 +80,20 @@ impl CompiledKeys {
             .chain(
                 key_meta_map
                     .iter()
-                    .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)),
+                    .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then_some(*key)),
             )
             .collect();
         let readonly_signer_keys: Vec<Pubkey> = key_meta_map
             .iter()
-            .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key))
+            .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then_some(*key))
             .collect();
         let writable_non_signer_keys: Vec<Pubkey> = key_meta_map
             .iter()
-            .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key))
+            .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then_some(*key))
             .collect();
         let readonly_non_signer_keys: Vec<Pubkey> = key_meta_map
             .iter()
-            .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key))
+            .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then_some(*key))
             .collect();
         let signers_len = writable_signer_keys
@@ -160,7 +160,7 @@ impl CompiledKeys {
         for search_key in self
             .key_meta_map
             .iter()
-            .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key))
+            .filter_map(|(key, meta)| key_meta_filter(meta).then_some(key))
         {
             for (key_index, key) in lookup_table_addresses.iter().enumerate() {
                 if key == search_key {

View File

@@ -46,7 +46,7 @@ impl Versions {
             Self::Current(state) => match **state {
                 State::Uninitialized => None,
                 State::Initialized(ref data) => {
-                    (recent_blockhash == &data.blockhash()).then(|| data)
+                    (recent_blockhash == &data.blockhash()).then_some(data)
                 }
             },
         }

View File

@@ -28,7 +28,7 @@ fn get_minimum_delegation_return_data() -> Result<u64, ProgramError> {
         .ok_or(ProgramError::InvalidInstructionData)
         .and_then(|(program_id, return_data)| {
             (program_id == super::program::id())
-                .then(|| return_data)
+                .then_some(return_data)
                 .ok_or(ProgramError::IncorrectProgramId)
         })
         .and_then(|return_data| {

View File

@@ -307,7 +307,7 @@ fn recv_send(
     let packets = packet_batch.iter().filter_map(|pkt| {
         let addr = pkt.meta.socket_addr();
         let data = pkt.data(..)?;
-        socket_addr_space.check(&addr).then(|| (data, addr))
+        socket_addr_space.check(&addr).then_some((data, addr))
     });
     batch_send(sock, &packets.collect::<Vec<_>>())?;
     Ok(())

View File

@@ -409,7 +409,7 @@ pub fn attempt_download_genesis_and_snapshot(
         .map_err(|err| format!("Failed to get RPC node slot: {}", err))?;
     info!("RPC node root slot: {}", rpc_client_slot);
-    if let Err(err) = download_snapshots(
+    download_snapshots(
         full_snapshot_archives_dir,
         incremental_snapshot_archives_dir,
         validator_config,
@@ -422,9 +422,7 @@ pub fn attempt_download_genesis_and_snapshot(
         download_abort_count,
         snapshot_hash,
         rpc_contact_info,
-    ) {
-        return Err(err);
-    };
+    )?;
     if let Some(url) = bootstrap_config.check_vote_account.as_ref() {
         let rpc_client = RpcClient::new(url);

View File

@@ -41,7 +41,7 @@ impl CloseAccountData {
         keypair: &ElGamalKeypair,
         ciphertext: &ElGamalCiphertext,
     ) -> Result<Self, ProofError> {
-        let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes());
+        let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes());
         let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes());
         let mut transcript = CloseAccountProof::transcript_new(&pod_pubkey, &pod_ciphertext);

View File

@@ -62,7 +62,7 @@ impl WithdrawData {
         // current source balance
         let final_ciphertext = current_ciphertext - &ElGamal::encode(amount);
-        let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes());
+        let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes());
         let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into();
         let mut transcript = WithdrawProof::transcript_new(&pod_pubkey, &pod_final_ciphertext);
         let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript);
let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript);