`cargo fmt` using 1.6.0-nightly (#32390)

It seems rustfmt 1.6.0 can now format `let`/`else` statements.
The rustfmt 1.5.2 that we use via our `solana-nightly` toolchain does not mind the reformatting, so the result stays stable under both versions.

```
$ cargo +nightly fmt --version
rustfmt 1.6.0-nightly (f20afcc 2023-07-04)

$ cargo +nightly fmt
$ git add -u
$ git commit

$ ./cargo nightly fmt --version
+ exec cargo +nightly-2023-04-19 fmt --version
rustfmt 1.5.2-nightly (c609da5 2023-04-18)

$ ./cargo nightly fmt
$ git diff
[empty output]
```
This commit is contained in:
Illia Bobyr 2023-07-06 20:45:29 -07:00 committed by GitHub
parent 158253c2b0
commit 282e043177
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 63 additions and 37 deletions

View File

@ -75,7 +75,8 @@ impl AccountsHashVerifier {
)) = Self::get_next_accounts_package( )) = Self::get_next_accounts_package(
&accounts_package_sender, &accounts_package_sender,
&accounts_package_receiver, &accounts_package_receiver,
) else { )
else {
std::thread::sleep(LOOP_LIMITER); std::thread::sleep(LOOP_LIMITER);
continue; continue;
}; };
@ -302,7 +303,9 @@ impl AccountsHashVerifier {
(accounts_hash.into(), accounts_hash, None) (accounts_hash.into(), accounts_hash, None)
} }
CalcAccountsHashFlavor::Incremental => { CalcAccountsHashFlavor::Incremental => {
let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) = accounts_package.package_type else { let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) =
accounts_package.package_type
else {
panic!("Calculating incremental accounts hash requires a base slot"); panic!("Calculating incremental accounts hash requires a base slot");
}; };
let (base_accounts_hash, base_capitalization) = accounts_package let (base_accounts_hash, base_capitalization) = accounts_package

View File

@ -2373,7 +2373,9 @@ impl ReplayStage {
// If we are a non voting validator or have an incorrect setup preventing us from // If we are a non voting validator or have an incorrect setup preventing us from
// generating vote txs, no need to refresh // generating vote txs, no need to refresh
let Some(last_vote_tx_blockhash) = tower.last_vote_tx_blockhash() else { return }; let Some(last_vote_tx_blockhash) = tower.last_vote_tx_blockhash() else {
return;
};
if my_latest_landed_vote >= last_voted_slot if my_latest_landed_vote >= last_voted_slot
|| heaviest_bank_on_same_fork || heaviest_bank_on_same_fork

View File

@ -51,13 +51,13 @@ impl SnapshotPackagerService {
.spawn(move || { .spawn(move || {
info!("SnapshotPackagerService has started"); info!("SnapshotPackagerService has started");
renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap(); renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap();
let mut snapshot_gossip_manager = enable_gossip_push.then(|| let mut snapshot_gossip_manager = enable_gossip_push.then(|| {
SnapshotGossipManager::new( SnapshotGossipManager::new(
cluster_info, cluster_info,
max_full_snapshot_hashes, max_full_snapshot_hashes,
starting_snapshot_hashes, starting_snapshot_hashes,
) )
); });
loop { loop {
if exit.load(Ordering::Relaxed) { if exit.load(Ordering::Relaxed) {
@ -68,7 +68,11 @@ impl SnapshotPackagerService {
snapshot_package, snapshot_package,
num_outstanding_snapshot_packages, num_outstanding_snapshot_packages,
num_re_enqueued_snapshot_packages, num_re_enqueued_snapshot_packages,
)) = Self::get_next_snapshot_package(&snapshot_package_sender, &snapshot_package_receiver) else { )) = Self::get_next_snapshot_package(
&snapshot_package_sender,
&snapshot_package_receiver,
)
else {
std::thread::sleep(Self::LOOP_LIMITER); std::thread::sleep(Self::LOOP_LIMITER);
continue; continue;
}; };
@ -102,7 +106,8 @@ impl SnapshotPackagerService {
measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot( measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot(
&snapshot_config.bank_snapshots_dir, &snapshot_config.bank_snapshots_dir,
snapshot_package.slot(), snapshot_package.slot(),
)).1 ))
.1
}); });
datapoint_info!( datapoint_info!(
@ -119,7 +124,11 @@ impl SnapshotPackagerService {
), ),
("enqueued_time_us", enqueued_time.as_micros(), i64), ("enqueued_time_us", enqueued_time.as_micros(), i64),
("handling_time_us", handling_time_us, i64), ("handling_time_us", handling_time_us, i64),
("purge_old_snapshots_time_us", purge_bank_snapshots_time_us, i64), (
"purge_old_snapshots_time_us",
purge_bank_snapshots_time_us,
i64
),
); );
} }
info!("SnapshotPackagerService has stopped"); info!("SnapshotPackagerService has stopped");

View File

@ -425,7 +425,7 @@ pub fn frozen_abi(attrs: TokenStream, item: TokenStream) -> TokenStream {
"the required \"digest\" = ... attribute is missing.", "the required \"digest\" = ... attribute is missing.",
) )
.to_compile_error() .to_compile_error()
.into() .into();
}; };
let item = parse_macro_input!(item as Item); let item = parse_macro_input!(item as Item);

View File

@ -127,15 +127,17 @@ impl GeyserPluginManager {
pub(crate) fn unload_plugin(&mut self, name: &str) -> JsonRpcResult<()> { pub(crate) fn unload_plugin(&mut self, name: &str) -> JsonRpcResult<()> {
// Check if any plugin names match this one // Check if any plugin names match this one
let Some(idx) = self.plugins.iter().position(|plugin| plugin.name().eq(name)) else { let Some(idx) = self
.plugins
.iter()
.position(|plugin| plugin.name().eq(name))
else {
// If we don't find one return an error // If we don't find one return an error
return Err( return Err(jsonrpc_core::error::Error {
jsonrpc_core::error::Error {
code: ErrorCode::InvalidRequest, code: ErrorCode::InvalidRequest,
message: String::from("The plugin you requested to unload is not loaded"), message: String::from("The plugin you requested to unload is not loaded"),
data: None, data: None,
} });
)
}; };
// Unload and drop plugin and lib // Unload and drop plugin and lib
@ -149,15 +151,17 @@ impl GeyserPluginManager {
/// Then, attempt to load a new plugin /// Then, attempt to load a new plugin
pub(crate) fn reload_plugin(&mut self, name: &str, config_file: &str) -> JsonRpcResult<()> { pub(crate) fn reload_plugin(&mut self, name: &str, config_file: &str) -> JsonRpcResult<()> {
// Check if any plugin names match this one // Check if any plugin names match this one
let Some(idx) = self.plugins.iter().position(|plugin| plugin.name().eq(name)) else { let Some(idx) = self
.plugins
.iter()
.position(|plugin| plugin.name().eq(name))
else {
// If we don't find one return an error // If we don't find one return an error
return Err( return Err(jsonrpc_core::error::Error {
jsonrpc_core::error::Error {
code: ErrorCode::InvalidRequest, code: ErrorCode::InvalidRequest,
message: String::from("The plugin you requested to reload is not loaded"), message: String::from("The plugin you requested to reload is not loaded"),
data: None, data: None,
} });
)
}; };
// Unload and drop current plugin first in case plugin requires exclusive access to resource, // Unload and drop current plugin first in case plugin requires exclusive access to resource,

View File

@ -989,7 +989,8 @@ fn get_latest_optimistic_slots(
let Some(latest_slot) = blockstore let Some(latest_slot) = blockstore
.get_latest_optimistic_slots(1) .get_latest_optimistic_slots(1)
.expect("get_latest_optimistic_slots() failed") .expect("get_latest_optimistic_slots() failed")
.pop() else { .pop()
else {
eprintln!("Blockstore does not contain any optimistically confirmed slots"); eprintln!("Blockstore does not contain any optimistically confirmed slots");
return vec![]; return vec![];
}; };

View File

@ -112,7 +112,7 @@ pub fn load_bank_forks(
let Some(full_snapshot_archive_info) = let Some(full_snapshot_archive_info) =
snapshot_utils::get_highest_full_snapshot_archive_info( snapshot_utils::get_highest_full_snapshot_archive_info(
&snapshot_config.full_snapshot_archives_dir &snapshot_config.full_snapshot_archives_dir,
) )
else { else {
warn!( warn!(

View File

@ -1675,8 +1675,7 @@ impl Bank {
/// Process reward distribution for the block if it is inside reward interval. /// Process reward distribution for the block if it is inside reward interval.
fn distribute_partitioned_epoch_rewards(&mut self) { fn distribute_partitioned_epoch_rewards(&mut self) {
let EpochRewardStatus::Active(status) = &self.epoch_reward_status let EpochRewardStatus::Active(status) = &self.epoch_reward_status else {
else {
return; return;
}; };

View File

@ -230,10 +230,14 @@ pub struct SnapshotPackage {
impl SnapshotPackage { impl SnapshotPackage {
pub fn new(accounts_package: AccountsPackage, accounts_hash: AccountsHashEnum) -> Self { pub fn new(accounts_package: AccountsPackage, accounts_hash: AccountsHashEnum) -> Self {
let AccountsPackageType::Snapshot(snapshot_type) = accounts_package.package_type else { let AccountsPackageType::Snapshot(snapshot_type) = accounts_package.package_type else {
panic!("The AccountsPackage must be of type Snapshot in order to make a SnapshotPackage!"); panic!(
"The AccountsPackage must be of type Snapshot in order to make a SnapshotPackage!"
);
}; };
let Some(snapshot_info) = accounts_package.snapshot_info else { let Some(snapshot_info) = accounts_package.snapshot_info else {
panic!("The AccountsPackage must have snapshot info in order to make a SnapshotPackage!"); panic!(
"The AccountsPackage must have snapshot info in order to make a SnapshotPackage!"
);
}; };
let snapshot_hash = let snapshot_hash =
SnapshotHash::new(&accounts_hash, snapshot_info.epoch_accounts_hash.as_ref()); SnapshotHash::new(&accounts_hash, snapshot_info.epoch_accounts_hash.as_ref());

View File

@ -967,7 +967,11 @@ fn build_known_snapshot_hashes<'a>(
} }
'to_next_node: for node in nodes { 'to_next_node: for node in nodes {
let Some(SnapshotHash {full: full_snapshot_hash, incr: incremental_snapshot_hash}) = get_snapshot_hashes_for_node(node) else { let Some(SnapshotHash {
full: full_snapshot_hash,
incr: incremental_snapshot_hash,
}) = get_snapshot_hashes_for_node(node)
else {
continue 'to_next_node; continue 'to_next_node;
}; };