`cargo fmt` using 1.6.0-nightly (#32390)
Seems like rustfmt 1.6.0 can now format `let`/`else` statements. rustfmt 1.5.2, the version we use in our `solana-nightly` toolchain, does not mind the reformatting:

```
$ cargo +nightly fmt --version
rustfmt 1.6.0-nightly (f20afcc 2023-07-04)
$ cargo +nightly fmt
$ git add -u
$ git commit
$ ./cargo nightly fmt --version
+ exec cargo +nightly-2023-04-19 fmt --version
rustfmt 1.5.2-nightly (c609da5 2023-04-18)
$ ./cargo nightly fmt
$ git diff
[empty output]
```
parent 158253c2b0
commit 282e043177
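To make the change concrete, here is a minimal, hypothetical sketch of the rewrite rustfmt 1.6.0 now applies to `let`/`else` statements; the `Plugin`/`find_plugin` names are invented for illustration and are not code from this repository:

```rust
struct Plugin {
    name: String,
}

// Before formatting, the whole statement sat on one long line:
//
//     let Some(idx) = plugins.iter().position(|plugin| plugin.name == name) else { return None; };
//
// rustfmt 1.5.2 left such lines alone; 1.6.0 breaks the method chain and
// moves `else` onto its own line once the binding exceeds the width limit:
fn find_plugin(plugins: &[Plugin], name: &str) -> Option<usize> {
    let Some(idx) = plugins
        .iter()
        .position(|plugin| plugin.name == name)
    else {
        return None;
    };
    Some(idx)
}

fn main() {
    let plugins = vec![Plugin { name: "geyser".to_string() }];
    assert_eq!(find_plugin(&plugins, "geyser"), Some(0));
    assert_eq!(find_plugin(&plugins, "missing"), None);
}
```

The hunks below show this same rewrite applied across the tree, as well as the opposite direction: a short `let`/`else` that fits within the width limit is collapsed back onto a single line (see the `impl Bank` hunk).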
```diff
@@ -75,7 +75,8 @@ impl AccountsHashVerifier {
                 )) = Self::get_next_accounts_package(
                     &accounts_package_sender,
                     &accounts_package_receiver,
-                ) else {
+                )
+                else {
                     std::thread::sleep(LOOP_LIMITER);
                     continue;
                 };
```
```diff
@@ -302,7 +303,9 @@ impl AccountsHashVerifier {
                 (accounts_hash.into(), accounts_hash, None)
             }
             CalcAccountsHashFlavor::Incremental => {
-                let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) = accounts_package.package_type else {
+                let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) =
+                    accounts_package.package_type
+                else {
                     panic!("Calculating incremental accounts hash requires a base slot");
                 };
                 let (base_accounts_hash, base_capitalization) = accounts_package
```
```diff
@@ -2373,7 +2373,9 @@ impl ReplayStage {

         // If we are a non voting validator or have an incorrect setup preventing us from
         // generating vote txs, no need to refresh
-        let Some(last_vote_tx_blockhash) = tower.last_vote_tx_blockhash() else { return };
+        let Some(last_vote_tx_blockhash) = tower.last_vote_tx_blockhash() else {
+            return;
+        };

         if my_latest_landed_vote >= last_voted_slot
             || heaviest_bank_on_same_fork
```
```diff
@@ -51,13 +51,13 @@ impl SnapshotPackagerService {
             .spawn(move || {
                 info!("SnapshotPackagerService has started");
                 renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap();
-                let mut snapshot_gossip_manager = enable_gossip_push.then(||
+                let mut snapshot_gossip_manager = enable_gossip_push.then(|| {
                     SnapshotGossipManager::new(
                         cluster_info,
                         max_full_snapshot_hashes,
                         starting_snapshot_hashes,
                     )
-                );
+                });

                 loop {
                     if exit.load(Ordering::Relaxed) {
```
```diff
@@ -68,7 +68,11 @@ impl SnapshotPackagerService {
                         snapshot_package,
                         num_outstanding_snapshot_packages,
                         num_re_enqueued_snapshot_packages,
-                    )) = Self::get_next_snapshot_package(&snapshot_package_sender, &snapshot_package_receiver) else {
+                    )) = Self::get_next_snapshot_package(
+                        &snapshot_package_sender,
+                        &snapshot_package_receiver,
+                    )
+                    else {
                         std::thread::sleep(Self::LOOP_LIMITER);
                         continue;
                     };
```
```diff
@@ -102,7 +106,8 @@ impl SnapshotPackagerService {
                         measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot(
                             &snapshot_config.bank_snapshots_dir,
                             snapshot_package.slot(),
-                        )).1
+                        ))
+                        .1
                     });

                     datapoint_info!(
```
```diff
@@ -119,7 +124,11 @@ impl SnapshotPackagerService {
                         ),
                         ("enqueued_time_us", enqueued_time.as_micros(), i64),
                         ("handling_time_us", handling_time_us, i64),
-                        ("purge_old_snapshots_time_us", purge_bank_snapshots_time_us, i64),
+                        (
+                            "purge_old_snapshots_time_us",
+                            purge_bank_snapshots_time_us,
+                            i64
+                        ),
                     );
                 }
                 info!("SnapshotPackagerService has stopped");
```
```diff
@@ -425,7 +425,7 @@ pub fn frozen_abi(attrs: TokenStream, item: TokenStream) -> TokenStream {
             "the required \"digest\" = ... attribute is missing.",
         )
         .to_compile_error()
-        .into()
+        .into();
     };

     let item = parse_macro_input!(item as Item);
```
```diff
@@ -127,15 +127,17 @@ impl GeyserPluginManager {

     pub(crate) fn unload_plugin(&mut self, name: &str) -> JsonRpcResult<()> {
         // Check if any plugin names match this one
-        let Some(idx) = self.plugins.iter().position(|plugin| plugin.name().eq(name)) else {
+        let Some(idx) = self
+            .plugins
+            .iter()
+            .position(|plugin| plugin.name().eq(name))
+        else {
             // If we don't find one return an error
-            return Err(
-                jsonrpc_core::error::Error {
-                    code: ErrorCode::InvalidRequest,
-                    message: String::from("The plugin you requested to unload is not loaded"),
-                    data: None,
-                }
-            )
+            return Err(jsonrpc_core::error::Error {
+                code: ErrorCode::InvalidRequest,
+                message: String::from("The plugin you requested to unload is not loaded"),
+                data: None,
+            });
         };

         // Unload and drop plugin and lib
```
```diff
@@ -149,15 +151,17 @@ impl GeyserPluginManager {
     /// Then, attempt to load a new plugin
     pub(crate) fn reload_plugin(&mut self, name: &str, config_file: &str) -> JsonRpcResult<()> {
         // Check if any plugin names match this one
-        let Some(idx) = self.plugins.iter().position(|plugin| plugin.name().eq(name)) else {
+        let Some(idx) = self
+            .plugins
+            .iter()
+            .position(|plugin| plugin.name().eq(name))
+        else {
             // If we don't find one return an error
-            return Err(
-                jsonrpc_core::error::Error {
-                    code: ErrorCode::InvalidRequest,
-                    message: String::from("The plugin you requested to reload is not loaded"),
-                    data: None,
-                }
-            )
+            return Err(jsonrpc_core::error::Error {
+                code: ErrorCode::InvalidRequest,
+                message: String::from("The plugin you requested to reload is not loaded"),
+                data: None,
+            });
         };

         // Unload and drop current plugin first in case plugin requires exclusive access to resource,
```
```diff
@@ -989,10 +989,11 @@ fn get_latest_optimistic_slots(
     let Some(latest_slot) = blockstore
         .get_latest_optimistic_slots(1)
         .expect("get_latest_optimistic_slots() failed")
-        .pop() else {
+        .pop()
+    else {
         eprintln!("Blockstore does not contain any optimistically confirmed slots");
         return vec![];
     };
     let latest_slot = latest_slot.0;

     let slot_iter = AncestorIterator::new_inclusive(latest_slot, blockstore).map(|slot| {
```
```diff
@@ -112,7 +112,7 @@ pub fn load_bank_forks(

     let Some(full_snapshot_archive_info) =
         snapshot_utils::get_highest_full_snapshot_archive_info(
-            &snapshot_config.full_snapshot_archives_dir
+            &snapshot_config.full_snapshot_archives_dir,
         )
     else {
         warn!(
```
```diff
@@ -1675,10 +1675,9 @@ impl Bank {

     /// Process reward distribution for the block if it is inside reward interval.
     fn distribute_partitioned_epoch_rewards(&mut self) {
-        let EpochRewardStatus::Active(status) = &self.epoch_reward_status
-        else {
-            return;
-        };
+        let EpochRewardStatus::Active(status) = &self.epoch_reward_status else {
+            return;
+        };

         let height = self.block_height();
         let start_block_height = status.start_block_height;
```
```diff
@@ -230,10 +230,14 @@ pub struct SnapshotPackage {
 impl SnapshotPackage {
     pub fn new(accounts_package: AccountsPackage, accounts_hash: AccountsHashEnum) -> Self {
         let AccountsPackageType::Snapshot(snapshot_type) = accounts_package.package_type else {
-            panic!("The AccountsPackage must be of type Snapshot in order to make a SnapshotPackage!");
+            panic!(
+                "The AccountsPackage must be of type Snapshot in order to make a SnapshotPackage!"
+            );
         };
         let Some(snapshot_info) = accounts_package.snapshot_info else {
-            panic!("The AccountsPackage must have snapshot info in order to make a SnapshotPackage!");
+            panic!(
+                "The AccountsPackage must have snapshot info in order to make a SnapshotPackage!"
+            );
         };
         let snapshot_hash =
             SnapshotHash::new(&accounts_hash, snapshot_info.epoch_accounts_hash.as_ref());
```
```diff
@@ -967,7 +967,11 @@ fn build_known_snapshot_hashes<'a>(
     }

     'to_next_node: for node in nodes {
-        let Some(SnapshotHash {full: full_snapshot_hash, incr: incremental_snapshot_hash}) = get_snapshot_hashes_for_node(node) else {
+        let Some(SnapshotHash {
+            full: full_snapshot_hash,
+            incr: incremental_snapshot_hash,
+        }) = get_snapshot_hashes_for_node(node)
+        else {
             continue 'to_next_node;
         };

```