Pacify clippy

Michael Vines 2022-01-21 16:01:22 -08:00
parent ce4f7601af
commit 6d5bbca630
37 changed files with 157 additions and 194 deletions


@ -136,16 +136,13 @@ impl UiAccount {
UiAccountData::Binary(blob, encoding) => match encoding {
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
UiAccountEncoding::Base64 => base64::decode(blob).ok(),
UiAccountEncoding::Base64Zstd => base64::decode(blob)
.ok()
.map(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
})
.flatten(),
UiAccountEncoding::Base64Zstd => base64::decode(blob).ok().and_then(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
}),
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}?;
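
The hunk above collapses `Option::map(..).flatten()` into `Option::and_then(..)`, which is what clippy's `map_flatten` lint asks for when the closure itself returns an `Option`. A minimal standalone sketch of the pattern, with hypothetical values in place of the account types:

```rust
fn main() {
    let blob = Some("42");

    // Before: map wraps the closure's Option in another Option, and
    // flatten strips one level: Option<Option<_>> -> Option<_>.
    let before = blob.map(|s| s.parse::<u64>().ok()).flatten();

    // After: and_then chains the fallible step directly.
    let after = blob.and_then(|s| s.parse::<u64>().ok());

    assert_eq!(before, after);
}
```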


@ -116,7 +116,7 @@ fn make_create_message(
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
.flat_map(|_| {
let program_id = if mint.is_some() {
inline_spl_token::id()
} else {
@ -148,7 +148,6 @@ fn make_create_message(
instructions
})
.flatten()
.collect();
Message::new(&instructions, Some(&keypair.pubkey()))
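
Here the `.map(|_| { .. })` opener becomes `.flat_map(|_| { .. })` and the trailing `.flatten()` is dropped: clippy's `map_flatten` lint again, this time on an iterator whose closure yields a collection. A sketch of the same shape with placeholder data:

```rust
fn main() {
    // The Vec returned by the closure stands in for the instruction batch.
    let before: Vec<u32> = (0..3).map(|i| vec![i, i + 10]).flatten().collect();
    let after: Vec<u32> = (0..3).flat_map(|i| vec![i, i + 10]).collect();

    assert_eq!(before, after);
}
```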


@ -328,7 +328,7 @@ pub fn is_derivation<T>(value: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
let value = value.as_ref().replace("'", "");
let value = value.as_ref().replace('\'', "");
let mut parts = value.split('/');
let account = parts.next().unwrap();
account
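
Swapping the one-character string `"'"` for the char literal `'\''` addresses clippy's `single_char_pattern` lint: a `char` pattern skips the substring-search machinery a `&str` pattern pulls in. The two calls are behaviorally identical:

```rust
fn main() {
    let derivation = "m/44'/501'/0'";

    // &str pattern (linted) and char pattern (preferred) strip the same marks.
    assert_eq!(derivation.replace("'", ""), derivation.replace('\'', ""));
}
```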


@ -2285,6 +2285,7 @@ impl fmt::Display for CliBlock {
let sign = if reward.lamports < 0 { "-" } else { "" };
total_rewards += reward.lamports;
#[allow(clippy::format_in_format_args)]
writeln!(
f,
" {:<44} {:^15} {:>15} {} {}",

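The hunk above takes the other route: rather than restructuring, it adds `#[allow(clippy::format_in_format_args)]`, presumably because the nested `format!` feeding a padded `{}` slot is intentional there. A sketch of the shape the lint objects to, with hypothetical reward values:

```rust
fn main() {
    let lamports: i64 = -5_000;
    let sign = if lamports < 0 { "-" } else { "" };

    // format! nested inside another formatting macro trips the lint; a
    // statement-level allow keeps the nesting when it is genuinely needed,
    // e.g. to width-pad a value that is itself composed by format!.
    #[allow(clippy::format_in_format_args)]
    println!("{:>15}", format!("{}{}", sign, lamports.unsigned_abs()));
}
```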

@ -78,7 +78,7 @@ fn test_cli_program_deploy_non_upgradeable() {
assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
assert_eq!(account0.owner, bpf_loader::id());
assert!(account0.executable);
let mut file = File::open(noop_path.to_str().unwrap().to_string()).unwrap();
let mut file = File::open(noop_path.to_str().unwrap()).unwrap();
let mut elf = Vec::new();
file.read_to_end(&mut elf).unwrap();
assert_eq!(account0.data, elf);
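
`File::open` takes `impl AsRef<Path>`, which `&str` already satisfies, so the `.to_string()` only manufactured a temporary `String`. The same cleanup recurs in several files below. A runnable sketch (the path is a hypothetical stand-in):

```rust
use std::fs::File;

fn main() -> std::io::Result<()> {
    let path = "Cargo.toml"; // hypothetical file, for illustration only

    // Both lines open the same file; the first allocates a String for nothing.
    let _a = File::open(path.to_string())?;
    let _b = File::open(path)?;
    Ok(())
}
```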


@ -435,7 +435,7 @@ impl RpcSender for MockSender {
value: vec![Value::Null, Value::Null]
})?,
"getProgramAccounts" => {
let pubkey = Pubkey::from_str(&PUBKEY.to_string()).unwrap();
let pubkey = Pubkey::from_str(PUBKEY).unwrap();
let account = Account {
lamports: 1_000_000,
data: vec![],


@ -43,18 +43,16 @@ pub fn sample_txs<T>(
total_elapsed = start_time.elapsed();
let elapsed = now.elapsed();
now = Instant::now();
let mut txs;
match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) {
Err(e) => {
// ThinClient with multiple options should pick a better one now.
info!("Couldn't get transaction count {:?}", e);
sleep(Duration::from_secs(sample_period));
continue;
}
Ok(tx_count) => {
txs = tx_count;
}
}
let mut txs =
match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) {
Err(e) => {
// ThinClient with multiple options should pick a better one now.
info!("Couldn't get transaction count {:?}", e);
sleep(Duration::from_secs(sample_period));
continue;
}
Ok(tx_count) => tx_count,
};
if txs < last_txs {
info!("Expected txs({}) >= last_txs({})", txs, last_txs);
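
Declaring `let mut txs;` and assigning it inside the `match` arms is what clippy's `needless_late_init` lint flags; binding the `match` result directly, as the new code does, also lets the error arm diverge naturally. A condensed sketch of the rewrite:

```rust
fn sample(raw: &str) -> Option<u64> {
    // Before (linted):
    //     let txs;
    //     match raw.parse() { Ok(x) => txs = x, Err(_) => return None }
    // After: the match is the initializer; the Err arm simply diverges.
    let txs = match raw.parse::<u64>() {
        Ok(x) => x,
        Err(_) => return None,
    };
    Some(txs + 1)
}

fn main() {
    assert_eq!(sample("41"), Some(42));
    assert_eq!(sample("nope"), None);
}
```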


@ -490,79 +490,82 @@ impl BankingStage {
let mut proc_start = Measure::start("consume_buffered_process");
let mut reached_end_of_slot = None;
RetainMut::retain_mut(buffered_packet_batches, |buffered_packet_batch_and_offsets| {
let (packet_batch, ref mut original_unprocessed_indexes, _forwarded) =
buffered_packet_batch_and_offsets;
if let Some((next_leader, bank)) = &reached_end_of_slot {
// We've hit the end of this slot, no need to perform more processing,
// just filter the remaining packets for the invalid (e.g. too old) ones
let new_unprocessed_indexes = Self::filter_unprocessed_packets(
bank,
packet_batch,
original_unprocessed_indexes,
my_pubkey,
*next_leader,
banking_stage_stats,
);
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
)
} else {
let bank_start = poh_recorder.lock().unwrap().bank_start();
if let Some(BankStart {
working_bank,
bank_creation_time,
}) = bank_start
{
let (processed, verified_txs_len, new_unprocessed_indexes) =
Self::process_packets_transactions(
&working_bank,
&bank_creation_time,
recorder,
packet_batch,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
banking_stage_stats,
qos_service,
);
if processed < verified_txs_len
|| !Bank::should_bank_still_be_processing_txs(
&bank_creation_time,
max_tx_ingestion_ns,
)
{
reached_end_of_slot = Some((
poh_recorder.lock().unwrap().next_slot_leader(),
working_bank,
));
}
new_tx_count += processed;
// Out of the buffered packets just retried, collect any still unprocessed
// transactions in this batch for forwarding
rebuffered_packet_count += new_unprocessed_indexes.len();
let has_more_unprocessed_transactions =
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
);
if let Some(test_fn) = &test_fn {
test_fn();
}
has_more_unprocessed_transactions
RetainMut::retain_mut(
buffered_packet_batches,
|buffered_packet_batch_and_offsets| {
let (packet_batch, ref mut original_unprocessed_indexes, _forwarded) =
buffered_packet_batch_and_offsets;
if let Some((next_leader, bank)) = &reached_end_of_slot {
// We've hit the end of this slot, no need to perform more processing,
// just filter the remaining packets for the invalid (e.g. too old) ones
let new_unprocessed_indexes = Self::filter_unprocessed_packets(
bank,
packet_batch,
original_unprocessed_indexes,
my_pubkey,
*next_leader,
banking_stage_stats,
);
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
)
} else {
rebuffered_packet_count += original_unprocessed_indexes.len();
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
original_unprocessed_indexes
));
true
let bank_start = poh_recorder.lock().unwrap().bank_start();
if let Some(BankStart {
working_bank,
bank_creation_time,
}) = bank_start
{
let (processed, verified_txs_len, new_unprocessed_indexes) =
Self::process_packets_transactions(
&working_bank,
&bank_creation_time,
recorder,
packet_batch,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
banking_stage_stats,
qos_service,
);
if processed < verified_txs_len
|| !Bank::should_bank_still_be_processing_txs(
&bank_creation_time,
max_tx_ingestion_ns,
)
{
reached_end_of_slot = Some((
poh_recorder.lock().unwrap().next_slot_leader(),
working_bank,
));
}
new_tx_count += processed;
// Out of the buffered packets just retried, collect any still unprocessed
// transactions in this batch for forwarding
rebuffered_packet_count += new_unprocessed_indexes.len();
let has_more_unprocessed_transactions =
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
);
if let Some(test_fn) = &test_fn {
test_fn();
}
has_more_unprocessed_transactions
} else {
rebuffered_packet_count += original_unprocessed_indexes.len();
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
original_unprocessed_indexes
));
true
}
}
}
});
},
);
proc_start.stop();


@ -4327,9 +4327,9 @@ pub mod tests {
for i in 0..std::cmp::max(new_vote_pubkeys.len(), new_node_pubkeys.len()) {
propagated_stats.is_propagated = false;
let len = std::cmp::min(i, new_vote_pubkeys.len());
let mut voted_pubkeys = new_vote_pubkeys[..len].iter().copied().collect();
let mut voted_pubkeys = new_vote_pubkeys[..len].to_vec();
let len = std::cmp::min(i, new_node_pubkeys.len());
let mut node_pubkeys = new_node_pubkeys[..len].iter().copied().collect();
let mut node_pubkeys = new_node_pubkeys[..len].to_vec();
let did_newly_reach_threshold =
ReplayStage::update_slot_propagated_threshold_from_votes(
&mut voted_pubkeys,

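For a slice of `Copy` keys, `.iter().copied().collect()` and `.to_vec()` build the same `Vec`, and clippy prefers the latter as both shorter and able to copy the slice in one shot. A sketch with placeholder keys:

```rust
fn main() {
    let new_vote_pubkeys = [7u64, 8, 9]; // stand-ins for Pubkey values

    let a: Vec<u64> = new_vote_pubkeys[..2].iter().copied().collect();
    let b = new_vote_pubkeys[..2].to_vec();

    assert_eq!(a, b);
}
```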

@ -60,7 +60,7 @@ fn pubkey_from_str(key_str: &str) -> Result<Pubkey, Box<dyn error::Error>> {
pub fn load_genesis_accounts(file: &str, genesis_config: &mut GenesisConfig) -> io::Result<u64> {
let mut lamports = 0;
let accounts_file = File::open(file.to_string())?;
let accounts_file = File::open(file)?;
let genesis_accounts: HashMap<String, Base64Account> =
serde_yaml::from_reader(accounts_file)


@ -708,9 +708,9 @@ impl ClusterInfo {
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {}\n",
rpc_addr.to_string(),
if node.id == my_pubkey { "me" } else { "" }.to_string(),
if node.id == my_pubkey { "me" } else { "" },
now.saturating_sub(last_updated),
node.id.to_string(),
node.id,
if let Some(node_version) = node_version {
node_version.to_string()
} else {
@ -778,9 +778,9 @@ impl ClusterInfo {
} else {
"none".to_string()
},
if node.id == my_pubkey { "me" } else { "" }.to_string(),
if node.id == my_pubkey { "me" } else { "" },
now.saturating_sub(last_updated),
node.id.to_string(),
node.id,
if let Some(node_version) = node_version {
node_version.to_string()
} else {


@ -45,7 +45,7 @@ impl Config {
}
fn _load(config_file: &str) -> Result<Self, io::Error> {
let file = File::open(config_file.to_string())?;
let file = File::open(config_file)?;
let config = serde_yaml::from_reader(file)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
Ok(config)


@ -1890,10 +1890,7 @@ impl Blockstore {
}
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_block_time".to_string(), String)
);
datapoint_info!("blockstore-rpc-api", ("method", "get_block_time", String));
let _lock = self.check_lowest_cleanup_slot(slot)?;
self.blocktime_cf.get(slot)
}
@ -1903,10 +1900,7 @@ impl Blockstore {
}
pub fn get_block_height(&self, slot: Slot) -> Result<Option<u64>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_block_height".to_string(), String)
);
datapoint_info!("blockstore-rpc-api", ("method", "get_block_height", String));
let _lock = self.check_lowest_cleanup_slot(slot)?;
self.block_height_cf.get(slot)
}
@ -1925,10 +1919,7 @@ impl Blockstore {
slot: Slot,
require_previous_blockhash: bool,
) -> Result<VersionedConfirmedBlock> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_rooted_block".to_string(), String)
);
datapoint_info!("blockstore-rpc-api", ("method", "get_rooted_block", String));
let _lock = self.check_lowest_cleanup_slot(slot)?;
if self.is_root(slot) {
@ -2256,11 +2247,7 @@ impl Blockstore {
) -> Result<Option<(Slot, TransactionStatusMeta)>> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_rooted_transaction_status".to_string(),
String
)
("method", "get_rooted_transaction_status", String)
);
self.get_transaction_status(signature, &[])
}
@ -2273,7 +2260,7 @@ impl Blockstore {
) -> Result<Option<(Slot, TransactionStatusMeta)>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_transaction_status".to_string(), String)
("method", "get_transaction_status", String)
);
self.get_transaction_status_with_counter(signature, confirmed_unrooted_slots)
.map(|(status, _)| status)
@ -2286,7 +2273,7 @@ impl Blockstore {
) -> Result<Option<VersionedConfirmedTransactionWithStatusMeta>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_rooted_transaction".to_string(), String)
("method", "get_rooted_transaction", String)
);
self.get_transaction_with_status(signature, &[])
}
@ -2299,7 +2286,7 @@ impl Blockstore {
) -> Result<Option<VersionedConfirmedTransactionWithStatusMeta>> {
datapoint_info!(
"blockstore-rpc-api",
("method", "get_complete_transaction".to_string(), String)
("method", "get_complete_transaction", String)
);
let last_root = self.last_root();
let confirmed_unrooted_slots: Vec<_> =
@ -2442,11 +2429,7 @@ impl Blockstore {
) -> Result<Vec<Signature>> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_confirmed_signatures_for_address".to_string(),
String
)
("method", "get_confirmed_signatures_for_address", String)
);
self.find_address_signatures(pubkey, start_slot, end_slot)
.map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect())
@ -2491,11 +2474,7 @@ impl Blockstore {
) -> Result<SignatureInfosForAddress> {
datapoint_info!(
"blockstore-rpc-api",
(
"method",
"get_confirmed_signatures_for_address2".to_string(),
String
)
("method", "get_confirmed_signatures_for_address2", String)
);
let last_root = self.last_root();
let confirmed_unrooted_slots: Vec<_> = AncestorIterator::new_inclusive(highest_slot, self)
@ -3411,7 +3390,7 @@ fn send_signals(
"blockstore_error",
(
"error",
"Unable to send newly completed slot because channel is full".to_string(),
"Unable to send newly completed slot because channel is full",
String
),
);
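
Throughout this file the `("method", "...".to_string(), String)` fields drop their `.to_string()`: the macro only needs something it can format, so the per-call `String` allocation was waste. A sketch of the shape with a hypothetical stand-in macro (not the real solana-metrics one):

```rust
// Hypothetical stand-in for the datapoint! family: it only formats the
// value, so a &str literal serves as well as an owned String.
macro_rules! datapoint_sketch {
    ($name:expr, ($field:expr, $value:expr, String)) => {
        println!("{},{}=\"{}\"", $name, $field, $value);
    };
}

fn main() {
    datapoint_sketch!("blockstore-rpc-api", ("method", "get_block_time", String));
}
```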


@ -1152,30 +1152,26 @@ pub fn get_shred_slot_index_type(
return None;
}
let index;
match limited_deserialize::<u32>(&p.data[index_start..index_end]) {
Ok(x) => index = x,
let index = match limited_deserialize::<u32>(&p.data[index_start..index_end]) {
Ok(x) => x,
Err(_e) => {
stats.index_bad_deserialize += 1;
return None;
}
}
};
if index >= MAX_DATA_SHREDS_PER_SLOT as u32 {
stats.index_out_of_bounds += 1;
return None;
}
let slot;
match limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
Ok(x) => {
slot = x;
}
let slot = match limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
Ok(x) => x,
Err(_e) => {
stats.slot_bad_deserialize += 1;
return None;
}
}
};
let shred_type = match ShredType::from_u8(p.data[OFFSET_OF_SHRED_TYPE]) {
None => {


@ -2591,7 +2591,7 @@ fn test_votes_land_in_fork_during_long_partition() {
// Should finish faster than if the cluster were relying on replay vote
// refreshing to refresh the vote on blockhash expiration for the vote
// transaction.
!(start.elapsed() > Duration::from_millis(max_wait)),
start.elapsed() <= Duration::from_millis(max_wait),
"Went too long {} ms without a root",
max_wait,
);
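
`!(elapsed > max_wait)` and `elapsed <= max_wait` are the same predicate; clippy's `nonminimal_bool` lint prefers the direct comparison, which also reads as the invariant being asserted. For example:

```rust
use std::time::Duration;

fn main() {
    let elapsed = Duration::from_millis(250);
    let max_wait = Duration::from_millis(1_000);

    // The negated comparison and the direct one always agree.
    assert_eq!(!(elapsed > max_wait), elapsed <= max_wait);
    assert!(elapsed <= max_wait, "Went too long without a root");
}
```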


@ -18,7 +18,7 @@ impl DataPoint {
pub fn add_field_str(&mut self, name: &'static str, value: &str) -> &mut Self {
self.fields
.push((name, format!("\"{}\"", value.replace("\"", "\\\""))));
.push((name, format!("\"{}\"", value.replace('\"', "\\\""))));
self
}
@ -151,7 +151,7 @@ macro_rules! datapoint_trace {
mod test {
#[test]
fn test_datapoint() {
datapoint_debug!("name", ("field name", "test".to_string(), String));
datapoint_debug!("name", ("field name", "test", String));
datapoint_info!("name", ("field name", 12.34_f64, f64));
datapoint_trace!("name", ("field name", true, bool));
datapoint_warn!("name", ("field name", 1, i64));
@ -180,7 +180,7 @@ mod test {
let point = create_datapoint!(
@point "name",
("i64", 1, i64),
("String", "string space string".to_string(), String),
("String", "string space string", String),
("f64", 12.34_f64, f64),
("bool", true, bool)
);


@ -373,7 +373,7 @@ impl RemoteWallet<hidapi::DeviceInfo> for LedgerWallet {
.product_string()
.unwrap_or("Unknown")
.to_lowercase()
.replace(" ", "-");
.replace(' ', "-");
let serial = dev_info.serial_number().unwrap_or("Unknown").to_string();
let host_device_path = dev_info.path().to_string_lossy().to_string();
let version = self.get_firmware_version()?;


@ -6344,7 +6344,7 @@ pub mod tests {
assert_eq!(verify_pubkey(&pubkey.to_string()).unwrap(), pubkey);
let bad_pubkey = "a1b2c3d4";
assert_eq!(
verify_pubkey(&bad_pubkey.to_string()),
verify_pubkey(bad_pubkey),
Err(Error::invalid_params("Invalid param: WrongSize"))
);
}
@ -6363,7 +6363,7 @@ pub mod tests {
);
let bad_signature = "a1b2c3d4";
assert_eq!(
verify_signature(&bad_signature.to_string()),
verify_signature(bad_signature),
Err(Error::invalid_params("Invalid param: WrongSize"))
);
}
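
`bad_pubkey` is already a `&str`, so `&bad_pubkey.to_string()` allocated a `String` just to re-borrow it at the same type. Passing the literal through is equivalent; a sketch with a hypothetical validator in place of `verify_pubkey`:

```rust
// Hypothetical stand-in for verify_pubkey/verify_signature: takes &str.
fn verify(input: &str) -> Result<(), String> {
    if input.len() == 44 {
        Ok(())
    } else {
        Err("Invalid param: WrongSize".to_string())
    }
}

fn main() {
    let bad = "a1b2c3d4";

    // Identical results; the second form skips the round-trip allocation.
    assert_eq!(verify(&bad.to_string()), verify(bad));
}
```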


@ -1097,7 +1097,7 @@ impl RpcSubscriptions {
inc_new_counter_info!("rpc-subscription-notify-bank-or-gossip", total_notified);
datapoint_info!(
"rpc_subscriptions",
("source", source.to_string(), String),
("source", source, String),
(
"num_account_subscriptions",
num_accounts_found.load(Ordering::Relaxed),


@ -90,7 +90,7 @@ fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &[Tra
}
fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Transaction]) {
for transaction in transactions.to_owned() {
for transaction in transactions.iter().cloned() {
bank_client.async_send_transaction(transaction).unwrap();
}
for _ in 0..1_000_000_000_u64 {


@ -4207,16 +4207,15 @@ impl AccountsDb {
.fetch_add(scan_storages_elasped.as_us(), Ordering::Relaxed);
let mut purge_accounts_index_elapsed = Measure::start("purge_accounts_index_elapsed");
let reclaims;
match scan_result {
let reclaims = match scan_result {
ScanStorageResult::Cached(_) => {
panic!("Should not see cached keys in this `else` branch, since we checked this slot did not exist in the cache above");
}
ScanStorageResult::Stored(stored_keys) => {
// Purge this slot from the accounts index
reclaims = self.purge_keys_exact(stored_keys.lock().unwrap().iter());
self.purge_keys_exact(stored_keys.lock().unwrap().iter())
}
}
};
purge_accounts_index_elapsed.stop();
purge_stats
.purge_accounts_index_elapsed
@ -5104,12 +5103,11 @@ impl AccountsDb {
.accounts_index
.account_maps
.iter()
.map(|map| {
.flat_map(|map| {
let mut keys = map.read().unwrap().keys();
keys.sort_unstable(); // hashmap is not ordered, but bins are relative to each other
keys
})
.flatten()
.collect();
collect.stop();


@ -1154,7 +1154,7 @@ pub mod tests {
let sorted2 = chunk.clone();
let mut with_left_over = vec![left_over_1];
with_left_over.extend(sorted2[0..plus1 - 2].to_vec().into_iter().map(|i| i.hash));
with_left_over.extend(sorted2[0..plus1 - 2].iter().cloned().map(|i| i.hash));
let expected_hash2 = AccountsHash::compute_merkle_root(
with_left_over[0..target_fanout]
.iter()
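
`to_vec().into_iter()` clones the whole sub-slice into a temporary `Vec` just to walk it once; `iter().cloned()` clones each element lazily with no intermediate buffer. A sketch with placeholder items:

```rust
#[derive(Clone, PartialEq, Debug)]
struct Item {
    hash: u64,
}

fn main() {
    let sorted2 = vec![Item { hash: 1 }, Item { hash: 2 }, Item { hash: 3 }];

    // Same output, but the first builds and drops a throwaway Vec.
    let a: Vec<u64> = sorted2[0..2].to_vec().into_iter().map(|i| i.hash).collect();
    let b: Vec<u64> = sorted2[0..2].iter().cloned().map(|i| i.hash).collect();

    assert_eq!(a, b);
}
```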


@ -12891,8 +12891,7 @@ pub(crate) mod tests {
assert!(cache.get(&key4).is_some());
let num_retained = [&key1, &key2, &key3]
.iter()
.map(|key| cache.get(key))
.flatten()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 2);
@ -12903,8 +12902,7 @@ pub(crate) mod tests {
assert!(cache.get(&key3).is_some());
let num_retained = [&key1, &key2, &key4]
.iter()
.map(|key| cache.get(key))
.flatten()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 2);
}
@ -12937,8 +12935,7 @@ pub(crate) mod tests {
assert!(cache.get(&key4).is_some());
let num_retained = [&key1, &key2, &key3]
.iter()
.map(|key| cache.get(key))
.flatten()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 2);
@ -12948,8 +12945,7 @@ pub(crate) mod tests {
assert!(cache.get(&key3).is_some());
let num_retained = [&key2, &key4]
.iter()
.map(|key| cache.get(key))
.flatten()
.filter_map(|key| cache.get(key))
.count();
assert_eq!(num_retained, 1);
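
In these test loops, `.map(|key| cache.get(key)).flatten()` keeps the hits and drops the misses, which is exactly `filter_map`. A self-contained version of the counting pattern:

```rust
use std::collections::HashMap;

fn main() {
    let mut cache = HashMap::new();
    cache.insert("key1", ());
    cache.insert("key3", ());

    // filter_map fuses the Option-producing lookup with the None-dropping.
    let num_retained = ["key1", "key2", "key3"]
        .iter()
        .filter_map(|key| cache.get(key))
        .count();

    assert_eq!(num_retained, 2);
}
```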


@ -315,8 +315,7 @@ impl SyncClient for BankClient {
fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
SanitizedMessage::try_from(message.clone())
.ok()
.map(|sanitized_message| self.bank.get_fee_for_message(&sanitized_message))
.flatten()
.and_then(|sanitized_message| self.bank.get_fee_for_message(&sanitized_message))
.ok_or_else(|| {
TransportError::IoError(io::Error::new(
io::ErrorKind::Other,


@ -574,7 +574,7 @@ mod tests {
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let descendants = bank_forks.descendants();
let children: HashSet<u64> = [1u64, 2u64].to_vec().into_iter().collect();
let children: HashSet<u64> = [1u64, 2u64].iter().copied().collect();
assert_eq!(children, *descendants.get(&0).unwrap());
assert!(descendants[&1].is_empty());
assert!(descendants[&2].is_empty());


@ -671,7 +671,7 @@ impl<T: IndexValue> InMemAccountsIndex<T> {
where
R: RangeBounds<Pubkey>,
{
assert!(!(only_add_if_already_held && !start_holding));
assert!(!only_add_if_already_held || start_holding);
let start = match range.start_bound() {
Bound::Included(bound) | Bound::Excluded(bound) => *bound,
Bound::Unbounded => Pubkey::new(&[0; 32]),
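
`!(only_add_if_already_held && !start_holding)` rewrites by De Morgan's law to `!only_add_if_already_held || start_holding`, i.e. "if we only add when already held, we must be starting to hold". A quick exhaustive check of the equivalence:

```rust
fn main() {
    for only_add_if_already_held in [false, true] {
        for start_holding in [false, true] {
            // De Morgan: !(a && !b) == !a || b, for every input.
            assert_eq!(
                !(only_add_if_already_held && !start_holding),
                !only_add_if_already_held || start_holding
            );
        }
    }
}
```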


@ -418,7 +418,7 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m
);
None
}
1 => Some(cdylib_targets[0].replace("-", "_")),
1 => Some(cdylib_targets[0].replace('-', "_")),
_ => {
eprintln!(
"{} crate contains multiple cdylib targets: {:?}",
@ -821,7 +821,7 @@ fn main() {
dump: matches.is_present("dump"),
features: values_t!(matches, "features", String)
.ok()
.unwrap_or_else(Vec::new),
.unwrap_or_default(),
generate_child_script_on_failure: matches.is_present("generate_child_script_on_failure"),
no_default_features: matches.is_present("no_default_features"),
offline: matches.is_present("offline"),
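
`unwrap_or_else(Vec::new)` spells out the `Default` impl that `unwrap_or_default()` already names; the two are interchangeable here. Minimal demonstration:

```rust
fn main() {
    let features: Option<Vec<String>> = None;

    let a: Vec<String> = features.clone().unwrap_or_else(Vec::new);
    let b: Vec<String> = features.unwrap_or_default();

    assert_eq!(a, b); // both are the empty Vec
}
```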


@ -309,10 +309,10 @@ fn main() {
bpf_out_dir: value_t!(matches, "bpf_out_dir", String).ok(),
extra_cargo_test_args: values_t!(matches, "extra_cargo_test_args", String)
.ok()
.unwrap_or_else(Vec::new),
.unwrap_or_default(),
features: values_t!(matches, "features", String)
.ok()
.unwrap_or_else(Vec::new),
.unwrap_or_default(),
generate_child_script_on_failure: matches.is_present("generate_child_script_on_failure"),
test_name: value_t!(matches, "test", String).ok(),
no_default_features: matches.is_present("no_default_features"),


@ -188,7 +188,7 @@ mod tests {
fn packed_len() {
assert_eq!(
get_packed_len::<TestEnum>(),
size_of::<u8>() + size_of::<u64>() + size_of::<u8>() * 8
size_of::<u8>() + size_of::<u64>() + u8::BITS as usize
);
assert_eq!(
get_packed_len::<TestStruct>(),
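
`u8::BITS` names the bit width directly instead of deriving it as `size_of::<u8>() * 8`; the two agree by definition. For instance:

```rust
fn main() {
    // u8::BITS is a u32 constant; cast to usize to match size_of arithmetic.
    assert_eq!(u8::BITS as usize, std::mem::size_of::<u8>() * 8);
    assert_eq!(u64::BITS as usize, std::mem::size_of::<u64>() * 8);
}
```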


@ -161,7 +161,7 @@ fn get_data_slice<'a>(
if signature_index >= instruction_datas.len() {
return Err(PrecompileError::InvalidDataOffsets);
}
&instruction_datas[signature_index]
instruction_datas[signature_index]
};
let start = offset_start as usize;
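
Indexing `instruction_datas` already yields the borrowed slice, so the leading `&` was immediately auto-dereferenced away; clippy's `needless_borrow` lint drops it. A sketch of the same shape with hypothetical data:

```rust
// Hypothetical stand-in: a slice of instruction data slices.
fn get_data_slice<'a>(instruction_datas: &[&'a [u8]], index: usize) -> &'a [u8] {
    // `&instruction_datas[index]` would borrow a value that is already a
    // reference; the bare index expression returns the same &[u8].
    instruction_datas[index]
}

fn main() {
    let data: &[u8] = &[0xde, 0xad];
    assert_eq!(get_data_slice(&[data], 0), data);
}
```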


@ -24,7 +24,7 @@ pub fn write_pubkey_file(outfile: &str, pubkey: Pubkey) -> Result<(), Box<dyn st
#[cfg(feature = "full")]
pub fn read_pubkey_file(infile: &str) -> Result<Pubkey, Box<dyn std::error::Error>> {
let f = std::fs::File::open(infile.to_string())?;
let f = std::fs::File::open(infile)?;
let printable: String = serde_json::from_reader(f)?;
use std::str::FromStr;


@ -104,7 +104,7 @@ pub fn receiver(
use_pinned_memory: bool,
) -> JoinHandle<()> {
let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
assert!(!res.is_err(), "streamer::receiver set_read_timeout error");
assert!(res.is_ok(), "streamer::receiver set_read_timeout error");
let exit = exit.clone();
Builder::new()
.name("solana-receiver".to_string())
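
`!res.is_err()` is `res.is_ok()` with a double negation; the rewrite states the expectation the assert is actually making. For example:

```rust
fn main() {
    let res: Result<(), String> = Ok(());

    // Equivalent checks; the positive form reads as the intended invariant.
    assert!(!res.is_err());
    assert!(res.is_ok(), "streamer::receiver set_read_timeout error");
}
```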


@ -577,8 +577,7 @@ impl VersionedTransactionWithStatusMeta {
let dynamic_keys_iter = self
.meta
.iter()
.map(|meta| meta.loaded_addresses.ordered_iter())
.flatten();
.flat_map(|meta| meta.loaded_addresses.ordered_iter());
static_keys_iter.chain(dynamic_keys_iter)
}


@ -61,7 +61,7 @@ fn main() {
let name = v["name"].as_str().unwrap().trim_matches('\"').to_string();
if last_commit.is_none() {
last_commit = get_last_metrics(&"commit".to_string(), &db, &name, branch).ok();
last_commit = get_last_metrics("commit", &db, &name, branch).ok();
}
let median: i64 = v["median"].to_string().parse().unwrap();
@ -79,9 +79,9 @@ fn main() {
*/
let last_median =
get_last_metrics(&"median".to_string(), &db, &name, branch).unwrap_or_default();
let last_deviation = get_last_metrics(&"deviation".to_string(), &db, &name, branch)
.unwrap_or_default();
get_last_metrics("median", &db, &name, branch).unwrap_or_default();
let last_deviation =
get_last_metrics("deviation", &db, &name, branch).unwrap_or_default();
results.insert(name, (median, deviation, last_median, last_deviation));
}


@ -169,10 +169,9 @@ impl Dashboard {
.unwrap_or_else(|| '-'.to_string()),
snapshot_slot_info
.as_ref()
.map(|snapshot_slot_info| snapshot_slot_info
.and_then(|snapshot_slot_info| snapshot_slot_info
.incremental
.map(|incremental| incremental.to_string()))
.flatten()
.unwrap_or_else(|| '-'.to_string()),
transaction_count,
identity_balance


@ -334,10 +334,9 @@ fn wait_for_restart_window(
.unwrap_or_else(|| '-'.to_string()),
snapshot_slot_info
.as_ref()
.map(|snapshot_slot_info| snapshot_slot_info
.and_then(|snapshot_slot_info| snapshot_slot_info
.incremental
.map(|incremental| incremental.to_string()))
.flatten()
.unwrap_or_else(|| '-'.to_string()),
)
},


@ -152,7 +152,7 @@ fn get_config() -> Config {
let json_rpc_url =
value_t!(matches, "json_rpc_url", String).unwrap_or_else(|_| config.json_rpc_url.clone());
let validator_identity_pubkeys: Vec<_> = pubkeys_of(&matches, "validator_identities")
.unwrap_or_else(Vec::new)
.unwrap_or_default()
.into_iter()
.collect();