patches clippy errors from new rust nightly release (#27996)

behzad nouri 2022-09-22 22:23:03 +00:00 committed by GitHub
parent ff71df4695
commit 9a57c64f21
GPG Key ID: 4AEE18F83AFDEB23
64 changed files with 155 additions and 177 deletions
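
Most hunks below appear to be mechanical fixes for a handful of lints that the new nightly clippy began flagging: `needless_borrow` (passing `&x` where `x` is already a reference or is consumed by value), `bool_to_int_with_if` (`if b { 1 } else { 0 }` becomes `u64::from(b)`), hand-rolled defaults replaced by `unwrap_or_default()`/`or_default()`, and explicitly typed `MaybeUninit` bindings. A minimal sketch of the most common case, `needless_borrow`, using a hypothetical `encode` helper rather than any Solana API:

```rust
fn encode(data: &[u8]) -> String {
    data.iter().map(|b| format!("{b:02x}")).collect()
}

fn main() {
    let bytes = vec![1u8, 2, 3];
    let slice: &[u8] = &bytes;
    // Before: encode(&slice) compiles (the compiler auto-derefs the
    // extra layer of &&[u8]) but now trips clippy's needless_borrow.
    let hex = encode(slice); // After: pass the existing reference as-is.
    println!("{hex}");
}
```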

View File

@ -115,10 +115,7 @@ impl UiAccount {
{
UiAccountData::Json(parsed_data)
} else {
UiAccountData::Binary(
base64::encode(&account.data()),
UiAccountEncoding::Base64,
)
UiAccountData::Binary(base64::encode(account.data()), UiAccountEncoding::Base64)
}
}
};

View File

@ -38,7 +38,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let a = p.meta.socket_addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
let data = p.data(..).unwrap_or_default();
send.send_to(data, &a).unwrap();
send.send_to(data, a).unwrap();
num += 1;
}
assert_eq!(num, 10);
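
The `send_to` changes that recur throughout this commit are the same lint: `UdpSocket::send_to` takes `impl ToSocketAddrs`, which `SocketAddr` implements by value (it is `Copy`), so borrowing it first is needless. A self-contained sketch:

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    let receiver = UdpSocket::bind("127.0.0.1:0")?;
    let addr = receiver.local_addr()?; // a SocketAddr, which is Copy
    let sender = UdpSocket::bind("127.0.0.1:0")?;
    // Before: sender.send_to(b"ping", &addr) triggers needless_borrow.
    sender.send_to(b"ping", addr)?; // After: pass the address by value.
    Ok(())
}
```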

View File

@ -105,8 +105,8 @@ impl<T: Clone + Copy + Debug> BucketMap<T> {
fn erase_previous_drives(drives: &[PathBuf]) {
drives.iter().for_each(|folder| {
let _ = fs::remove_dir_all(&folder);
let _ = fs::create_dir_all(&folder);
let _ = fs::remove_dir_all(folder);
let _ = fs::create_dir_all(folder);
})
}

View File

@ -1229,7 +1229,7 @@ mod tests {
} if p == absolute_path_str)
);
assert!(
matches!(parse_signer_source(&relative_path_str).unwrap(), SignerSource {
matches!(parse_signer_source(relative_path_str).unwrap(), SignerSource {
kind: SignerSourceKind::Filepath(p),
derivation_path: None,
legacy: false,

View File

@ -1229,7 +1229,7 @@ mod tests {
} if p == absolute_path_str)
);
assert!(
matches!(parse_signer_source(&relative_path_str).unwrap(), SignerSource {
matches!(parse_signer_source(relative_path_str).unwrap(), SignerSource {
kind: SignerSourceKind::Filepath(p),
derivation_path: None,
legacy: false,

View File

@ -18,7 +18,7 @@ lazy_static! {
/// [lazy_static]: https://docs.rs/lazy_static
pub static ref CONFIG_FILE: Option<String> = {
dirs_next::home_dir().map(|mut path| {
path.extend(&[".config", "solana", "cli", "config.yml"]);
path.extend([".config", "solana", "cli", "config.yml"]);
path.to_str().unwrap().to_string()
})
};
@ -70,7 +70,7 @@ impl Default for Config {
fn default() -> Self {
let keypair_path = {
let mut keypair_path = dirs_next::home_dir().expect("home directory");
keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.extend([".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string()
};
let json_rpc_url = "https://api.mainnet-beta.solana.com".to_string();
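
The `path.extend(&[...])` rewrites in this file work because `PathBuf` implements `Extend<P: AsRef<Path>>` and arrays iterate by value (Rust 1.53+); the borrowed form merely iterates `&&str`, which clippy now flags as a needless borrow. Sketch:

```rust
use std::path::PathBuf;

fn main() {
    let mut path = PathBuf::from("/home/user");
    // Arrays implement IntoIterator by value, so borrowing the literal
    // in `path.extend(&[...])` only changes the item type from &str to
    // &&str; both satisfy AsRef<Path>, making the borrow needless.
    path.extend([".config", "solana", "id.json"]);
    assert!(path.ends_with("id.json"));
    println!("{}", path.display());
}
```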

View File

@ -2058,7 +2058,7 @@ pub fn process_transaction_history(
Some(status) => format!("{:?}", status),
}
},
result.memo.unwrap_or_else(|| "".to_string()),
result.memo.unwrap_or_default(),
);
} else {
println!("{}", result.signature);

View File

@ -818,7 +818,7 @@ fn get_default_program_keypair(program_location: &Option<String>) -> Keypair {
filename.push("-keypair");
keypair_file.set_file_name(filename);
keypair_file.set_extension("json");
if let Ok(keypair) = read_keypair_file(&keypair_file.to_str().unwrap()) {
if let Ok(keypair) = read_keypair_file(keypair_file.to_str().unwrap()) {
keypair
} else {
Keypair::new()

View File

@ -134,7 +134,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
if fake == (i <= self.partition) {
// Send fake shreds to the first N peers
data_shreds.iter().for_each(|b| {
sock.send_to(b.payload(), &peer.tvu_forwards).unwrap();
sock.send_to(b.payload(), peer.tvu_forwards).unwrap();
});
}
});

View File

@ -2823,7 +2823,7 @@ pub mod test {
OpenOptions::new()
.write(true)
.truncate(true)
.open(&path)
.open(path)
.unwrap_or_else(|_| panic!("Failed to truncate file: {:?}", path));
},
);

View File

@ -475,14 +475,9 @@ impl RepairWeight {
.get_mut(&orphan_tree_root)
.expect("Orphan must exist");
let num_skip = if parent_tree_root.is_some() {
// Skip the leaf of the parent tree that the
// orphan would merge with later in a call
// to `merge_trees`
1
} else {
0
};
// Skip the leaf of the parent tree that the orphan would merge
// with later in a call to `merge_trees`
let num_skip = usize::from(parent_tree_root.is_some());
for ancestor in new_ancestors.iter().skip(num_skip).rev() {
self.slot_to_tree.insert(*ancestor, orphan_tree_root);
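
This is the `bool_to_int_with_if` lint: `if b { 1 } else { 0 }` is exactly the lossless `From<bool>` conversion that the integer types already provide, so the branch collapses to one line and the explanatory comment moves above it. The same rewrite appears below as `u64::from(did_complete_bank)` and in several tests. Sketch:

```rust
fn main() {
    let parent_tree_root: Option<u64> = Some(42);
    // Before: an explicit branch.
    let num_skip_old = if parent_tree_root.is_some() { 1 } else { 0 };
    // After: From<bool> maps false -> 0 and true -> 1 for all
    // integer types, so the conversion is lossless.
    let num_skip_new = usize::from(parent_tree_root.is_some());
    assert_eq!(num_skip_old, num_skip_new);
}
```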

View File

@ -935,7 +935,7 @@ impl ReplayStage {
replay_active_banks_time.as_us(),
wait_receive_time.as_us(),
heaviest_fork_failures_time.as_us(),
if did_complete_bank {1} else {0},
u64::from(did_complete_bank),
process_gossip_duplicate_confirmed_slots_time.as_us(),
process_unfrozen_gossip_verified_vote_hashes_time.as_us(),
process_duplicate_slots_time.as_us(),

View File

@ -298,7 +298,7 @@ mod tests {
.collect();
// Create directory of hard links for snapshots
let link_snapshots_dir = tempfile::tempdir_in(&temp_dir).unwrap();
let link_snapshots_dir = tempfile::tempdir_in(temp_dir).unwrap();
for snapshots_path in snapshots_paths {
let snapshot_file_name = snapshots_path.file_name().unwrap();
let link_snapshots_dir = link_snapshots_dir.path().join(snapshot_file_name);

View File

@ -173,7 +173,7 @@ impl TowerStorage for FileTowerStorage {
trace!("load {}", filename.display());
// Ensure to create parent dir here, because restore() precedes save() always
fs::create_dir_all(&filename.parent().unwrap())?;
fs::create_dir_all(filename.parent().unwrap())?;
if let Ok(file) = File::open(&filename) {
// New format

View File

@ -499,7 +499,7 @@ mod tests {
let capacity = transactions.len();
let mut packet_vector = Vec::with_capacity(capacity);
for tx in transactions.iter() {
packet_vector.push(Packet::from_data(None, &tx).unwrap());
packet_vector.push(Packet::from_data(None, tx).unwrap());
}
for index in vote_indexes.iter() {
packet_vector[*index].meta.flags |= PacketFlags::SIMPLE_VOTE_TX;

View File

@ -2072,7 +2072,7 @@ pub fn move_and_async_delete_path(path: impl AsRef<Path> + Copy) {
return;
}
if let Err(err) = std::fs::rename(&path, &path_delete) {
if let Err(err) = std::fs::rename(path, &path_delete) {
warn!(
"Path renaming failed: {}. Falling back to rm_dir in sync mode",
err.to_string()
@ -2094,7 +2094,7 @@ pub fn move_and_async_delete_path(path: impl AsRef<Path> + Copy) {
/// to delete the top level directory it might be able to
/// delete the contents of that directory.
fn delete_contents_of_path(path: impl AsRef<Path> + Copy) {
if let Ok(dir_entries) = std::fs::read_dir(&path) {
if let Ok(dir_entries) = std::fs::read_dir(path) {
for entry in dir_entries.flatten() {
let sub_path = entry.path();
let metadata = match entry.metadata() {

View File

@ -50,7 +50,7 @@ impl WarmQuicCacheService {
.lookup_contact_info(&leader_pubkey, |leader| leader.tpu)
{
let conn = connection_cache.get_connection(&addr);
if let Err(err) = conn.send_wire_transaction(&[0u8]) {
if let Err(err) = conn.send_wire_transaction([0u8]) {
warn!(
"Failed to warmup QUIC connection to the leader {:?}, Error {:?}",
leader_pubkey, err

View File

@ -426,7 +426,7 @@ impl WindowService {
&verified_receiver,
&blockstore,
&leader_schedule_cache,
&handle_duplicate,
handle_duplicate,
&mut metrics,
&mut ws_metrics,
&completed_data_sets_sender,

View File

@ -452,7 +452,7 @@ fn test_concurrent_snapshot_packaging(
// currently sitting in the channel
snapshot_utils::purge_old_bank_snapshots(bank_snapshots_dir);
let mut bank_snapshots = snapshot_utils::get_bank_snapshots_pre(&bank_snapshots_dir);
let mut bank_snapshots = snapshot_utils::get_bank_snapshots_pre(bank_snapshots_dir);
bank_snapshots.sort_unstable();
assert!(bank_snapshots
.into_iter()

View File

@ -548,7 +548,7 @@ impl ClusterInfo {
let filename = self.contact_info_path.join("contact-info.bin");
let tmp_filename = &filename.with_extension("tmp");
match File::create(&tmp_filename) {
match File::create(tmp_filename) {
Ok(mut file) => {
if let Err(err) = bincode::serialize_into(&mut file, &nodes) {
warn!(
@ -565,7 +565,7 @@ impl ClusterInfo {
}
}
match fs::rename(&tmp_filename, &filename) {
match fs::rename(tmp_filename, &filename) {
Ok(()) => {
info!(
"Saved contact info for {} nodes into {}",
@ -1097,7 +1097,7 @@ impl ClusterInfo {
) -> Result<(), GossipError> {
let tpu = tpu.unwrap_or_else(|| self.my_contact_info().tpu);
let buf = serialize(transaction)?;
self.socket.send_to(&buf, &tpu)?;
self.socket.send_to(&buf, tpu)?;
Ok(())
}

View File

@ -167,7 +167,7 @@ fn extract_release_archive(
progress_bar.set_message(format!("{}Extracting...", PACKAGE));
if extract_dir.exists() {
let _ = fs::remove_dir_all(&extract_dir);
let _ = fs::remove_dir_all(extract_dir);
}
let tmp_extract_dir = extract_dir.with_file_name("tmp-extract");
@ -188,7 +188,7 @@ fn extract_release_archive(
}
fn load_release_version(version_yml: &Path) -> Result<ReleaseVersion, String> {
let file = File::open(&version_yml)
let file = File::open(version_yml)
.map_err(|err| format!("Unable to open {:?}: {:?}", version_yml, err))?;
let version: ReleaseVersion = serde_yaml::from_reader(file)
.map_err(|err| format!("Unable to parse {:?}: {:?}", version_yml, err))?;

View File

@ -3,19 +3,19 @@ pub const JSON_RPC_URL: &str = "http://api.devnet.solana.com";
lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {
dirs_next::home_dir().map(|mut path| {
path.extend(&[".config", "solana", "install", "config.yml"]);
path.extend([".config", "solana", "install", "config.yml"]);
path.to_str().unwrap().to_string()
})
};
pub static ref USER_KEYPAIR: Option<String> = {
dirs_next::home_dir().map(|mut path| {
path.extend(&[".config", "solana", "id.json"]);
path.extend([".config", "solana", "id.json"]);
path.to_str().unwrap().to_string()
})
};
pub static ref DATA_DIR: Option<String> = {
dirs_next::home_dir().map(|mut path| {
path.extend(&[".local", "share", "solana", "install"]);
path.extend([".local", "share", "solana", "install"]);
path.to_str().unwrap().to_string()
})
};

View File

@ -73,7 +73,7 @@ const NO_OUTFILE_ARG: ArgConstant<'static> = ArgConstant {
fn word_count_arg<'a>() -> Arg<'a> {
Arg::new(WORD_COUNT_ARG.name)
.long(WORD_COUNT_ARG.long)
.possible_values(&["12", "15", "18", "21", "24"])
.possible_values(["12", "15", "18", "21", "24"])
.default_value("12")
.value_name("NUMBER")
.takes_value(true)
@ -83,7 +83,7 @@ fn word_count_arg<'a>() -> Arg<'a> {
fn language_arg<'a>() -> Arg<'a> {
Arg::new(LANGUAGE_ARG.name)
.long(LANGUAGE_ARG.long)
.possible_values(&[
.possible_values([
"english",
"chinese-simplified",
"chinese-traditional",
@ -143,7 +143,7 @@ fn get_keypair_from_matches(
} else if !config.keypair_path.is_empty() {
&config.keypair_path
} else {
path.extend(&[".config", "solana", "id.json"]);
path.extend([".config", "solana", "id.json"]);
path.to_str().unwrap()
};
signer_from_path(matches, path, "pubkey recovery", wallet_manager)
@ -597,7 +597,7 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
} else if matches.is_present(NO_OUTFILE_ARG.name) {
None
} else {
path.extend(&[".config", "solana", "id.json"]);
path.extend([".config", "solana", "id.json"]);
Some(path.to_str().unwrap())
};
@ -646,7 +646,7 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
let outfile = if matches.is_present("outfile") {
matches.value_of("outfile").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
path.extend([".config", "solana", "id.json"]);
path.to_str().unwrap()
};

View File

@ -277,7 +277,7 @@ pub async fn transaction_history(
"{}, slot={}, memo=\"{}\", status={}",
result.signature,
result.slot,
result.memo.unwrap_or_else(|| "".to_string()),
result.memo.unwrap_or_default(),
match result.err {
None => "Confirmed".to_string(),
Some(err) => format!("Failed: {:?}", err),

View File

@ -19,7 +19,7 @@ pub fn parse_ledger_path(matches: &ArgMatches<'_>, name: &str) -> PathBuf {
// Canonicalize ledger path to avoid issues with symlink creation
pub fn canonicalize_ledger_path(ledger_path: &Path) -> PathBuf {
fs::canonicalize(&ledger_path).unwrap_or_else(|err| {
fs::canonicalize(ledger_path).unwrap_or_else(|err| {
eprintln!(
"Unable to access ledger path '{}': {}",
ledger_path.display(),

View File

@ -234,7 +234,7 @@ impl Blockstore {
}
fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
fs::create_dir_all(&ledger_path)?;
fs::create_dir_all(ledger_path)?;
let blockstore_path = ledger_path.join(
options
.column_options
@ -2579,7 +2579,7 @@ impl Blockstore {
// Check the active_transaction_status_index to see if it contains slot. If so, start with
// that index, as it will contain higher slots
let starting_primary_index = *self.active_transaction_status_index.read().unwrap();
let next_primary_index = if starting_primary_index == 0 { 1 } else { 0 };
let next_primary_index = u64::from(starting_primary_index == 0);
let next_max_slot = self
.transaction_status_index_cf
.get(next_primary_index)?

View File

@ -257,7 +257,7 @@ impl Rocks {
let access_type = options.access_type.clone();
let recovery_mode = options.recovery_mode.clone();
fs::create_dir_all(&path)?;
fs::create_dir_all(path)?;
// Use default database options
if should_disable_auto_compactions(&access_type) {

View File

@ -304,7 +304,7 @@ pub fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache)
let pubkey = keypair.pubkey().to_bytes();
let secret = keypair.secret().to_bytes();
let mut hasher = Sha512::default();
hasher.update(&secret);
hasher.update(secret);
let mut result = hasher.finalize();
result[0] &= 248;
result[31] &= 63;

View File

@ -1327,7 +1327,7 @@ fn test_snapshots_blockstore_floor() {
let archive_info = loop {
let archive =
snapshot_utils::get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir);
snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir);
if archive.is_some() {
trace!("snapshot exists");
break archive.unwrap();

View File

@ -523,7 +523,7 @@ pub fn bind_common(
let addr = SocketAddr::new(ip_addr, port);
let sock_addr = SockAddr::from(addr);
sock.bind(&sock_addr)
.and_then(|_| TcpListener::bind(&addr).map(|listener| (sock.into(), listener)))
.and_then(|_| TcpListener::bind(addr).map(|listener| (sock.into(), listener)))
}
pub fn bind_two_in_range_with_offset(

View File

@ -121,7 +121,7 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option<PathBuf> {
}
// Search /usr/local for a `cuda-` directory that matches a perf-libs subdirectory
for entry in fs::read_dir(&perf_libs_path).unwrap().flatten() {
for entry in fs::read_dir(perf_libs_path).unwrap().flatten() {
let path = entry.path();
if !path.is_dir() {
continue;

View File

@ -396,7 +396,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
pub fn find_file(filename: &str) -> Option<PathBuf> {
for dir in default_shared_object_dirs() {
let candidate = dir.join(&filename);
let candidate = dir.join(filename);
if candidate.exists() {
return Some(candidate);
}
@ -604,7 +604,7 @@ impl ProgramTest {
})
.ok()
.flatten()
.unwrap_or_else(|| "".to_string())
.unwrap_or_default()
);
this.add_account(

View File

@ -108,7 +108,7 @@ fn main() {
program
);
assert!(Command::new("../../cargo-build-sbf")
.args(&[
.args([
"--manifest-path",
&format!("rust/{}/Cargo.toml", program),
"--sbf-out-dir",

View File

@ -128,7 +128,7 @@ native machine code before executing it in the virtual machine.",
.long("use")
.takes_value(true)
.value_name("VALUE")
.possible_values(&["cfg", "disassembler", "interpreter", "jit"])
.possible_values(["cfg", "disassembler", "interpreter", "jit"])
.default_value("jit"),
)
.arg(
@ -159,7 +159,7 @@ native machine code before executing it in the virtual machine.",
.value_name("FORMAT")
.global(true)
.takes_value(true)
.possible_values(&["json", "json-compact"]),
.possible_values(["json", "json-compact"]),
)
.get_matches();
@ -246,7 +246,7 @@ native machine code before executing it in the virtual machine.",
let mut instruction_meter = ThisInstructionMeter { compute_meter };
let program = matches.value_of("PROGRAM").unwrap();
let mut file = File::open(&Path::new(program)).unwrap();
let mut file = File::open(Path::new(program)).unwrap();
let mut magic = [0u8; 4];
file.read_exact(&mut magic).unwrap();
file.seek(SeekFrom::Start(0)).unwrap();

View File

@ -36,7 +36,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
Command::new("udevadm").arg("trigger").output().unwrap();
Command::new("udevadm")
.args(&["control", "--reload-rules"])
.args(["control", "--reload-rules"])
.output()
.unwrap();
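
`Command::args` takes `impl IntoIterator<Item: AsRef<OsStr>>`, and a string-literal array satisfies that by value, so `&[...]` is another needless borrow; the clap `possible_values(&[...])` and csv `write_record(&[...])` hunks elsewhere in this commit are the same pattern. Sketch:

```rust
use std::process::Command;

fn main() {
    // Before: .args(&["hello", "clippy"]) borrows the array needlessly.
    // After: the array iterates by value.
    let output = Command::new("echo")
        .args(["hello", "clippy"])
        .output()
        .expect("failed to spawn echo");
    print!("{}", String::from_utf8_lossy(&output.stdout));
}
```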

View File

@ -57,7 +57,7 @@ impl RpcFilterType {
if bytes.len() > MAX_DATA_BASE64_SIZE {
return Err(RpcFilterError::DataTooLarge);
}
let bytes = base64::decode(&bytes)?;
let bytes = base64::decode(bytes)?;
if bytes.len() > MAX_DATA_SIZE {
Err(RpcFilterError::DataTooLarge)
} else {

View File

@ -818,7 +818,7 @@ impl RpcSubscriptions {
{
debug!("slot notify: {:?}", slot_info);
inc_new_counter_info!("rpc-subscription-notify-slot", 1);
notifier.notify(&slot_info, sub, false);
notifier.notify(slot_info, sub, false);
}
}
NotificationEntry::SlotUpdate(slot_update) => {
@ -857,7 +857,7 @@ impl RpcSubscriptions {
{
debug!("root notify: {:?}", root);
inc_new_counter_info!("rpc-subscription-notify-root", 1);
notifier.notify(&root, sub, false);
notifier.notify(root, sub, false);
}
}
NotificationEntry::Bank(commitment_slots) => {

View File

@ -25,7 +25,7 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) {
id = hash(id.as_ref());
sigbytes.extend(id.as_ref());
let sig = Signature::new(&sigbytes);
status_cache.insert(&blockhash, &sig, 0, Ok(()));
status_cache.insert(&blockhash, sig, 0, Ok(()));
}
}
assert!(status_cache.roots().contains(&0));

View File

@ -15977,7 +15977,7 @@ pub mod tests {
vec![(pk1, vec![slot1].into_iter().collect::<HashSet<_>>())],
purged_stored_account_slots.into_iter().collect::<Vec<_>>()
);
let expected = if already_removed { 1 } else { 0 };
let expected = u64::from(already_removed);
assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), expected);
}
}

View File

@ -192,7 +192,7 @@ pub mod tests {
) {
self.accounts_notified
.entry(meta.pubkey)
.or_insert(Vec::default())
.or_default()
.push((slot, account.clone()));
}
@ -201,7 +201,7 @@ pub mod tests {
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
self.accounts_notified
.entry(account.meta.pubkey)
.or_insert(Vec::default())
.or_default()
.push((slot, account.clone_account()));
}
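
`.or_insert(Vec::default())` constructs the default eagerly even when the entry already exists; `.or_default()` builds it only when the key is vacant and is what clippy asks for here (the dashmap-based secondary-index hunk further down gets the same treatment). Sketch with std's entry API:

```rust
use std::collections::HashMap;

fn main() {
    let mut notified: HashMap<&str, Vec<u64>> = HashMap::new();
    // Before: .or_insert(Vec::default()) evaluates its argument eagerly.
    // After: .or_default() constructs the Vec only if the key is new.
    notified.entry("pubkey-a").or_default().push(1);
    notified.entry("pubkey-a").or_default().push(2);
    assert_eq!(notified["pubkey-a"], vec![1, 2]);
}
```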

View File

@ -659,7 +659,7 @@ pub mod tests {
.read(true)
.write(true)
.create(true)
.open(&path)
.open(path)
.expect("create a test file for mmap");
let result = AppendVec::new_from_file(path, 0);

View File

@ -8998,11 +8998,7 @@ pub(crate) mod tests {
// Since validator 1 and validator 2 have equal smallest stake, it comes down to a comparison
// between their pubkeys.
let tweak_1 = if validator_1_pubkey > validator_2_pubkey {
1
} else {
0
};
let tweak_1 = u64::from(validator_1_pubkey > validator_2_pubkey);
let validator_1_portion =
((validator_1_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + tweak_1;
assert_eq!(
@ -9012,11 +9008,7 @@ pub(crate) mod tests {
// Since validator 1 and validator 2 have equal smallest stake, it comes down to a comparison
// between their pubkeys.
let tweak_2 = if validator_2_pubkey > validator_1_pubkey {
1
} else {
0
};
let tweak_2 = u64::from(validator_2_pubkey > validator_1_pubkey);
let validator_2_portion =
((validator_2_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + tweak_2;
assert_eq!(

View File

@ -316,7 +316,7 @@ impl CostTracker {
fn number_of_accounts(&self) -> usize {
self.cost_by_writable_accounts
.iter()
.map(|(_key, units)| if *units > 0 { 1 } else { 0 })
.map(|(_key, units)| usize::from(*units > 0))
.sum()
}
}

View File

@ -703,7 +703,7 @@ pub mod tests {
let result = ExpectedRentCollection::new(
&pubkey,
&account,
expected_rent_collection_slot_max_epoch + if greater { 1 } else { 0 },
expected_rent_collection_slot_max_epoch + u64::from(greater),
&epoch_schedule,
&rent_collector,
&SlotInfoInEpoch::new(max_slot_in_storages_inclusive, &epoch_schedule),
@ -740,7 +740,7 @@ pub mod tests {
find_unskipped_slot,
None,
);
let epoch_delta = if previous_epoch { 1 } else { 0 };
let epoch_delta = u64::from(previous_epoch);
let slot_delta = epoch_delta * slots_per_epoch;
assert_eq!(
result,
@ -763,7 +763,7 @@ pub mod tests {
let original_rent_epoch = account.rent_epoch();
for already_collected in [true, false] {
// to consider: maybe if we already collected rent_epoch IN this slot and slot matches what we need, then we should return None here
account.set_rent_epoch(original_rent_epoch + if already_collected { 1 } else { 0 });
account.set_rent_epoch(original_rent_epoch + u64::from(already_collected));
let result = ExpectedRentCollection::new(
&pubkey,
&account,
@ -959,7 +959,7 @@ pub mod tests {
} else {
expected_rent_collection_slot_max_epoch
},
rent_epoch: rent_collector.epoch - if prior_epoch { 1 } else { 0 },
rent_epoch: rent_collector.epoch - u64::from(prior_epoch),
}),
"find_unskipped_slot(0): {:?}, rent_collector.epoch: {}, prior_epoch: {}",
find_unskipped_slot(0),
@ -1047,7 +1047,7 @@ pub mod tests {
rent_collector.epoch = epoch_schedule.get_epoch(max_slot_in_storages_inclusive);
let first_slot_in_max_epoch = max_slot_in_storages_inclusive
- max_slot_in_storages_inclusive % slots_per_epoch;
let skip_offset = if skipped_slot { 1 } else { 0 };
let skip_offset = u64::from(skipped_slot);
let mut expected_rent_collection_slot_max_epoch =
first_slot_in_max_epoch + partition_from_pubkey + skip_offset;
let hit_this_epoch =

View File

@ -482,7 +482,7 @@ pub fn unpack_genesis_archive(
let extract_start = Instant::now();
fs::create_dir_all(destination_dir)?;
let tar_bz2 = File::open(&archive_filename)?;
let tar_bz2 = File::open(archive_filename)?;
let tar = BzDecoder::new(BufReader::new(tar_bz2));
let mut archive = Archive::new(tar);
unpack_genesis(

View File

@ -120,12 +120,10 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
pub fn insert(&self, key: &Pubkey, inner_key: &Pubkey) {
{
let pubkeys_map = self.index.get(key).unwrap_or_else(|| {
self.index
.entry(*key)
.or_insert(SecondaryIndexEntryType::default())
.downgrade()
});
let pubkeys_map = self
.index
.get(key)
.unwrap_or_else(|| self.index.entry(*key).or_default().downgrade());
pubkeys_map.insert_if_not_exists(inner_key, &self.stats.num_inner_keys);
}

View File

@ -441,7 +441,7 @@ pub fn archive_snapshot_package(
// Atomically move the archive into position for other validators to find
let metadata = fs::metadata(&archive_path)
.map_err(|e| SnapshotError::IoWithSource(e, "archive path stat"))?;
fs::rename(&archive_path, &snapshot_package.path())
fs::rename(&archive_path, snapshot_package.path())
.map_err(|e| SnapshotError::IoWithSource(e, "archive path rename"))?;
purge_old_snapshot_archives(
@ -1628,10 +1628,8 @@ pub fn purge_old_snapshot_archives(
incremental_snapshot_archives.sort_unstable();
let num_to_retain = if Some(base_slot) == highest_full_snapshot_slot {
maximum_incremental_snapshot_archives_to_retain
} else if retained_full_snapshot_slots.contains(&base_slot) {
1
} else {
0
usize::from(retained_full_snapshot_slots.contains(&base_slot))
};
trace!(
"There are {} incremental snapshot archives for base slot {}, removing {} of them",
@ -1691,7 +1689,7 @@ fn untar_snapshot_create_shared_buffer(
snapshot_tar: &Path,
archive_format: ArchiveFormat,
) -> SharedBuffer {
let open_file = || File::open(&snapshot_tar).unwrap();
let open_file = || File::open(snapshot_tar).unwrap();
match archive_format {
ArchiveFormat::TarBzip2 => SharedBuffer::new(BzDecoder::new(BufReader::new(open_file()))),
ArchiveFormat::TarGzip => SharedBuffer::new(GzDecoder::new(BufReader::new(open_file()))),
@ -3062,7 +3060,7 @@ mod tests {
let temp_snap_dir = tempfile::TempDir::new().unwrap();
for snap_name in snapshot_names {
let snap_path = temp_snap_dir.path().join(&snap_name);
let snap_path = temp_snap_dir.path().join(snap_name);
let mut _snap_file = File::create(snap_path);
}
purge_old_snapshot_archives(

View File

@ -300,11 +300,11 @@ mod tests {
let blockhash = hash(Hash::default().as_ref());
let status_cache = BankStatusCache::default();
assert_eq!(
status_cache.get_status(&sig, &blockhash, &Ancestors::default()),
status_cache.get_status(sig, &blockhash, &Ancestors::default()),
None
);
assert_eq!(
status_cache.get_status_any_blockhash(&sig, &Ancestors::default()),
status_cache.get_status_any_blockhash(sig, &Ancestors::default()),
None
);
}
@ -315,13 +315,13 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = vec![(0, 1)].into_iter().collect();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
assert_eq!(
status_cache.get_status(&sig, &blockhash, &ancestors),
status_cache.get_status(sig, &blockhash, &ancestors),
Some((0, ()))
);
assert_eq!(
status_cache.get_status_any_blockhash(&sig, &ancestors),
status_cache.get_status_any_blockhash(sig, &ancestors),
Some((0, ()))
);
}
@ -332,12 +332,9 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = Ancestors::default();
status_cache.insert(&blockhash, &sig, 1, ());
assert_eq!(status_cache.get_status(&sig, &blockhash, &ancestors), None);
assert_eq!(
status_cache.get_status_any_blockhash(&sig, &ancestors),
None
);
status_cache.insert(&blockhash, sig, 1, ());
assert_eq!(status_cache.get_status(sig, &blockhash, &ancestors), None);
assert_eq!(status_cache.get_status_any_blockhash(sig, &ancestors), None);
}
#[test]
@ -346,10 +343,10 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = Ancestors::default();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
status_cache.add_root(0);
assert_eq!(
status_cache.get_status(&sig, &blockhash, &ancestors),
status_cache.get_status(sig, &blockhash, &ancestors),
Some((0, ()))
);
}
@ -360,13 +357,13 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = vec![(0, 0)].into_iter().collect();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, &sig, 1, ());
status_cache.insert(&blockhash, sig, 0, ());
status_cache.insert(&blockhash, sig, 1, ());
for i in 0..(MAX_CACHE_ENTRIES + 1) {
status_cache.add_root(i as u64);
}
assert!(status_cache
.get_status(&sig, &blockhash, &ancestors)
.get_status(sig, &blockhash, &ancestors)
.is_some());
}
@ -376,11 +373,11 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = Ancestors::default();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
for i in 0..(MAX_CACHE_ENTRIES + 1) {
status_cache.add_root(i as u64);
}
assert_eq!(status_cache.get_status(&sig, &blockhash, &ancestors), None);
assert_eq!(status_cache.get_status(sig, &blockhash, &ancestors), None);
}
#[test]
@ -389,10 +386,10 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = Ancestors::default();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
status_cache.add_root(0);
status_cache.clear();
assert_eq!(status_cache.get_status(&sig, &blockhash, &ancestors), None);
assert_eq!(status_cache.get_status(sig, &blockhash, &ancestors), None);
}
#[test]
@ -403,9 +400,9 @@ mod tests {
let ancestors = Ancestors::default();
status_cache.add_root(0);
status_cache.clear();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
assert!(status_cache
.get_status(&sig, &blockhash, &ancestors)
.get_status(sig, &blockhash, &ancestors)
.is_some());
}
@ -415,7 +412,7 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
status_cache.clear();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
let (_, index, sig_map) = status_cache.cache.get(&blockhash).unwrap();
let sig_slice: &[u8; CACHED_KEY_SIZE] =
arrayref::array_ref![sig.as_ref(), *index, CACHED_KEY_SIZE];
@ -428,7 +425,7 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
status_cache.clear();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, sig, 0, ());
assert!(status_cache.roots().contains(&0));
let slot_deltas = status_cache.root_slot_deltas();
let cache = StatusCache::from_slot_deltas(&slot_deltas);
@ -444,9 +441,9 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let blockhash2 = hash(blockhash.as_ref());
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, &sig, 1, ());
status_cache.insert(&blockhash2, &sig, 1, ());
status_cache.insert(&blockhash, sig, 0, ());
status_cache.insert(&blockhash, sig, 1, ());
status_cache.insert(&blockhash2, sig, 1, ());
for i in 0..(MAX_CACHE_ENTRIES + 1) {
status_cache.add_root(i as u64);
}
@ -469,9 +466,9 @@ mod tests {
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let blockhash2 = hash(blockhash.as_ref());
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, &sig, 1, ());
status_cache.insert(&blockhash2, &sig, 1, ());
status_cache.insert(&blockhash, sig, 0, ());
status_cache.insert(&blockhash, sig, 1, ());
status_cache.insert(&blockhash2, sig, 1, ());
let mut ancestors0 = Ancestors::default();
ancestors0.insert(0, 0);
@ -480,17 +477,17 @@ mod tests {
// Clear slot 0 related data
assert!(status_cache
.get_status(&sig, &blockhash, &ancestors0)
.get_status(sig, &blockhash, &ancestors0)
.is_some());
status_cache.clear_slot_entries(0);
assert!(status_cache
.get_status(&sig, &blockhash, &ancestors0)
.get_status(sig, &blockhash, &ancestors0)
.is_none());
assert!(status_cache
.get_status(&sig, &blockhash, &ancestors1)
.get_status(sig, &blockhash, &ancestors1)
.is_some());
assert!(status_cache
.get_status(&sig, &blockhash2, &ancestors1)
.get_status(sig, &blockhash2, &ancestors1)
.is_some());
// Check that the slot delta for slot 0 is gone, but slot 1 still
@ -502,10 +499,10 @@ mod tests {
status_cache.clear_slot_entries(1);
assert!(status_cache.slot_deltas.is_empty());
assert!(status_cache
.get_status(&sig, &blockhash, &ancestors1)
.get_status(sig, &blockhash, &ancestors1)
.is_none());
assert!(status_cache
.get_status(&sig, &blockhash2, &ancestors1)
.get_status(sig, &blockhash2, &ancestors1)
.is_none());
assert!(status_cache.cache.is_empty());
}
@ -521,13 +518,13 @@ mod tests {
let blockhash = hash(blockhash.as_ref());
let sig_key = Signature::default();
let hash_key = Hash::new_unique();
status_cache.insert(&blockhash, &sig_key, 0, ());
status_cache.insert(&blockhash, &hash_key, 0, ());
status_cache.insert(&blockhash, sig_key, 0, ());
status_cache.insert(&blockhash, hash_key, 0, ());
assert!(status_cache
.get_status(&sig_key, &blockhash, &ancestors)
.get_status(sig_key, &blockhash, &ancestors)
.is_some());
assert!(status_cache
.get_status(&hash_key, &blockhash, &ancestors)
.get_status(hash_key, &blockhash, &ancestors)
.is_some());
}
}

View File

@ -141,7 +141,7 @@ fn install_if_missing(
.next()
.is_none()
{
fs::remove_dir(&target_path).map_err(|err| err.to_string())?;
fs::remove_dir(target_path).map_err(|err| err.to_string())?;
}
// Check whether the package is already in ~/.cache/solana.
@ -153,9 +153,9 @@ fn install_if_missing(
.unwrap_or(false)
{
if target_path.exists() {
fs::remove_file(&target_path).map_err(|err| err.to_string())?;
fs::remove_file(target_path).map_err(|err| err.to_string())?;
}
fs::create_dir_all(&target_path).map_err(|err| err.to_string())?;
fs::create_dir_all(target_path).map_err(|err| err.to_string())?;
let mut url = String::from(url);
url.push('/');
url.push_str(config.sbf_tools_version);
@ -169,9 +169,7 @@ fn install_if_missing(
let zip = File::open(&download_file_path).map_err(|err| err.to_string())?;
let tar = BzDecoder::new(BufReader::new(zip));
let mut archive = Archive::new(tar);
archive
.unpack(&target_path)
.map_err(|err| err.to_string())?;
archive.unpack(target_path).map_err(|err| err.to_string())?;
fs::remove_file(download_file_path).map_err(|err| err.to_string())?;
}
// Make a symbolic link source_path -> target_path in the
@ -475,7 +473,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
})
.join("release");
env::set_current_dir(&root_package_dir).unwrap_or_else(|err| {
env::set_current_dir(root_package_dir).unwrap_or_else(|err| {
error!(
"Unable to set current directory to {}: {}",
root_package_dir, err
@ -536,7 +534,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
// The package version directory doesn't contain a valid
// installation, and it should be removed.
let target_path_parent = target_path.parent().expect("Invalid package path");
fs::remove_dir_all(&target_path_parent).unwrap_or_else(|err| {
fs::remove_dir_all(target_path_parent).unwrap_or_else(|err| {
error!(
"Failed to remove {} while recovering from installation failure: {}",
target_path_parent.to_string_lossy(),
@ -711,7 +709,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
#[cfg(not(windows))]
let output = spawn(
&config.sbf_sdk.join("scripts").join("strip.sh"),
&[&program_unstripped_so, &program_so],
[&program_unstripped_so, &program_so],
config.generate_child_script_on_failure,
);
if config.verbose {
@ -734,7 +732,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
{
let output = spawn(
&dump_script,
&[&program_unstripped_so, &program_dump],
[&program_unstripped_so, &program_dump],
config.generate_child_script_on_failure,
);
if config.verbose {
@ -752,7 +750,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m
let output = spawn(
llvm_objcopy,
&[
[
"--only-keep-debug".as_ref(),
program_unstripped_so.as_os_str(),
program_debug.as_os_str(),
@ -939,7 +937,7 @@ fn main() {
.arg(
Arg::new("arch")
.long("arch")
.possible_values(&["bpf", "sbf", "sbfv2"])
.possible_values(["bpf", "sbf", "sbfv2"])
.default_value("sbf")
.help("Build for the given SBF version"),
)

View File

@ -51,7 +51,7 @@ fn test_build() {
fn test_dump() {
// This test requires rustfilt.
assert_cmd::Command::new("cargo")
.args(&["install", "-f", "rustfilt"])
.args(["install", "-f", "rustfilt"])
.assert()
.success();
run_cargo_build("noop", &["--dump"], false);
@ -120,7 +120,7 @@ fn test_sbfv2() {
.join("bin")
.join("llvm-readelf");
assert_cmd::Command::new(readelf)
.args(&["-h", bin])
.args(["-h", bin])
.assert()
.stdout(predicate::str::contains(
"Flags: 0x20",

View File

@ -319,7 +319,7 @@ fn main() {
.arg(
Arg::new("arch")
.long("arch")
.possible_values(&["bpf", "sbf", "sbfv2"])
.possible_values(["bpf", "sbf", "sbfv2"])
.default_value("sbf")
.help("Build for the given SBF version"),
)

View File

@ -291,10 +291,10 @@ fn parse_pubkey(
) -> Result<proc_macro2::TokenStream> {
let id_vec = bs58::decode(id_literal.value())
.into_vec()
.map_err(|_| syn::Error::new_spanned(&id_literal, "failed to decode base58 string"))?;
.map_err(|_| syn::Error::new_spanned(id_literal, "failed to decode base58 string"))?;
let id_array = <[u8; 32]>::try_from(<&[u8]>::clone(&&id_vec[..])).map_err(|_| {
syn::Error::new_spanned(
&id_literal,
id_literal,
format!("pubkey array is not 32 bytes long: len={}", id_vec.len()),
)
})?;

View File

@ -197,7 +197,7 @@ impl GenesisConfig {
)
})?;
std::fs::create_dir_all(&ledger_path)?;
std::fs::create_dir_all(ledger_path)?;
let mut file = File::create(Self::genesis_filename(ledger_path))?;
file.write_all(&serialized)
@ -343,6 +343,6 @@ mod tests {
config.write(path).expect("write");
let loaded_config = GenesisConfig::load(path).expect("load");
assert_eq!(config.hash(), loaded_config.hash());
let _ignored = std::fs::remove_file(&path);
let _ignored = std::fs::remove_file(path);
}
}

View File

@ -130,8 +130,9 @@ impl fmt::Debug for Packet {
#[allow(clippy::uninit_assumed_init)]
impl Default for Packet {
fn default() -> Packet {
let buffer = std::mem::MaybeUninit::<[u8; PACKET_DATA_SIZE]>::uninit();
Packet {
buffer: unsafe { std::mem::MaybeUninit::uninit().assume_init() },
buffer: unsafe { buffer.assume_init() },
meta: Meta::default(),
}
}
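
Here clippy wants the uninitialized type spelled out on the `MaybeUninit` itself rather than inferred through `assume_init()`; the `#[allow(clippy::uninit_assumed_init)]` stays because an uninitialized byte buffer is still what is being created. A standalone sketch mirroring the patched shape, with a stand-in constant:

```rust
use std::mem::MaybeUninit;

// Stand-in value; the real PACKET_DATA_SIZE lives in solana-sdk.
const PACKET_DATA_SIZE: usize = 1232;

fn main() {
    // Before: unsafe { MaybeUninit::uninit().assume_init() }, with the
    // array type inferred from the surrounding struct field.
    // After: bind the explicitly typed MaybeUninit first, then call
    // assume_init on the binding. Same semantics, explicit type.
    let buffer = MaybeUninit::<[u8; PACKET_DATA_SIZE]>::uninit();
    let buffer: [u8; PACKET_DATA_SIZE] = unsafe { buffer.assume_init() };
    let _ = buffer; // contents are uninitialized; write before reading
}
```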

View File

@ -852,7 +852,7 @@ pub fn new_secp256k1_instruction(
let secp_pubkey = libsecp256k1::PublicKey::from_secret_key(priv_key);
let eth_pubkey = construct_eth_pubkey(&secp_pubkey);
let mut hasher = sha3::Keccak256::new();
hasher.update(&message_arr);
hasher.update(message_arr);
let message_hash = hasher.finalize();
let mut message_hash_arr = [0u8; 32];
message_hash_arr.copy_from_slice(message_hash.as_slice());
@ -1318,7 +1318,7 @@ pub mod test {
data.extend(signature.serialize());
data.push(recovery_id.serialize());
let eth_address_offset = data.len();
data.extend(&eth_address);
data.extend(eth_address);
let message_data_offset = data.len();
data.extend(message);

View File

@ -68,7 +68,7 @@ pub fn send_to(
let addr = p.meta.socket_addr();
if socket_addr_space.check(&addr) {
if let Some(data) = p.data(..) {
socket.send_to(data, &addr)?;
socket.send_to(data, addr)?;
}
}
}

View File

@ -75,7 +75,8 @@ pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result</*num p
const SOCKADDR_STORAGE_SIZE: usize = mem::size_of::<sockaddr_storage>();
let mut hdrs: [mmsghdr; NUM_RCVMMSGS] = unsafe { mem::zeroed() };
let mut iovs: [iovec; NUM_RCVMMSGS] = unsafe { mem::MaybeUninit::uninit().assume_init() };
let iovs = mem::MaybeUninit::<[iovec; NUM_RCVMMSGS]>::uninit();
let mut iovs: [iovec; NUM_RCVMMSGS] = unsafe { iovs.assume_init() };
let mut addrs: [sockaddr_storage; NUM_RCVMMSGS] = unsafe { mem::zeroed() };
let sock_fd = sock.as_raw_fd();
@ -141,7 +142,7 @@ mod tests {
let sent = TEST_NUM_MSGS - 1;
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
sender.send_to(&data[..], addr).unwrap();
}
let mut packets = vec![Packet::default(); TEST_NUM_MSGS];
@ -167,7 +168,7 @@ mod tests {
let sent = TEST_NUM_MSGS + 10;
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
sender.send_to(&data[..], addr).unwrap();
}
let mut packets = vec![Packet::default(); TEST_NUM_MSGS];
@ -208,7 +209,7 @@ mod tests {
let sent = TEST_NUM_MSGS;
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
sender.send_to(&data[..], addr).unwrap();
}
let start = Instant::now();
@ -243,12 +244,12 @@ mod tests {
for _ in 0..sent1 {
let data = [0; PACKET_DATA_SIZE];
sender1.send_to(&data[..], &addr).unwrap();
sender1.send_to(&data[..], addr).unwrap();
}
for _ in 0..sent2 {
let data = [0; PACKET_DATA_SIZE];
sender2.send_to(&data[..], &addr).unwrap();
sender2.send_to(&data[..], addr).unwrap();
}
let mut packets = vec![Packet::default(); TEST_NUM_MSGS];

View File

@ -134,7 +134,8 @@ where
{
let size = packets.len();
#[allow(clippy::uninit_assumed_init)]
let mut iovs = vec![unsafe { std::mem::MaybeUninit::uninit().assume_init() }; size];
let iovec = std::mem::MaybeUninit::<iovec>::uninit();
let mut iovs = vec![unsafe { iovec.assume_init() }; size];
let mut addrs = vec![unsafe { std::mem::zeroed() }; size];
let mut hdrs = vec![unsafe { std::mem::zeroed() }; size];
for ((pkt, dest), hdr, iov, addr) in izip!(packets, &mut hdrs, &mut iovs, &mut addrs) {

View File

@ -22,7 +22,7 @@ pub fn test_recv_mmsg_batch_size() {
(0..1000).for_each(|_| {
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
sender.send_to(&data[..], addr).unwrap();
}
let mut packets = vec![Packet::default(); TEST_BATCH_SIZE];
let now = Instant::now();
@ -38,7 +38,7 @@ pub fn test_recv_mmsg_batch_size() {
(0..1000).for_each(|_| {
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
sender.send_to(&data[..], addr).unwrap();
}
let mut packets = vec![Packet::default(); 4];
let mut recv = 0;

View File

@ -52,7 +52,7 @@ fn tune_poh_service_priority(uid: u32) {
info!("PoH thread PID is {}", pid);
let pid = format!("{}", pid);
let output = Command::new("chrt")
.args(&["-r", "-p", "99", pid.as_str()])
.args(["-r", "-p", "99", pid.as_str()])
.output()
.expect("Expected to set priority of thread");
if output.status.success() {

View File

@ -924,7 +924,7 @@ pub fn test_process_distribute_tokens_with_client(
let allocations_file = NamedTempFile::new().unwrap();
let input_csv = allocations_file.path().to_str().unwrap().to_string();
let mut wtr = csv::WriterBuilder::new().from_writer(allocations_file);
wtr.write_record(&["recipient", "amount"]).unwrap();
wtr.write_record(["recipient", "amount"]).unwrap();
wtr.write_record(&[
alice_pubkey.to_string(),
lamports_to_sol(expected_amount).to_string(),
@ -1024,7 +1024,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair:
let file = NamedTempFile::new().unwrap();
let input_csv = file.path().to_str().unwrap().to_string();
let mut wtr = csv::WriterBuilder::new().from_writer(file);
wtr.write_record(&["recipient", "amount", "lockup_date"])
wtr.write_record(["recipient", "amount", "lockup_date"])
.unwrap();
wtr.write_record(&[
alice_pubkey.to_string(),
@ -1146,7 +1146,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp
let file = NamedTempFile::new().unwrap();
let input_csv = file.path().to_str().unwrap().to_string();
let mut wtr = csv::WriterBuilder::new().from_writer(file);
wtr.write_record(&["recipient", "amount", "lockup_date"])
wtr.write_record(["recipient", "amount", "lockup_date"])
.unwrap();
wtr.write_record(&[
alice_pubkey.to_string(),

View File

@ -43,7 +43,7 @@ fn main() {
let upload_metrics = args.len() > 2;
let git_output = Command::new("git")
.args(&["rev-parse", "HEAD"])
.args(["rev-parse", "HEAD"])
.output()
.expect("failed to execute git rev-parse");
let git_commit_hash = String::from_utf8_lossy(&git_output.stdout);

View File

@ -873,7 +873,7 @@ fn main() {
}
fn remove_directory_contents(ledger_path: &Path) -> Result<(), io::Error> {
for entry in fs::read_dir(&ledger_path)? {
for entry in fs::read_dir(ledger_path)? {
let entry = entry?;
if entry.metadata()?.is_dir() {
fs::remove_dir_all(&entry.path())?

View File

@ -58,7 +58,7 @@ pub fn redirect_stderr_to_file(logfile: Option<String>) -> Option<JoinHandle<()>
{
use log::info;
let mut signals =
signal_hook::iterator::Signals::new(&[signal_hook::consts::SIGUSR1])
signal_hook::iterator::Signals::new([signal_hook::consts::SIGUSR1])
.unwrap_or_else(|err| {
eprintln!("Unable to register SIGUSR1 handler: {:?}", err);
exit(1);