General cleanup (#32980)

* Fixes incorrect `cargo::` directive prefix (should be `cargo:`) in build.rs

* Fixes shadowing of reexports.

* Removes unnecessary `as *const u8`.

* Removes unnecessary `.into_iter()`.

* cargo clippy
This commit is contained in:
Alexander Meißner 2023-08-24 21:44:19 +02:00 committed by GitHub
parent d91b22c421
commit a8be70fa7a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 53 additions and 69 deletions

View File

@ -103,7 +103,7 @@ impl AccountStorage {
pub fn initialize(&mut self, all_storages: AccountStorageMap) {
assert!(self.map.is_empty());
assert!(self.no_shrink_in_progress());
self.map.extend(all_storages.into_iter())
self.map.extend(all_storages)
}
/// remove the append vec at 'slot'

View File

@ -3487,8 +3487,7 @@ impl AccountsDb {
let (reclaims, pubkeys_removed_from_accounts_index2) =
self.purge_keys_exact(pubkey_to_slot_set.iter());
pubkeys_removed_from_accounts_index
.extend(pubkeys_removed_from_accounts_index2.into_iter());
pubkeys_removed_from_accounts_index.extend(pubkeys_removed_from_accounts_index2);
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
@ -7358,7 +7357,7 @@ impl AccountsDb {
let mut sort_time = Measure::start("sort_storages");
let min_root = self.accounts_index.min_alive_root();
let storages = SortedStorages::new_with_slots(
combined_maps.iter().zip(slots.into_iter()),
combined_maps.iter().zip(slots),
min_root,
Some(slot),
);
@ -7824,7 +7823,7 @@ impl AccountsDb {
let (storages, slots) =
self.get_snapshot_storages(base_slot.checked_add(1).unwrap()..=slot);
let sorted_storages =
SortedStorages::new_with_slots(storages.iter().zip(slots.into_iter()), None, None);
SortedStorages::new_with_slots(storages.iter().zip(slots), None, None);
let calculated_incremental_accounts_hash = self.calculate_incremental_accounts_hash(
&calc_config,
&sorted_storages,

View File

@ -427,7 +427,7 @@ impl AppendVec {
Some((
//UNSAFE: This unsafe creates a slice that represents a chunk of self.map memory
//The lifetime of this slice is tied to &self, since it points to self.map memory
unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, size) },
unsafe { std::slice::from_raw_parts(data.as_ptr(), size) },
next,
))
}
@ -615,7 +615,7 @@ impl AppendVec {
let ptrs = [
(meta_ptr as *const u8, mem::size_of::<StoredMeta>()),
(account_meta_ptr as *const u8, mem::size_of::<AccountMeta>()),
(hash_ptr as *const u8, mem::size_of::<Hash>()),
(hash_ptr, mem::size_of::<Hash>()),
(data_ptr, data_len),
];
if let Some(res) = self.append_ptrs_locked(&mut offset, &ptrs) {

View File

@ -67,7 +67,7 @@ impl<'a> SortedStorages<'a> {
let slots = source.iter().map(|storage| {
storage.slot() // this must be unique. Will be enforced in new_with_slots
});
Self::new_with_slots(source.iter().zip(slots.into_iter()), None, None)
Self::new_with_slots(source.iter().zip(slots), None, None)
}
/// create [`SortedStorages`] from `source` iterator.

View File

@ -31,7 +31,7 @@ pub fn get_slice(map: &Mmap, offset: usize, size: usize) -> std::io::Result<(&[u
}
let data = &map[offset..next];
let next = u64_align!(next);
let ptr = data.as_ptr() as *const u8;
let ptr = data.as_ptr();
Ok((unsafe { std::slice::from_raw_parts(ptr, size) }, next))
}

View File

@ -409,7 +409,7 @@ impl<O: BucketOccupied> BucketStorage<O> {
unsafe {
let dst = dst_slice.as_ptr() as *mut u8;
let src = src_slice.as_ptr() as *const u8;
let src = src_slice.as_ptr();
std::ptr::copy_nonoverlapping(src, dst, old_bucket.cell_size as usize);
};
}

View File

@ -2385,7 +2385,7 @@ pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> Cl
tx.signatures
.iter()
.zip(tx.message.account_keys.iter())
.zip(verify_results.into_iter())
.zip(verify_results)
.for_each(|((sig, key), res)| {
if res {
signers.push(format!("{key}={sig}"))

View File

@ -1980,7 +1980,7 @@ pub fn process_show_validators(
let validators: Vec<_> = current_validators
.into_iter()
.chain(delinquent_validators.into_iter())
.chain(delinquent_validators)
.collect();
let (average_skip_rate, average_stake_weighted_skip_rate) = {

View File

@ -1385,7 +1385,7 @@ pub fn process_close_vote_account(
if let Some(vote_account) = vote_account_status
.current
.into_iter()
.chain(vote_account_status.delinquent.into_iter())
.chain(vote_account_status.delinquent)
.next()
{
if vote_account.activated_stake != 0 {

View File

@ -208,7 +208,7 @@ impl ShredFetchStage {
turbine_disabled.clone(),
);
tvu_threads.extend(repair_receiver.into_iter());
tvu_threads.extend(repair_receiver);
tvu_threads.push(tvu_filter);
tvu_threads.push(repair_handler);

View File

@ -676,7 +676,7 @@ pub(crate) fn submit_gossip_stats(
.pull
.votes
.into_iter()
.chain(crds_stats.push.votes.into_iter())
.chain(crds_stats.push.votes)
.into_grouping_map()
.aggregate(|acc, _slot, num_votes| Some(acc.unwrap_or_default() + num_votes));
submit_vote_stats("cluster_info_crds_stats_votes", &votes);

View File

@ -527,9 +527,7 @@ fn sanitize_entries(addrs: &[IpAddr], sockets: &[SocketEntry]) -> Result<(), Err
// Verify that port offsets don't overflow.
if sockets
.iter()
.fold(Some(0u16), |offset, entry| {
offset?.checked_add(entry.offset)
})
.try_fold(0u16, |offset, entry| offset.checked_add(entry.offset))
.is_none()
{
return Err(Error::PortOffsetsOverflow);

View File

@ -177,7 +177,7 @@ impl ProgramSubCommand for App<'_, '_> {
.arg(
Arg::with_name("input")
.help(
r##"Input for the program to run on, where FILE is a name of a JSON file
r#"Input for the program to run on, where FILE is a name of a JSON file
with input data, or BYTES is the number of 0-valued bytes to allocate for program parameters"
The input data for a program execution have to be in JSON format
@ -196,7 +196,7 @@ and the following fields are required
],
"instruction_data": [31, 32, 23, 24]
}
"##,
"#,
)
.short("i")
.long("input")

View File

@ -516,7 +516,7 @@ pub fn start_gossip_voter(
let (labels, votes) = cluster_info.get_votes_with_labels(&mut cursor);
let mut parsed_vote_iter: Vec<_> = labels
.into_iter()
.zip(votes.into_iter())
.zip(votes)
.filter_map(&vote_filter)
.collect();

View File

@ -58,7 +58,7 @@ impl<'a> Proof<'a> {
None
}
});
matches!(result, Some(_))
result.is_some()
}
}

View File

@ -527,10 +527,7 @@ pub mod test_mocks {
assert!(!points.is_empty());
let new_points = points.len();
self.points_written
.lock()
.unwrap()
.extend(points.into_iter());
self.points_written.lock().unwrap().extend(points);
info!(
"Writing {} points ({} total)",

View File

@ -332,7 +332,7 @@ fn create_memory_mapping<'a, 'b, C: ContextObject>(
MemoryRegion::new_writable(heap.as_slice_mut(), MM_HEAP_START),
]
.into_iter()
.chain(additional_regions.into_iter())
.chain(additional_regions)
.collect();
Ok(if let Some(cow_cb) = cow_cb {

View File

@ -196,13 +196,7 @@ fn memmove_non_contiguous(
memory_mapping,
reverse,
|src_host_addr, dst_host_addr, chunk_len| {
unsafe {
std::ptr::copy(
src_host_addr as *const u8,
dst_host_addr as *mut u8,
chunk_len,
)
};
unsafe { std::ptr::copy(src_host_addr, dst_host_addr as *mut u8, chunk_len) };
Ok(0)
},
)
@ -237,8 +231,8 @@ fn memcmp_non_contiguous(
false,
|s1_addr, s2_addr, chunk_len| {
let res = unsafe {
let s1 = slice::from_raw_parts(s1_addr as *const u8, chunk_len);
let s2 = slice::from_raw_parts(s2_addr as *const u8, chunk_len);
let s1 = slice::from_raw_parts(s1_addr, chunk_len);
let s2 = slice::from_raw_parts(s2_addr, chunk_len);
// Safety:
// memcmp is marked unsafe since it assumes that s1 and s2 are exactly chunk_len
// long. The whole point of iter_memory_pair_chunks is to find same length chunks

View File

@ -1738,7 +1738,7 @@ declare_syscall!(
let base = translate_slice::<u8>(
memory_mapping,
params.base as *const _ as *const u8 as u64,
params.base as *const _ as u64,
params.base_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
@ -1746,7 +1746,7 @@ declare_syscall!(
let exponent = translate_slice::<u8>(
memory_mapping,
params.exponent as *const _ as *const u8 as u64,
params.exponent as *const _ as u64,
params.exponent_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
@ -1754,7 +1754,7 @@ declare_syscall!(
let modulus = translate_slice::<u8>(
memory_mapping,
params.modulus as *const _ as *const u8 as u64,
params.modulus as *const _ as u64,
params.modulus_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),

View File

@ -1,12 +1,12 @@
//! config for staking
//! carries variables that the stake program cares about
#[allow(deprecated)]
use solana_sdk::stake::config;
#[deprecated(
since = "1.8.0",
note = "Please use `solana_sdk::stake::config` or `solana_program::stake::config` instead"
)]
pub use solana_sdk::stake::config::*;
#[allow(deprecated)]
use solana_sdk::stake::config::{self, Config};
use {
bincode::deserialize,
solana_config_program::{create_config_account, get_config_data},

View File

@ -5,7 +5,7 @@ use {
log::*,
serde_derive::{Deserialize, Serialize},
solana_metrics::datapoint_debug,
solana_program::vote::{error::VoteError, program::id, state::serde_compact_vote_state_update},
solana_program::vote::{error::VoteError, program::id},
solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
clock::{Epoch, Slot, UnixTimestamp},

View File

@ -176,7 +176,7 @@ impl<T> SnapshotAccountsDbFields<T> {
})?;
let mut combined_storages = full_snapshot_storages;
combined_storages.extend(incremental_snapshot_storages.into_iter());
combined_storages.extend(incremental_snapshot_storages);
Ok(AccountsDbFields(
combined_storages,

View File

@ -300,7 +300,7 @@ pub fn bank_from_snapshot_archives(
if let Some(ref mut unarchive_preparation_result) = unarchived_incremental_snapshot {
let incremental_snapshot_storages =
std::mem::take(&mut unarchive_preparation_result.storage);
storage.extend(incremental_snapshot_storages.into_iter());
storage.extend(incremental_snapshot_storages);
}
let storage_and_next_append_vec_id = StorageAndNextAppendVecId {

View File

@ -138,7 +138,7 @@ fn move_stake_account(
new_withdraw_authority_pubkey,
);
instructions.extend(authorize_instructions.into_iter());
instructions.extend(authorize_instructions);
let message = Message::new(&instructions, Some(fee_payer_pubkey));
Some(message)
}

View File

@ -48,28 +48,24 @@ pub fn decompress(data: &[u8]) -> Result<Vec<u8>, io::Error> {
pub fn compress(method: CompressionMethod, data: &[u8]) -> Result<Vec<u8>, io::Error> {
let mut compressed_data = bincode::serialize(&method).unwrap();
compressed_data.extend(
match method {
CompressionMethod::Bzip2 => {
let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Gzip => {
let mut e =
flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Zstd => {
let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
e.write_all(data)?;
e.finish()?
}
CompressionMethod::NoCompression => data.to_vec(),
compressed_data.extend(match method {
CompressionMethod::Bzip2 => {
let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best());
e.write_all(data)?;
e.finish()?
}
.into_iter(),
);
CompressionMethod::Gzip => {
let mut e = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Zstd => {
let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
e.write_all(data)?;
e.finish()?
}
CompressionMethod::NoCompression => data.to_vec(),
});
Ok(compressed_data)
}

View File

@ -10,7 +10,7 @@ fn main() -> Result<(), std::io::Error> {
let mut protos = Vec::new();
for proto_file in &proto_files {
let proto = proto_base_path.join(proto_file);
println!("cargo::rerun-if-changed={}", proto.display());
println!("cargo:rerun-if-changed={}", proto.display());
protos.push(proto);
}

View File

@ -485,7 +485,7 @@ impl AdminRpc for AdminRpcImpl {
.staked_map_id;
let mut write_staked_nodes = meta.staked_nodes_overrides.write().unwrap();
write_staked_nodes.clear();
write_staked_nodes.extend(loaded_config.into_iter());
write_staked_nodes.extend(loaded_config);
info!("Staked nodes overrides loaded from {}", path);
debug!("overrides map: {:?}", write_staked_nodes);
Ok(())

View File

@ -1450,7 +1450,7 @@ pub fn main() {
if let Some(account_shrink_snapshot_paths) = account_shrink_snapshot_paths {
account_snapshot_paths
.into_iter()
.chain(account_shrink_snapshot_paths.into_iter())
.chain(account_shrink_snapshot_paths)
.collect()
} else {
account_snapshot_paths