removes erroneous uses of std::mem::swap (#26536)

All instances should be replaced by std::mem::{replace, take},
or just plain assignment.
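As a minimal, self-contained sketch of the three rewrites this commit applies (the variable names below echo the diff but are illustrative only):

fn main() {
    // 1) Swapping with a default value is just `take`:
    let mut good_shreds = vec![1u8, 2, 3];
    let shreds = std::mem::take(&mut good_shreds); // old: swap with vec![]
    assert!(good_shreds.is_empty());

    // 2) Swapping with any other freshly built value is `replace`,
    //    which hands back the previous contents:
    let mut queue = Vec::with_capacity(8);
    queue.push(42);
    let drained = std::mem::replace(&mut queue, Vec::with_capacity(8));
    assert_eq!(drained, vec![42]);

    // 3) A swap whose other side is never read again is plain assignment:
    let mut buf = vec![0u8];
    assert_eq!(buf, vec![0]);
    let buf_reserialized = vec![1u8, 2];
    buf = buf_reserialized; // old: swap, then drop the stale buffer
    assert_eq!(buf, vec![1, 2]);
    let _ = shreds;
}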
behzad nouri 2022-07-11 11:33:15 +00:00 committed by GitHub
parent f8669c6d52
commit ba785cf8ab
24 changed files with 56 additions and 106 deletions

View File

@@ -458,8 +458,7 @@ impl<T: Clone + Copy> Bucket<T> {
 pub fn handle_delayed_grows(&mut self) {
 if self.reallocated.get_reallocated() {
 // swap out the bucket that was resized previously with a read lock
-let mut items = ReallocatedItems::default();
-std::mem::swap(&mut items, &mut self.reallocated.items.lock().unwrap());
+let mut items = std::mem::take(&mut *self.reallocated.items.lock().unwrap());
 if let Some((random, bucket)) = items.index.take() {
 self.apply_grow_index(random, bucket);
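The one-liner added above takes the contents out from behind the mutex in a single statement. A small sketch of that pattern, using a hypothetical Reallocated type (names are not the ones in this crate): `lock()` yields a guard, `&mut *guard` reborrows the inner value so `take` can move it out and leave a default behind, and the temporary guard drops at the end of the statement.

use std::sync::Mutex;

// Hypothetical stand-in for the locked field; names are illustrative only.
struct Reallocated {
    items: Mutex<Vec<u64>>,
}

fn drain_items(r: &Reallocated) -> Vec<u64> {
    // The guard is a temporary: the lock is held only for this one statement.
    std::mem::take(&mut *r.items.lock().unwrap())
}

fn main() {
    let r = Reallocated { items: Mutex::new(vec![1, 2, 3]) };
    assert_eq!(drain_items(&r), vec![1, 2, 3]);
    assert!(r.items.lock().unwrap().is_empty());
}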

View File

@@ -651,14 +651,14 @@ impl BankingStage {
 let buffered_packets_len = buffered_packet_batches.len();
 let mut proc_start = Measure::start("consume_buffered_process");
 let mut reached_end_of_slot = false;
-let mut retryable_packets = MinMaxHeap::with_capacity(buffered_packet_batches.capacity());
-std::mem::swap(
-&mut buffered_packet_batches.packet_priority_queue,
-&mut retryable_packets,
-);
-let mut retryable_packets: MinMaxHeap<Rc<ImmutableDeserializedPacket>> = retryable_packets
+let mut retryable_packets = {
+let capacity = buffered_packet_batches.capacity();
+std::mem::replace(
+&mut buffered_packet_batches.packet_priority_queue,
+MinMaxHeap::with_capacity(capacity),
+)
+};
+let retryable_packets: MinMaxHeap<Rc<ImmutableDeserializedPacket>> = retryable_packets
 .drain_desc()
 .chunks(num_packets_to_process_per_iteration)
 .into_iter()
@@ -789,10 +789,7 @@ impl BankingStage {
 })
 .collect();
-std::mem::swap(
-&mut retryable_packets,
-&mut buffered_packet_batches.packet_priority_queue,
-);
+buffered_packet_batches.packet_priority_queue = retryable_packets;
 if reached_end_of_slot {
 slot_metrics_tracker

View File

@@ -60,8 +60,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
 // and make progress
 if bank.slot() > SLOT_TO_RESOLVE && !self.good_shreds.is_empty() {
 info!("Resolving bad shreds");
-let mut shreds = vec![];
-std::mem::swap(&mut shreds, &mut self.good_shreds);
+let shreds = std::mem::take(&mut self.good_shreds);
 blockstore_sender.send((Arc::new(shreds), None))?;
 }

View File

@@ -167,7 +167,7 @@ impl AggregateCommitmentService {
 );
 new_block_commitment.set_highest_confirmed_root(highest_confirmed_root);
-std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
+*w_block_commitment_cache = new_block_commitment;
 w_block_commitment_cache.commitment_slots()
 }

View File

@@ -29,11 +29,11 @@ impl OptimisticConfirmationVerifier {
 ) -> Vec<(Slot, Hash)> {
 let root = root_bank.slot();
 let root_ancestors = &root_bank.ancestors;
-let mut slots_before_root = self
+let slots_after_root = self
 .unchecked_slots
 .split_off(&((root + 1), Hash::default()));
 // `slots_before_root` now contains all slots <= root
-std::mem::swap(&mut slots_before_root, &mut self.unchecked_slots);
+let slots_before_root = std::mem::replace(&mut self.unchecked_slots, slots_after_root);
 slots_before_root
 .into_iter()
 .filter(|(optimistic_slot, optimistic_hash)| {

View File

@@ -310,8 +310,7 @@ impl RepairWeight {
 // Purge `self.unrooted_slots` of slots less than `new_root` as we know any
 // unrooted votes for slots < `new_root` will now be rejected, so we won't
 // need to check `self.unrooted_slots` to see if those slots are unrooted.
-let mut new_unrooted_slots = self.unrooted_slots.split_off(&new_root);
-std::mem::swap(&mut self.unrooted_slots, &mut new_unrooted_slots);
+self.unrooted_slots = self.unrooted_slots.split_off(&new_root);
 self.root = new_root;
 }
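The single-line form above (and the similar rewrites in the next two files) works because `split_off` on an ordered collection removes and returns every entry greater than or equal to the given key, so assigning the result back keeps exactly the >= `new_root` suffix. A minimal sketch with an illustrative BTreeSet (the real field may be a set or map of slots):

use std::collections::BTreeSet;

fn main() {
    // Illustrative stand-in for `self.unrooted_slots`.
    let mut unrooted_slots: BTreeSet<u64> = (1..=10).collect();
    let new_root = 6;
    // `split_off` hands back the >= `new_root` half; the < `new_root` half
    // that previously required a swap is simply dropped.
    unrooted_slots = unrooted_slots.split_off(&new_root);
    assert_eq!(unrooted_slots, (6..=10).collect::<BTreeSet<u64>>());
}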

View File

@@ -3039,18 +3039,15 @@ impl ReplayStage {
 }
 progress.handle_new_root(&r_bank_forks);
 heaviest_subtree_fork_choice.set_root((new_root, r_bank_forks.root_bank().hash()));
-let mut slots_ge_root = duplicate_slots_tracker.split_off(&new_root);
+*duplicate_slots_tracker = duplicate_slots_tracker.split_off(&new_root);
 // duplicate_slots_tracker now only contains entries >= `new_root`
-std::mem::swap(duplicate_slots_tracker, &mut slots_ge_root);
-let mut slots_ge_root = gossip_duplicate_confirmed_slots.split_off(&new_root);
+*gossip_duplicate_confirmed_slots = gossip_duplicate_confirmed_slots.split_off(&new_root);
 // gossip_confirmed_slots now only contains entries >= `new_root`
-std::mem::swap(gossip_duplicate_confirmed_slots, &mut slots_ge_root);
 unfrozen_gossip_verified_vote_hashes.set_root(new_root);
-let mut slots_ge_root = epoch_slots_frozen_slots.split_off(&new_root);
+*epoch_slots_frozen_slots = epoch_slots_frozen_slots.split_off(&new_root);
 // epoch_slots_frozen_slots now only contains entries >= `new_root`
-std::mem::swap(epoch_slots_frozen_slots, &mut slots_ge_root);
 }
 fn generate_new_bank_forks(

View File

@@ -127,11 +127,7 @@ impl SigVerifier for TransactionSigVerifier {
 &mut self,
 packet_batches: Vec<PacketBatch>,
 ) -> Result<(), SigVerifyServiceError<Self::SendType>> {
-let mut tracer_packet_stats_to_send = SigverifyTracerPacketStats::default();
-std::mem::swap(
-&mut tracer_packet_stats_to_send,
-&mut self.tracer_packet_stats,
-);
+let tracer_packet_stats_to_send = std::mem::take(&mut self.tracer_packet_stats);
 self.packet_sender
 .send((packet_batches, Some(tracer_packet_stats_to_send)))?;
 Ok(())

View File

@@ -49,9 +49,8 @@ impl UnfrozenGossipVerifiedVoteHashes {
 // Cleanup `votes_per_slot` based on new roots
 pub fn set_root(&mut self, new_root: Slot) {
-let mut slots_ge_root = self.votes_per_slot.split_off(&new_root);
+self.votes_per_slot = self.votes_per_slot.split_off(&new_root);
 // `self.votes_per_slot` now only contains entries >= `new_root`
-std::mem::swap(&mut self.votes_per_slot, &mut slots_ge_root);
 }
 pub fn remove_slot_hash(&mut self, slot: Slot, hash: &Hash) -> Option<Vec<Pubkey>> {

View File

@@ -49,9 +49,7 @@ impl Poh {
 pub fn reset(&mut self, hash: Hash, hashes_per_tick: Option<u64>) {
 // retains ticks_per_slot: this cannot change without restarting the validator
 let tick_number = 0;
-let mut poh =
-Poh::new_with_slot_info(hash, hashes_per_tick, self.ticks_per_slot, tick_number);
-std::mem::swap(&mut poh, self);
+*self = Poh::new_with_slot_info(hash, hashes_per_tick, self.ticks_per_slot, tick_number);
 }
 pub fn target_poh_time(&self, target_ns_per_tick: u64) -> Instant {

View File

@@ -216,8 +216,7 @@ impl CompressedSlots {
 CompressedSlots::Uncompressed(vals) => {
 let unc = vals.clone();
 let compressed = Flate2::deflate(unc)?;
-let mut new = CompressedSlots::Flate2(compressed);
-std::mem::swap(self, &mut new);
+*self = CompressedSlots::Flate2(compressed);
 Ok(())
 }
 CompressedSlots::Flate2(_) => Ok(()),

View File

@@ -1703,8 +1703,7 @@ impl Blockstore {
 current_slot += 1;
 parent_slot = current_slot - 1;
 remaining_ticks_in_slot = ticks_per_slot;
-let mut current_entries = vec![];
-std::mem::swap(&mut slot_entries, &mut current_entries);
+let current_entries = std::mem::take(&mut slot_entries);
 let start_index = {
 if all_shreds.is_empty() {
 start_index

View File

@@ -220,9 +220,8 @@ impl SlotMeta {
 }
 pub fn clear_unconfirmed_slot(&mut self) {
-let mut new_self = SlotMeta::new_orphan(self.slot);
-std::mem::swap(&mut new_self.next_slots, &mut self.next_slots);
-std::mem::swap(self, &mut new_self);
+let old = std::mem::replace(self, SlotMeta::new_orphan(self.slot));
+self.next_slots = old.next_slots;
 }
 pub(crate) fn new(slot: Slot, parent_slot: Option<Slot>) -> Self {

View File

@@ -1423,13 +1423,9 @@ fn test_snapshots_restart_validity() {
 .full_snapshot_archives_dir;
 // Set up the cluster with 1 snapshotting validator
-let mut all_account_storage_dirs = vec![vec![]];
-std::mem::swap(
-&mut all_account_storage_dirs[0],
+let mut all_account_storage_dirs = vec![std::mem::take(
 &mut snapshot_test_config.account_storage_dirs,
-);
+)];
 let mut config = ClusterConfig {
 node_stakes: vec![DEFAULT_NODE_STAKE],
 cluster_lamports: DEFAULT_CLUSTER_LAMPORTS,

View File

@@ -179,8 +179,7 @@ impl MetricsAgent {
 }
 fn collect_points(points: &mut Vec<DataPoint>, counters: &mut CounterMap) -> Vec<DataPoint> {
-let mut ret: Vec<DataPoint> = Vec::default();
-std::mem::swap(&mut ret, points);
+let mut ret = std::mem::take(points);
 ret.extend(counters.values().map(|v| v.into()));
 counters.clear();
 ret

View File

@@ -459,7 +459,6 @@ impl PohRecorder {
 // synchronize PoH with a bank
 pub fn reset(&mut self, reset_bank: Arc<Bank>, next_leader_slot: Option<(Slot, Slot)>) {
 self.clear_bank();
-let mut cache = vec![];
 let blockhash = reset_bank.last_blockhash();
 let poh_hash = {
 let mut poh = self.poh.lock().unwrap();
@@ -475,8 +474,7 @@
 reset_bank.slot()
 );
-std::mem::swap(&mut cache, &mut self.tick_cache);
+self.tick_cache = vec![];
 self.start_bank = reset_bank;
 self.tick_height = (self.start_slot() + 1) * self.ticks_per_slot;
 self.start_tick_height = self.tick_height + 1;

View File

@@ -4892,13 +4892,12 @@ pub mod tests {
 slot,
 ));
-let mut new_block_commitment = BlockCommitmentCache::new(
+let new_block_commitment = BlockCommitmentCache::new(
 HashMap::new(),
 0,
 CommitmentSlots::new_from_slot(self.bank_forks.read().unwrap().highest_slot()),
 );
-let mut w_block_commitment_cache = self.block_commitment_cache.write().unwrap();
-std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
+*self.block_commitment_cache.write().unwrap() = new_block_commitment;
 bank
 }

View File

@@ -1857,12 +1857,10 @@ impl<'a, T: Fn(Slot) -> Option<Slot> + Sync + Send + Clone> AppendVecScan for Sc
 result
 }
 fn get_accum(&mut self) -> BinnedHashData {
-let mut def = BinnedHashData::default();
-std::mem::swap(&mut def, &mut self.accum);
-def
+std::mem::take(&mut self.accum)
 }
-fn set_accum(&mut self, mut accum: BinnedHashData) {
-std::mem::swap(&mut self.accum, &mut accum);
+fn set_accum(&mut self, accum: BinnedHashData) {
+self.accum = accum;
 }
 }
@@ -6320,9 +6318,7 @@ impl AccountsDb {
 .map(|stored_account| stored_account.meta.pubkey)
 .unwrap(), // will always be 'Some'
 ) {
-let mut account: (StoredMetaWriteVersion, Option<StoredAccountMeta<'_>>) =
-(0, None);
-std::mem::swap(&mut account, found_account);
+let account = std::mem::take(found_account);
 scanner.found_account(&LoadedAccount::Stored(account.1.unwrap()));
 }
 let next = progress[min_index].next().map(|stored_account| {
@@ -9369,12 +9365,10 @@ pub mod tests {
 }
 fn init_accum(&mut self, _count: usize) {}
 fn get_accum(&mut self) -> BinnedHashData {
-let mut def = BinnedHashData::default();
-std::mem::swap(&mut def, &mut self.accum);
-def
+std::mem::take(&mut self.accum)
 }
-fn set_accum(&mut self, mut accum: BinnedHashData) {
-std::mem::swap(&mut self.accum, &mut accum);
+fn set_accum(&mut self, accum: BinnedHashData) {
+self.accum = accum;
 }
 fn found_account(&mut self, loaded_account: &LoadedAccount) {
 self.calls.fetch_add(1, Ordering::Relaxed);
@@ -9633,12 +9627,10 @@ pub mod tests {
 self.accum
 }
 fn get_accum(&mut self) -> BinnedHashData {
-let mut def = BinnedHashData::default();
-std::mem::swap(&mut def, &mut self.accum);
-def
+std::mem::take(&mut self.accum)
 }
-fn set_accum(&mut self, mut accum: BinnedHashData) {
-std::mem::swap(&mut self.accum, &mut accum);
+fn set_accum(&mut self, accum: BinnedHashData) {
+self.accum = accum;
 }
 }

View File

@@ -12,8 +12,7 @@ pub struct TempFile {
 impl Drop for TempFile {
 fn drop(&mut self) {
-let mut path = PathBuf::new();
-std::mem::swap(&mut path, &mut self.path);
+let path = std::mem::replace(&mut self.path, PathBuf::new());
 let _ignored = std::fs::remove_file(path);
 }
 }

View File

@@ -4269,8 +4269,7 @@ impl Bank {
 );
 let prev_accounts_data_len = self.load_accounts_data_size();
-let mut transaction_accounts = Vec::new();
-std::mem::swap(&mut loaded_transaction.accounts, &mut transaction_accounts);
+let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts);
 let mut transaction_context = TransactionContext::new(
 transaction_accounts,
 compute_budget.max_invoke_depth.saturating_add(1),

View File

@@ -534,9 +534,7 @@ impl<T: IndexValue> InMemAccountsIndex<T> {
 let reclaim_item = if !(found_slot || found_other_slot) {
 // first time we found an entry in 'slot' or 'other_slot', so replace it in-place.
 // this may be the only instance we find
-let mut new_item = (slot, account_info);
-std::mem::swap(&mut new_item, &mut slot_list[slot_list_index]);
-new_item
+std::mem::replace(&mut slot_list[slot_list_index], (slot, account_info))
 } else {
 // already replaced one entry, so this one has to be removed
 slot_list.remove(slot_list_index)
@@ -1003,11 +1001,7 @@ impl<T: IndexValue> InMemAccountsIndex<T> {
 }
 fn write_startup_info_to_disk(&self) {
-let mut insert = vec![];
-{
-let mut lock = self.startup_info.lock().unwrap();
-std::mem::swap(&mut insert, &mut lock.insert);
-}
+let insert = std::mem::take(&mut self.startup_info.lock().unwrap().insert);
 if insert.is_empty() {
 // nothing to insert for this bin
 return;
@@ -1067,10 +1061,7 @@ impl<T: IndexValue> InMemAccountsIndex<T> {
 // in order to return accurate and complete duplicates, we must have nothing left remaining to insert
 assert!(write.insert.is_empty());
-let write = &mut write.duplicates;
-let mut duplicates = vec![];
-std::mem::swap(&mut duplicates, write);
-duplicates
+std::mem::take(&mut write.duplicates)
 }
 /// synchronize the in-mem index with the disk index

View File

@@ -294,8 +294,7 @@ pub mod tests {
 impl RollingBitField {
 pub fn clear(&mut self) {
-let mut n = Self::new(self.max_width);
-std::mem::swap(&mut n, self);
+*self = Self::new(self.max_width);
 }
 }

View File

@@ -268,7 +268,7 @@ fn test_bank_serialize_style(
 // This make serialized bytes not deterministic.
 // But, we can guarantee that the buffer is different if we change the hash!
 assert_ne!(buf, buf_reserialized);
-std::mem::swap(&mut buf, &mut buf_reserialized);
+buf = buf_reserialized;
 }
 }

View File

@@ -237,9 +237,9 @@ impl SharedBufferBgReader {
 bytes_read += size;
 // loop to read some more. Underlying reader does not usually read all we ask for.
 }
-Err(mut err) => {
+Err(err) => {
 error_received = true;
-std::mem::swap(&mut error, &mut err);
+error = err;
 break;
 }
 }
@@ -297,8 +297,7 @@ impl SharedBufferInternal {
 return false;
 }
 // grab all data from bg
-let mut newly_read_data: Vec<OneSharedBuffer> = vec![];
-std::mem::swap(&mut *from_lock, &mut newly_read_data);
+let mut newly_read_data: Vec<OneSharedBuffer> = std::mem::take(&mut *from_lock);
 // append all data to fg
 let mut to_lock = self.data.write().unwrap();
 // from_lock has to be held until we have the to_lock lock. Otherwise, we can race with another reader and append to to_lock out of order.
@@ -367,10 +366,10 @@
 let eof = self.instance.has_reached_eof();
 for recycle in previous_buffer_index..new_min {
-let mut remove = self.empty_buffer.clone();
-let mut data = self.instance.data.write().unwrap();
-std::mem::swap(&mut remove, &mut data[recycle]);
-drop(data);
+let remove = {
+let mut data = self.instance.data.write().unwrap();
+std::mem::replace(&mut data[recycle], self.empty_buffer.clone())
+};
 if remove.is_empty() {
 continue; // another thread beat us swapping out this buffer, so nothing to recycle here
 }
@@ -463,10 +462,8 @@ impl Read for SharedBufferReader {
 let mut error = instance.bg_reader_data.error.write().unwrap();
 if error.is_err() {
 // replace the current error (with AN error instead of ok)
-let mut stored_error = Err(Self::default_error());
-std::mem::swap(&mut *error, &mut stored_error);
 // return the original error
-return stored_error;
+return std::mem::replace(&mut *error, Err(Self::default_error()));
 }
 }