preliminaries for bumping nightly to 2023-08-25 (#33047)

* remove unnecessary hashes around raw string literals

* remove unnecessary literal `unwrap()`s

* remove panicking `unwrap()`

* remove unnecessary `unwrap()`

* use `[]` instead of `vec![]` where applicable

* remove (more) unnecessary explicit `into_iter()` calls

* remove redundant pattern matching

* don't cast to same type and constness

* do not `cfg(any(...` a single item

* remove needless pass by `&mut`

* prefer `or_default()` to `or_insert_with(T::default())`

* `filter_map()` better written as `filter()`

* incorrect `PartialOrd` impl on `Ord` type

* replace "slow zero-filled `Vec` initializations"

* remove redundant local bindings

* add required lifetime to associated constant
This commit is contained in:
Trent Nelson 2023-08-29 17:05:35 -06:00 committed by GitHub
parent 00bfc6abd3
commit b8dc5daedb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
52 changed files with 258 additions and 290 deletions

View File

@ -1933,7 +1933,7 @@ mod tests {
assert_eq!(loaded_transaction.program_indices.len(), 1);
assert_eq!(loaded_transaction.program_indices[0].len(), 0);
}
(Err(e), _nonce) => Err(e).unwrap(),
(Err(e), _nonce) => panic!("{e}"),
}
}
@ -2282,7 +2282,7 @@ mod tests {
}
}
}
(Err(e), _nonce) => Err(e).unwrap(),
(Err(e), _nonce) => panic!("{e}"),
}
}

View File

@ -2417,7 +2417,7 @@ pub struct PubkeyHashAccount {
}
impl AccountsDb {
pub const ACCOUNTS_HASH_CACHE_DIR: &str = "accounts_hash_cache";
pub const ACCOUNTS_HASH_CACHE_DIR: &'static str = "accounts_hash_cache";
pub fn default_for_tests() -> Self {
Self::default_with_accounts_index(AccountInfoAccountsIndex::default_for_tests(), None)
@ -4731,10 +4731,7 @@ impl AccountsDb {
It is a performance optimization to not send the ENTIRE old/pre-shrunk append vec to clean in the normal case.
*/
let mut uncleaned_pubkeys = self
.uncleaned_pubkeys
.entry(slot)
.or_insert_with(Vec::default);
let mut uncleaned_pubkeys = self.uncleaned_pubkeys.entry(slot).or_default();
uncleaned_pubkeys.extend(pubkeys);
}
@ -8512,7 +8509,7 @@ impl AccountsDb {
.lock()
.unwrap()
.entry(accounts.target_slot())
.or_insert_with(BankHashStats::default)
.or_default()
.accumulate(&stats);
}
@ -9480,9 +9477,7 @@ impl AccountsDb {
let mut storage_size_accounts_map_flatten_time =
Measure::start("storage_size_accounts_map_flatten_time");
if !accounts_map.is_empty() {
let mut info = storage_info
.entry(store_id)
.or_insert_with(StorageSizeAndCount::default);
let mut info = storage_info.entry(store_id).or_default();
info.stored_size += storage_info_local.stored_size;
info.count += storage_info_local.count;
}
@ -10385,7 +10380,7 @@ pub mod tests {
CalculateHashIntermediate::new(Hash::default(), 256, pubkey255),
];
let expected_hashes = vec![
let expected_hashes = [
Hash::from_str("5K3NW73xFHwgTWVe4LyCg4QfQda8f88uZj2ypDx2kmmH").unwrap(),
Hash::from_str("84ozw83MZ8oeSF4hRAg7SeW1Tqs9LMXagX1BrDRjtZEx").unwrap(),
Hash::from_str("5XqtnEJ41CG2JWNp7MAg9nxkRUAnyjLxfsKsdrLxQUbC").unwrap(),
@ -10730,7 +10725,7 @@ pub mod tests {
let slot = MAX_ITEMS_PER_CHUNK as Slot;
let (storages, raw_expected) =
sample_storages_and_account_in_slot(slot, &accounts_db, INCLUDE_SLOT_IN_HASH_TESTS);
let storage_data = vec![(&storages[0], slot)];
let storage_data = [(&storages[0], slot)];
let sorted_storages =
SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1);
@ -10827,7 +10822,7 @@ pub mod tests {
}
let bins = 256;
let bin_locations = vec![0, 127, 128, 255];
let bin_locations = [0, 127, 128, 255];
let range = 1;
for bin in 0..bins {
let accounts_db = AccountsDb::new_single_for_tests();
@ -10869,7 +10864,7 @@ pub mod tests {
let slot = MAX_ITEMS_PER_CHUNK as Slot;
let (storages, raw_expected) =
sample_storages_and_account_in_slot(slot, &accounts_db, INCLUDE_SLOT_IN_HASH_TESTS);
let storage_data = vec![(&storages[0], slot)];
let storage_data = [(&storages[0], slot)];
let sorted_storages =
SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1);
@ -14542,7 +14537,7 @@ pub mod tests {
})
.unwrap();
assert_eq!(account_info.0, slot);
let reclaims = vec![account_info];
let reclaims = [account_info];
accounts_db.remove_dead_accounts(reclaims.iter(), None, None, true);
let after_size = storage0.alive_bytes.load(Ordering::Acquire);
assert_eq!(before_size, after_size + account.stored_size());
@ -16260,7 +16255,7 @@ pub mod tests {
&mut purged_stored_account_slots,
&pubkeys_removed_from_accounts_index,
);
for (pk, slots) in vec![(pk1, vec![slot1, slot2]), (pk2, vec![slot1])] {
for (pk, slots) in [(pk1, vec![slot1, slot2]), (pk2, vec![slot1])] {
let result = purged_stored_account_slots.remove(&pk).unwrap();
assert_eq!(result, slots.into_iter().collect::<HashSet<_>>());
}
@ -17742,7 +17737,7 @@ pub mod tests {
let slot0 = 0;
let dropped_roots = vec![slot0];
db.accounts_index.add_root(slot0);
db.accounts_index.add_uncleaned_roots([slot0].into_iter());
db.accounts_index.add_uncleaned_roots([slot0]);
assert!(db.accounts_index.is_uncleaned_root(slot0));
assert!(db.accounts_index.is_alive_root(slot0));
db.handle_dropped_roots_for_ancient(dropped_roots.into_iter());

View File

@ -2941,7 +2941,7 @@ pub mod tests {
assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len());
index.add_root(0);
index.add_root(1);
index.add_uncleaned_roots([0, 1].into_iter());
index.add_uncleaned_roots([0, 1]);
assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
index.reset_uncleaned_roots(None);
@ -2950,7 +2950,7 @@ pub mod tests {
index.add_root(2);
index.add_root(3);
index.add_uncleaned_roots([2, 3].into_iter());
index.add_uncleaned_roots([2, 3]);
assert_eq!(4, index.roots_tracker.read().unwrap().alive_roots.len());
assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());

View File

@ -2071,7 +2071,7 @@ pub mod tests {
let can_randomly_shrink = false;
for method in TestCollectInfo::iter() {
for slot1_is_alive in [false, true] {
let alives = vec![false /*dummy*/, slot1_is_alive, !slot1_is_alive];
let alives = [false /*dummy*/, slot1_is_alive, !slot1_is_alive];
let slots = 2;
// 1_040_000 is big enough relative to page size to cause shrink ratio to be triggered
for data_size in [None, Some(1_040_000)] {
@ -2098,7 +2098,7 @@ pub mod tests {
});
let alive_storages = storages
.iter()
.filter_map(|storage| alives[storage.slot() as usize].then_some(storage))
.filter(|storage| alives[storage.slot() as usize])
.collect::<Vec<_>>();
let alive_bytes_expected = alive_storages
.iter()
@ -2362,7 +2362,7 @@ pub mod tests {
let can_randomly_shrink = false;
for method in TestCollectInfo::iter() {
for slot1_shrink in [false, true] {
let shrinks = vec![false /*dummy*/, slot1_shrink, !slot1_shrink];
let shrinks = [false /*dummy*/, slot1_shrink, !slot1_shrink];
let slots = 2;
// 1_040_000 is big enough relative to page size to cause shrink ratio to be triggered
let data_sizes = shrinks

View File

@ -787,7 +787,7 @@ pub mod tests {
// for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash)
let account = AccountSharedData::default();
let slot = 0 as Slot;
let pubkeys = vec![Pubkey::default()];
let pubkeys = [Pubkey::default()];
let hashes = Vec::<Hash>::default();
let write_versions = Vec::default();
let mut accounts = vec![(&pubkeys[0], &account)];
@ -808,10 +808,10 @@ pub mod tests {
// for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash)
let account = AccountSharedData::default();
let slot = 0 as Slot;
let pubkeys = vec![Pubkey::from([5; 32]), Pubkey::from([6; 32])];
let pubkeys = [Pubkey::from([5; 32]), Pubkey::from([6; 32])];
let hashes = vec![Hash::new(&[3; 32]), Hash::new(&[4; 32])];
let write_versions = vec![42, 43];
let accounts = vec![(&pubkeys[0], &account), (&pubkeys[1], &account)];
let accounts = [(&pubkeys[0], &account), (&pubkeys[1], &account)];
let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS);
let storable =
StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions(
@ -842,7 +842,7 @@ pub mod tests {
let pubkey = Pubkey::default();
let hashes = vec![Hash::default()];
let write_versions = vec![0];
let accounts = vec![(&pubkey, &account)];
let accounts = [(&pubkey, &account)];
let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS);
let storable =
StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions(
@ -861,7 +861,7 @@ pub mod tests {
}
.to_account_shared_data();
// for (Slot, &'a [(&'a Pubkey, &'a T)], IncludeSlotInHash)
let accounts = vec![(&pubkey, &account)];
let accounts = [(&pubkey, &account)];
let accounts2 = (slot, &accounts[..], INCLUDE_SLOT_IN_HASH_TESTS);
let storable =
StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions(

View File

@ -571,7 +571,7 @@ pub mod tests {
let remaining2 = entries.saturating_sub(entries0 + entries1);
for entries2 in 0..=remaining2 {
let remaining3 = entries.saturating_sub(entries0 + entries1 + entries2);
let entries_by_level = vec![entries0, entries1, entries2, remaining3];
let entries_by_level = [entries0, entries1, entries2, remaining3];
let mut overall_index = 0;
let mut expected_slots = Vec::default();
let slots_and_accounts = entries_by_level

View File

@ -837,7 +837,7 @@ mod test {
assert_eq!(
output,
r#"Block Time: 2021-08-10T22:16:31Z
r"Block Time: 2021-08-10T22:16:31Z
Version: legacy
Recent Blockhash: 11111111111111111111111111111111
Signature 0: 5pkjrE4VBa3Bu9CMKXgh1U345cT1gGo8QBVRTzHAo6gHeiPae5BTbShP15g6NgqRMNqu8Qrhph1ATmrfC1Ley3rx (pass)
@ -860,7 +860,7 @@ Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR:
Rewards:
Address Type Amount New Balance \0
4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi rent -0.000000100 0.000009900 \0
"#.replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI
".replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI
);
}
@ -916,7 +916,7 @@ Rewards:
assert_eq!(
output,
r#"Block Time: 2021-08-10T22:16:31Z
r"Block Time: 2021-08-10T22:16:31Z
Version: 0
Recent Blockhash: 11111111111111111111111111111111
Signature 0: 5iEy3TT3ZhTA1NkuCY8GrQGNVY8d5m1bpjdh5FT3Ca4Py81fMipAZjafDuKJKrkw5q5UAAd8oPcgZ4nyXpHt4Fp7 (pass)
@ -948,7 +948,7 @@ Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR:
Rewards:
Address Type Amount New Balance \0
CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8 rent -0.000000100 0.000014900 \0
"#.replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI
".replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI
);
}

View File

@ -1919,8 +1919,8 @@ mod tests {
assert!(parse_command(&test_bad_signature, &default_signer, &mut None).is_err());
// Test CreateAddressWithSeed
let from_pubkey = Some(solana_sdk::pubkey::new_rand());
let from_str = from_pubkey.unwrap().to_string();
let from_pubkey = solana_sdk::pubkey::new_rand();
let from_str = from_pubkey.to_string();
for (name, program_id) in &[
("STAKE", stake::program::id()),
("VOTE", solana_vote_program::id()),
@ -1938,7 +1938,7 @@ mod tests {
parse_command(&test_create_address_with_seed, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateAddressWithSeed {
from_pubkey,
from_pubkey: Some(from_pubkey),
seed: "seed".to_string(),
program_id: *program_id
},

View File

@ -102,7 +102,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
.collect::<Vec<_>>();
let batches_len = batches.len();
let mut transaction_buffer = UnprocessedTransactionStorage::new_transaction_storage(
UnprocessedPacketBatches::from_iter(batches.into_iter(), 2 * batches_len),
UnprocessedPacketBatches::from_iter(batches, 2 * batches_len),
ThreadType::Transactions,
);
let (s, _r) = unbounded();

View File

@ -1834,7 +1834,7 @@ mod tests {
let mut buffered_packet_batches =
UnprocessedTransactionStorage::new_transaction_storage(
UnprocessedPacketBatches::from_iter(
deserialized_packets.into_iter(),
deserialized_packets,
num_conflicting_transactions,
),
ThreadType::Transactions,
@ -1912,7 +1912,7 @@ mod tests {
let mut buffered_packet_batches =
UnprocessedTransactionStorage::new_transaction_storage(
UnprocessedPacketBatches::from_iter(
deserialized_packets.into_iter(),
deserialized_packets,
num_conflicting_transactions,
),
ThreadType::Transactions,
@ -1964,7 +1964,7 @@ mod tests {
let mut buffered_packet_batches =
UnprocessedTransactionStorage::new_transaction_storage(
UnprocessedPacketBatches::from_iter(
deserialized_packets.into_iter(),
deserialized_packets,
num_conflicting_transactions,
),
ThreadType::Transactions,

View File

@ -441,10 +441,7 @@ mod tests {
};
let mut unprocessed_packet_batches = UnprocessedTransactionStorage::new_transaction_storage(
UnprocessedPacketBatches::from_iter(
vec![forwarded_packet, normal_packet].into_iter(),
2,
),
UnprocessedPacketBatches::from_iter(vec![forwarded_packet, normal_packet], 2),
ThreadType::Transactions,
);
let connection_cache = ConnectionCache::new("connection_cache_test");

View File

@ -162,6 +162,6 @@ mod tests {
let packet = Packet::from_data(None, tx).unwrap();
let deserialized_packet = ImmutableDeserializedPacket::new(packet);
assert!(matches!(deserialized_packet, Ok(_)));
assert!(deserialized_packet.is_ok());
}
}

View File

@ -1046,7 +1046,7 @@ mod tests {
// all packets are forwarded
{
let buffered_packet_batches: UnprocessedPacketBatches =
UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len());
UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage(
buffered_packet_batches,
ThreadType::Transactions,
@ -1085,7 +1085,7 @@ mod tests {
packet.forwarded = true;
}
let buffered_packet_batches: UnprocessedPacketBatches =
UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len());
UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage(
buffered_packet_batches,
ThreadType::Transactions,
@ -1119,7 +1119,7 @@ mod tests {
assert_eq!(current_bank.process_transaction(tx), Ok(()));
}
let buffered_packet_batches: UnprocessedPacketBatches =
UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len());
UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage(
buffered_packet_batches,
ThreadType::Transactions,
@ -1285,7 +1285,7 @@ mod tests {
// all tracer packets are forwardable
{
let buffered_packet_batches: UnprocessedPacketBatches =
UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len());
UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
let (
total_tracer_packets_in_buffer,
total_packets_to_forward,
@ -1303,7 +1303,7 @@ mod tests {
packet.forwarded = true;
}
let buffered_packet_batches: UnprocessedPacketBatches =
UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len());
UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
let (
total_tracer_packets_in_buffer,
total_packets_to_forward,
@ -1320,7 +1320,7 @@ mod tests {
packet.forwarded = true;
}
let buffered_packet_batches: UnprocessedPacketBatches =
UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len());
UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
let (
total_tracer_packets_in_buffer,
total_packets_to_forward,

View File

@ -1197,7 +1197,7 @@ mod tests {
let all_expected_slots: BTreeSet<_> = gossip_vote_slots
.clone()
.into_iter()
.chain(replay_vote_slots.clone().into_iter())
.chain(replay_vote_slots.clone())
.collect();
let mut pubkey_to_votes: HashMap<Pubkey, BTreeSet<Slot>> = HashMap::new();
for (received_pubkey, new_votes) in verified_vote_receiver.try_iter() {

View File

@ -215,7 +215,7 @@ impl AggregateCommitmentService {
if *a <= root {
commitment
.entry(*a)
.or_insert_with(BlockCommitment::default)
.or_default()
.increase_rooted_stake(lamports);
} else {
ancestors_index = i;
@ -229,7 +229,7 @@ impl AggregateCommitmentService {
while ancestors[ancestors_index] <= vote.slot() {
commitment
.entry(ancestors[ancestors_index])
.or_insert_with(BlockCommitment::default)
.or_default()
.increase_confirmation_stake(vote.confirmation_count() as usize, lamports);
ancestors_index += 1;

View File

@ -346,7 +346,7 @@ impl Tower {
for vote in &vote_state.votes {
lockout_intervals
.entry(vote.lockout.last_locked_out_slot())
.or_insert_with(Vec::new)
.or_default()
.push((vote.slot(), key));
}

View File

@ -192,7 +192,7 @@ impl PartialEq for HeaviestSubtreeForkChoice {
impl PartialOrd for HeaviestSubtreeForkChoice {
// Sort by root
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.tree_root.partial_cmp(&other.tree_root)
Some(self.tree_root.cmp(&other.tree_root))
}
}

View File

@ -153,7 +153,7 @@ mod tests {
// Case 2: Frozen vote should be added, but the same vote added again
// shouldn't update state
let num_repeated_iterations = 3;
let frozen_hash = Some(Hash::new_unique());
let frozen_hash = Hash::new_unique();
for i in 0..num_repeated_iterations {
let expected_result = if i == 0 {
(true, Some(vote_slot))
@ -164,7 +164,7 @@ mod tests {
latest_validator_votes_for_frozen_banks.check_add_vote(
vote_pubkey,
vote_slot,
frozen_hash,
Some(frozen_hash),
is_replay_vote,
),
expected_result
@ -173,7 +173,7 @@ mod tests {
*latest_validator_votes_for_frozen_banks
.latest_vote(&vote_pubkey, is_replay_vote)
.unwrap(),
(vote_slot, vec![frozen_hash.unwrap()])
(vote_slot, vec![frozen_hash])
);
if is_replay_vote {
assert_eq!(
@ -181,7 +181,7 @@ mod tests {
.fork_choice_dirty_set
.get(&vote_pubkey)
.unwrap(),
(vote_slot, vec![frozen_hash.unwrap()])
(vote_slot, vec![frozen_hash])
);
} else {
assert!(latest_validator_votes_for_frozen_banks
@ -192,13 +192,13 @@ mod tests {
}
// Case 3: Adding duplicate vote for same slot should update the state
let duplicate_frozen_hash = Some(Hash::new_unique());
let all_frozen_hashes = vec![frozen_hash.unwrap(), duplicate_frozen_hash.unwrap()];
let duplicate_frozen_hash = Hash::new_unique();
let all_frozen_hashes = vec![frozen_hash, duplicate_frozen_hash];
assert_eq!(
latest_validator_votes_for_frozen_banks.check_add_vote(
vote_pubkey,
vote_slot,
duplicate_frozen_hash,
Some(duplicate_frozen_hash),
is_replay_vote,
),
(true, Some(vote_slot))
@ -293,12 +293,12 @@ mod tests {
// Case 6: Adding a vote for a new higher slot that *is* frozen
// should update the state
let frozen_hash = Some(Hash::new_unique());
let frozen_hash = Hash::new_unique();
assert_eq!(
latest_validator_votes_for_frozen_banks.check_add_vote(
vote_pubkey,
vote_slot,
frozen_hash,
Some(frozen_hash),
is_replay_vote,
),
(true, Some(vote_slot))
@ -307,7 +307,7 @@ mod tests {
*latest_validator_votes_for_frozen_banks
.latest_vote(&vote_pubkey, is_replay_vote)
.unwrap(),
(vote_slot, vec![frozen_hash.unwrap()])
(vote_slot, vec![frozen_hash])
);
if is_replay_vote {
assert_eq!(
@ -315,7 +315,7 @@ mod tests {
.fork_choice_dirty_set
.get(&vote_pubkey)
.unwrap(),
(vote_slot, vec![frozen_hash.unwrap()])
(vote_slot, vec![frozen_hash])
);
} else {
assert!(latest_validator_votes_for_frozen_banks
@ -326,13 +326,13 @@ mod tests {
// Case 7: Adding a vote for a new pubkey should also update the state
vote_slot += 1;
let frozen_hash = Some(Hash::new_unique());
let frozen_hash = Hash::new_unique();
let vote_pubkey = Pubkey::new_unique();
assert_eq!(
latest_validator_votes_for_frozen_banks.check_add_vote(
vote_pubkey,
vote_slot,
frozen_hash,
Some(frozen_hash),
is_replay_vote,
),
(true, Some(vote_slot))
@ -341,7 +341,7 @@ mod tests {
*latest_validator_votes_for_frozen_banks
.latest_vote(&vote_pubkey, is_replay_vote)
.unwrap(),
(vote_slot, vec![frozen_hash.unwrap()])
(vote_slot, vec![frozen_hash])
);
if is_replay_vote {
assert_eq!(
@ -349,7 +349,7 @@ mod tests {
.fork_choice_dirty_set
.get(&vote_pubkey)
.unwrap(),
(vote_slot, vec![frozen_hash.unwrap()])
(vote_slot, vec![frozen_hash])
);
} else {
assert!(latest_validator_votes_for_frozen_banks

View File

@ -304,7 +304,7 @@ mod test {
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
// If we know set the root in blockstore, should return nothing
blockstore.set_roots(vec![1, 3].iter()).unwrap();
blockstore.set_roots([1, 3].iter()).unwrap();
optimistic_confirmation_verifier
.add_new_optimistic_confirmed_slots(optimistic_slots, &blockstore);
assert!(optimistic_confirmation_verifier

View File

@ -93,10 +93,7 @@ impl PohTimingReporter {
/// Process incoming PohTimingPoint from the channel
pub fn process(&mut self, slot: Slot, root_slot: Option<Slot>, t: PohTimingPoint) -> bool {
let slot_timestamp = self
.slot_timestamps
.entry(slot)
.or_insert_with(SlotPohTimestamp::default);
let slot_timestamp = self.slot_timestamps.entry(slot).or_default();
slot_timestamp.update(t);
let is_completed = slot_timestamp.is_complete();

View File

@ -94,10 +94,10 @@ pub struct DeadState {
impl DeadState {
pub fn new_from_state(
slot: Slot,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
duplicate_slots_tracker: &DuplicateSlotsTracker,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
fork_choice: &mut HeaviestSubtreeForkChoice,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
fork_choice: &HeaviestSubtreeForkChoice,
epoch_slots_frozen_slots: &EpochSlotsFrozenSlots,
) -> Self {
let cluster_confirmed_hash = get_cluster_confirmed_hash_from_state(
slot,
@ -131,9 +131,9 @@ impl BankFrozenState {
pub fn new_from_state(
slot: Slot,
frozen_hash: Hash,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
duplicate_slots_tracker: &DuplicateSlotsTracker,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
fork_choice: &mut HeaviestSubtreeForkChoice,
fork_choice: &HeaviestSubtreeForkChoice,
epoch_slots_frozen_slots: &EpochSlotsFrozenSlots,
) -> Self {
let cluster_confirmed_hash = get_cluster_confirmed_hash_from_state(
@ -197,7 +197,7 @@ impl DuplicateState {
pub fn new_from_state(
slot: Slot,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
fork_choice: &mut HeaviestSubtreeForkChoice,
fork_choice: &HeaviestSubtreeForkChoice,
is_dead: impl Fn() -> bool,
get_hash: impl Fn() -> Option<Hash>,
) -> Self {
@ -237,7 +237,7 @@ impl EpochSlotsFrozenState {
slot: Slot,
epoch_slots_frozen_hash: Hash,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
fork_choice: &mut HeaviestSubtreeForkChoice,
fork_choice: &HeaviestSubtreeForkChoice,
is_dead: impl Fn() -> bool,
get_hash: impl Fn() -> Option<Hash>,
is_popular_pruned: bool,
@ -691,7 +691,7 @@ fn get_cluster_confirmed_hash_from_state(
slot: Slot,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
epoch_slots_frozen_slots: &EpochSlotsFrozenSlots,
fork_choice: &mut HeaviestSubtreeForkChoice,
fork_choice: &HeaviestSubtreeForkChoice,
bank_frozen_hash: Option<Hash>,
) -> Option<ClusterConfirmedHash> {
let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot).cloned();
@ -722,7 +722,7 @@ fn get_cluster_confirmed_hash_from_state(
fn get_duplicate_confirmed_hash_from_state(
slot: Slot,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
fork_choice: &mut HeaviestSubtreeForkChoice,
fork_choice: &HeaviestSubtreeForkChoice,
bank_frozen_hash: Option<Hash>,
) -> Option<Hash> {
let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot).cloned();
@ -1324,9 +1324,9 @@ mod test {
)
},
duplicate_state_update_5: {
let duplicate_confirmed_hash = Some(Hash::new_unique());
let bank_status = BankStatus::Frozen(duplicate_confirmed_hash.unwrap());
let duplicate_state = DuplicateState::new(duplicate_confirmed_hash, bank_status);
let duplicate_confirmed_hash = Hash::new_unique();
let bank_status = BankStatus::Frozen(duplicate_confirmed_hash);
let duplicate_state = DuplicateState::new(Some(duplicate_confirmed_hash), bank_status);
(
SlotStateUpdate::Duplicate(duplicate_state),
Vec::<ResultingStateChange>::new()
@ -1447,9 +1447,9 @@ mod test {
},
epoch_slots_frozen_state_update_10: {
let epoch_slots_frozen_hash = Hash::new_unique();
let duplicate_confirmed_hash = Some(Hash::new_unique());
let bank_status = BankStatus::Frozen(duplicate_confirmed_hash.unwrap());
let epoch_slots_frozen_state = EpochSlotsFrozenState::new(epoch_slots_frozen_hash, duplicate_confirmed_hash, bank_status, false);
let duplicate_confirmed_hash = Hash::new_unique();
let bank_status = BankStatus::Frozen(duplicate_confirmed_hash);
let epoch_slots_frozen_state = EpochSlotsFrozenState::new(epoch_slots_frozen_hash, Some(duplicate_confirmed_hash), bank_status, false);
(
SlotStateUpdate::EpochSlotsFrozen(epoch_slots_frozen_state),
Vec::<ResultingStateChange>::new()
@ -1849,7 +1849,7 @@ mod test {
let duplicate_state = DuplicateState::new_from_state(
duplicate_slot,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
|| progress.is_dead(duplicate_slot).unwrap_or(false),
|| initial_bank_hash,
);
@ -1886,9 +1886,9 @@ mod test {
let bank_frozen_state = BankFrozenState::new_from_state(
duplicate_slot,
frozen_duplicate_slot_hash,
&mut duplicate_slots_tracker,
&duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
&epoch_slots_frozen_slots,
);
check_slot_agrees_with_cluster(
@ -1997,7 +1997,7 @@ mod test {
let duplicate_state = DuplicateState::new_from_state(
3,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
|| progress.is_dead(3).unwrap_or(false),
|| Some(slot3_hash),
);
@ -2067,7 +2067,7 @@ mod test {
let duplicate_state = DuplicateState::new_from_state(
2,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
|| progress.is_dead(2).unwrap_or(false),
|| Some(slot2_hash),
);
@ -2216,7 +2216,7 @@ mod test {
let duplicate_state = DuplicateState::new_from_state(
1,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
|| progress.is_dead(1).unwrap_or(false),
|| Some(slot1_hash),
);
@ -2270,7 +2270,7 @@ mod test {
3,
slot3_hash,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
|| progress.is_dead(3).unwrap_or(false),
|| Some(slot3_hash),
false,
@ -2364,7 +2364,7 @@ mod test {
3,
mismatched_hash,
&gossip_duplicate_confirmed_slots,
&mut heaviest_subtree_fork_choice,
&heaviest_subtree_fork_choice,
|| progress.is_dead(3).unwrap_or(false),
|| Some(slot3_hash),
false,

View File

@ -659,7 +659,7 @@ impl ReplayStage {
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&mut epoch_slots_frozen_slots,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
&bank_forks,
&mut duplicate_slots_to_repair,
@ -678,7 +678,7 @@ impl ReplayStage {
&mut gossip_duplicate_confirmed_slots,
&mut epoch_slots_frozen_slots,
&bank_forks,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
&mut duplicate_slots_to_repair,
&ancestor_hashes_replay_update_sender,
@ -729,7 +729,7 @@ impl ReplayStage {
&gossip_duplicate_confirmed_slots,
&mut epoch_slots_frozen_slots,
&bank_forks,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
&mut duplicate_slots_to_repair,
&ancestor_hashes_replay_update_sender,
@ -1408,7 +1408,7 @@ impl ReplayStage {
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
progress: &mut ProgressMap,
progress: &ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
bank_forks: &RwLock<BankForks>,
duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
@ -1648,7 +1648,7 @@ impl ReplayStage {
gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
bank_forks: &RwLock<BankForks>,
progress: &mut ProgressMap,
progress: &ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender,
@ -1716,7 +1716,7 @@ impl ReplayStage {
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
bank_forks: &RwLock<BankForks>,
progress: &mut ProgressMap,
progress: &ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender,
@ -3240,7 +3240,7 @@ impl ReplayStage {
fn select_forks_failed_switch_threshold(
reset_bank: Option<&Bank>,
progress: &ProgressMap,
tower: &mut Tower,
tower: &Tower,
heaviest_bank_slot: Slot,
failure_reasons: &mut Vec<HeaviestForkFailures>,
switch_proof_stake: u64,
@ -6326,7 +6326,7 @@ pub(crate) mod tests {
let duplicate_state = DuplicateState::new_from_state(
5,
&gossip_duplicate_confirmed_slots,
&mut vote_simulator.heaviest_subtree_fork_choice,
&vote_simulator.heaviest_subtree_fork_choice,
|| progress.is_dead(5).unwrap_or(false),
|| Some(bank5_hash),
);
@ -6463,7 +6463,7 @@ pub(crate) mod tests {
let duplicate_state = DuplicateState::new_from_state(
4,
&gossip_duplicate_confirmed_slots,
&mut vote_simulator.heaviest_subtree_fork_choice,
&vote_simulator.heaviest_subtree_fork_choice,
|| progress.is_dead(4).unwrap_or(false),
|| Some(bank4_hash),
);
@ -6500,7 +6500,7 @@ pub(crate) mod tests {
let duplicate_state = DuplicateState::new_from_state(
2,
&gossip_duplicate_confirmed_slots,
&mut vote_simulator.heaviest_subtree_fork_choice,
&vote_simulator.heaviest_subtree_fork_choice,
|| progress.is_dead(2).unwrap_or(false),
|| Some(bank2_hash),
);

View File

@ -624,7 +624,7 @@ mod tests {
let second_vote = VoteStateUpdate::from(vec![(2, 4), (4, 3), (11, 1)]);
let third_vote = VoteStateUpdate::from(vec![(2, 5), (4, 4), (11, 3), (12, 2), (13, 1)]);
for vote in vec![second_vote.clone(), first_vote.clone()] {
for vote in [second_vote.clone(), first_vote.clone()] {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote: VoteTransaction::from(vote),

View File

@ -306,7 +306,7 @@ fn test_bank_forks_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clu
);
}
fn goto_end_of_slot(bank: &mut Bank) {
fn goto_end_of_slot(bank: &Bank) {
let mut tick_hash = bank.last_blockhash();
loop {
tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);

View File

@ -343,7 +343,7 @@ fn create_sender_thread(
fn create_generator_thread<T: 'static + BenchTpsClient + Send + Sync>(
tx_sender: &Sender<TransactionBatchMsg>,
send_batch_size: usize,
transaction_generator: &mut TransactionGenerator,
transaction_generator: &TransactionGenerator,
client: Option<Arc<T>>,
payer: Option<Keypair>,
) -> thread::JoinHandle<()> {
@ -589,7 +589,7 @@ fn run_dos_transactions<T: 'static + BenchTpsClient + Send + Sync>(
client.as_ref(),
);
let mut transaction_generator = TransactionGenerator::new(transaction_params);
let transaction_generator = TransactionGenerator::new(transaction_params);
let (tx_sender, tx_receiver) = unbounded();
let sender_thread = create_sender_thread(tx_receiver, iterations, &target, tpu_use_quic);
@ -599,7 +599,7 @@ fn run_dos_transactions<T: 'static + BenchTpsClient + Send + Sync>(
create_generator_thread(
&tx_sender,
send_batch_size,
&mut transaction_generator,
&transaction_generator,
client.clone(),
payer,
)

View File

@ -312,8 +312,7 @@ pub fn request_airdrop_transaction(
}
// Read the transaction
let mut buffer = Vec::new();
buffer.resize(transaction_length, 0);
let mut buffer = vec![0; transaction_length];
stream.read_exact(&mut buffer).map_err(|err| {
info!(
"request_airdrop_transaction: buffer read_exact error: {:?}",

View File

@ -3359,10 +3359,7 @@ mod tests {
let recycler = PacketBatchRecycler::default();
let packets = cluster_info
.handle_ping_messages(
remote_nodes
.iter()
.map(|(_, socket)| *socket)
.zip(pings.into_iter()),
remote_nodes.iter().map(|(_, socket)| *socket).zip(pings),
&recycler,
)
.unwrap();

View File

@ -244,7 +244,7 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
Network::new(network)
}
fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network) {
fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &Network) {
let num = network.len();
let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, num * 2, 0.9);
trace!(
@ -466,7 +466,7 @@ fn network_run_push(
fn network_run_pull(
thread_pool: &ThreadPool,
network: &mut Network,
network: &Network,
start: usize,
end: usize,
max_convergance: f64,
@ -635,16 +635,16 @@ fn new_ping_cache() -> Mutex<PingCache> {
#[test]
#[serial]
fn test_star_network_pull_50() {
let mut network = star_network_create(50);
let network = star_network_create(50);
let thread_pool = build_gossip_thread_pool();
network_simulator_pull_only(&thread_pool, &mut network);
network_simulator_pull_only(&thread_pool, &network);
}
#[test]
#[serial]
fn test_star_network_pull_100() {
let mut network = star_network_create(100);
let network = star_network_create(100);
let thread_pool = build_gossip_thread_pool();
network_simulator_pull_only(&thread_pool, &mut network);
network_simulator_pull_only(&thread_pool, &network);
}
#[test]
#[serial]
@ -704,9 +704,9 @@ fn test_connected_staked_network() {
#[ignore]
fn test_star_network_large_pull() {
solana_logger::setup();
let mut network = star_network_create(2000);
let network = star_network_create(2000);
let thread_pool = build_gossip_thread_pool();
network_simulator_pull_only(&thread_pool, &mut network);
network_simulator_pull_only(&thread_pool, &network);
}
#[test]
#[ignore]

View File

@ -630,7 +630,6 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
let passphrase = passphrase.clone();
let passphrase_message = passphrase_message.clone();
let derivation_path = derivation_path.clone();
let skip_len_44_pubkeys = skip_len_44_pubkeys;
thread::spawn(move || loop {
if done.load(Ordering::Relaxed) {

View File

@ -29,7 +29,7 @@ fn bench_write_shreds(bench: &mut Bencher, entries: Vec<Entry>, ledger_path: &Pa
// Insert some shreds into the ledger in preparation for read benchmarks
fn setup_read_bench(
blockstore: &mut Blockstore,
blockstore: &Blockstore,
num_small_shreds: u64,
num_large_shreds: u64,
slot: Slot,
@ -79,7 +79,7 @@ fn bench_write_big(bench: &mut Bencher) {
#[ignore]
fn bench_read_sequential(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let mut blockstore =
let blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small shreds into the ledger
@ -87,7 +87,7 @@ fn bench_read_sequential(bench: &mut Bencher) {
let num_large_shreds = 32 * 1024;
let total_shreds = num_small_shreds + num_large_shreds;
let slot = 0;
setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
setup_read_bench(&blockstore, num_small_shreds, num_large_shreds, slot);
let num_reads = total_shreds / 15;
let mut rng = rand::thread_rng();
@ -106,7 +106,7 @@ fn bench_read_sequential(bench: &mut Bencher) {
#[ignore]
fn bench_read_random(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path!();
let mut blockstore =
let blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small shreds into the ledger
@ -114,7 +114,7 @@ fn bench_read_random(bench: &mut Bencher) {
let num_large_shreds = 32 * 1024;
let total_shreds = num_small_shreds + num_large_shreds;
let slot = 0;
setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
setup_read_bench(&blockstore, num_small_shreds, num_large_shreds, slot);
let num_reads = total_shreds / 15;

View File

@ -638,7 +638,7 @@ impl Blockstore {
}
fn recover_shreds(
index: &mut Index,
index: &Index,
erasure_meta: &ErasureMeta,
prev_inserted_shreds: &HashMap<ShredId, Shred>,
recovered_shreds: &mut Vec<Shred>,
@ -5409,7 +5409,7 @@ pub mod tests {
} = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
let entries_per_slot = 10;
let slots = vec![2, 5, 10];
let slots = [2, 5, 10];
let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
// Get the shreds for slot 10, chaining to slot 5
@ -5766,7 +5766,7 @@ pub mod tests {
assert_eq!(&roots, &blockstore_roots(&blockstore));
// Mark additional root
let new_roots = vec![16];
let new_roots = [16];
let roots = vec![0, 2, 4, 6, 8, 10, 12, 16];
blockstore.set_roots(new_roots.iter()).unwrap();
assert_eq!(&roots, &blockstore_roots(&blockstore));
@ -6835,7 +6835,7 @@ pub mod tests {
fn test_is_skipped() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let roots = vec![2, 4, 7, 12, 15];
let roots = [2, 4, 7, 12, 15];
blockstore.set_roots(roots.iter()).unwrap();
for i in 0..20 {
@ -7032,7 +7032,7 @@ pub mod tests {
true, // merkle_variant
);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(vec![slot].iter()).unwrap();
blockstore.set_roots([slot].iter()).unwrap();
}
assert_eq!(blockstore.get_first_available_block().unwrap(), 0);
assert_eq!(blockstore.lowest_slot_with_genesis(), 0);
@ -7081,7 +7081,7 @@ pub mod tests {
.insert_shreds(unrooted_shreds, None, false)
.unwrap();
blockstore
.set_roots(vec![slot - 1, slot, slot + 1].iter())
.set_roots([slot - 1, slot, slot + 1].iter())
.unwrap();
let parent_meta = SlotMeta::default();
@ -7671,7 +7671,7 @@ pub mod tests {
let meta3 = SlotMeta::new(3, Some(2));
blockstore.meta_cf.put(3, &meta3).unwrap();
blockstore.set_roots(vec![0, 2].iter()).unwrap();
blockstore.set_roots([0, 2].iter()).unwrap();
// Initialize index 0, including:
// signature2 in non-root and root,
@ -7854,7 +7854,7 @@ pub mod tests {
let meta3 = SlotMeta::new(3, Some(2));
blockstore.meta_cf.put(3, &meta3).unwrap();
blockstore.set_roots(vec![0, 1, 2, 3].iter()).unwrap();
blockstore.set_roots([0, 1, 2, 3].iter()).unwrap();
let lowest_cleanup_slot = 1;
let lowest_available_slot = lowest_cleanup_slot + 1;
@ -7971,7 +7971,7 @@ pub mod tests {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(vec![slot - 1, slot].iter()).unwrap();
blockstore.set_roots([slot - 1, slot].iter()).unwrap();
let expected_transactions: Vec<VersionedTransactionWithStatusMeta> = entries
.iter()
@ -8237,7 +8237,7 @@ pub mod tests {
)
.unwrap();
}
blockstore.set_roots(vec![slot0, slot1].iter()).unwrap();
blockstore.set_roots([slot0, slot1].iter()).unwrap();
let all0 = blockstore
.get_confirmed_signatures_for_address(address0, 0, 50)
@ -8330,7 +8330,7 @@ pub mod tests {
)
.unwrap();
}
blockstore.set_roots(vec![21, 22, 23, 24].iter()).unwrap();
blockstore.set_roots([21, 22, 23, 24].iter()).unwrap();
let mut past_slot = 0;
for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() {
assert!(slot >= past_slot);
@ -8508,9 +8508,7 @@ pub mod tests {
}
// Leave one slot unrooted to test only returns confirmed signatures
blockstore
.set_roots(vec![1, 2, 4, 5, 6, 7, 8].iter())
.unwrap();
blockstore.set_roots([1, 2, 4, 5, 6, 7, 8].iter()).unwrap();
let highest_super_majority_root = 8;
// Fetch all rooted signatures for address 0 at once...

View File

@ -682,7 +682,7 @@ pub mod tests {
}
fn clear_and_repopulate_transaction_statuses_for_test(
blockstore: &mut Blockstore,
blockstore: &Blockstore,
index0_max_slot: u64,
index1_max_slot: u64,
) {
@ -803,14 +803,14 @@ pub mod tests {
#[allow(clippy::cognitive_complexity)]
fn test_purge_transaction_status_exact() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let mut blockstore = Blockstore::open(ledger_path.path()).unwrap();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let index0_max_slot = 9;
let index1_max_slot = 19;
// Test purge outside bounds
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);
@ -857,7 +857,7 @@ pub mod tests {
// Test purge inside index 0
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);
@ -906,7 +906,7 @@ pub mod tests {
// Test purge inside index 0 at upper boundary
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);
@ -957,7 +957,7 @@ pub mod tests {
// Test purge inside index 1 at lower boundary
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);
@ -1005,7 +1005,7 @@ pub mod tests {
// Test purge across index boundaries
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);
@ -1055,7 +1055,7 @@ pub mod tests {
// Test purge include complete index 1
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);
@ -1102,7 +1102,7 @@ pub mod tests {
// Test purge all
clear_and_repopulate_transaction_statuses_for_test(
&mut blockstore,
&blockstore,
index0_max_slot,
index1_max_slot,
);

View File

@ -2007,7 +2007,7 @@ pub mod tests {
#[test]
fn test_should_enable_cf_compaction() {
let columns_to_compact = vec![
let columns_to_compact = [
columns::TransactionStatus::NAME,
columns::AddressSignatures::NAME,
];

View File

@ -2241,7 +2241,7 @@ pub mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blockstore.set_roots(vec![0, 1, 4].iter()).unwrap();
blockstore.set_roots([0, 1, 4].iter()).unwrap();
let opts = ProcessOptions {
run_verification: true,
@ -2321,7 +2321,7 @@ pub mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blockstore.set_roots(vec![0, 1].iter()).unwrap();
blockstore.set_roots([0, 1].iter()).unwrap();
let opts = ProcessOptions {
run_verification: true,
@ -3528,7 +3528,7 @@ pub mod tests {
genesis_config.ticks_per_slot,
genesis_config.hash(),
);
blockstore.set_roots(vec![0, 1].iter()).unwrap();
blockstore.set_roots([0, 1].iter()).unwrap();
// Specify halting at slot 0
let opts = ProcessOptions {
@ -3580,7 +3580,7 @@ pub mod tests {
last_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
}
blockstore.set_roots(vec![3, 5].iter()).unwrap();
blockstore.set_roots([3, 5].iter()).unwrap();
// Set up bank1
let mut bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config));

View File

@ -133,7 +133,7 @@ mod tests {
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, fork_point, fork_hash);
// Set a root
blockstore.set_roots(vec![1, 2, 3].iter()).unwrap();
blockstore.set_roots([1, 2, 3].iter()).unwrap();
// Trying to get an iterator on a different fork will error
assert!(RootedSlotIterator::new(4, &blockstore).is_err());
@ -195,7 +195,7 @@ mod tests {
}
// Set roots
blockstore.set_roots(vec![0, 1, 2, 3].iter()).unwrap();
blockstore.set_roots([0, 1, 2, 3].iter()).unwrap();
// Create one post-skip slot at 10, simulating starting from a snapshot
// at 10

View File

@ -164,9 +164,7 @@ fn sort_data_coding_into_fec_sets(
// Make sure there are no duplicates for same key
assert!(!data_slot_and_index.contains(&key));
data_slot_and_index.insert(key);
let fec_entry = fec_data
.entry(shred.fec_set_index())
.or_insert_with(Vec::new);
let fec_entry = fec_data.entry(shred.fec_set_index()).or_default();
fec_entry.push(shred);
}
for shred in coding_shreds {
@ -175,9 +173,7 @@ fn sort_data_coding_into_fec_sets(
// Make sure there are no duplicates for same key
assert!(!coding_slot_and_index.contains(&key));
coding_slot_and_index.insert(key);
let fec_entry = fec_coding
.entry(shred.fec_set_index())
.or_insert_with(Vec::new);
let fec_entry = fec_coding.entry(shred.fec_set_index()).or_default();
fec_entry.push(shred);
}
}

View File

@ -1622,7 +1622,7 @@ fn test_optimistic_confirmation_violation_detection() {
// First set up the cluster with 2 nodes
let slots_per_epoch = 2048;
let node_stakes = vec![50 * DEFAULT_NODE_STAKE, 51 * DEFAULT_NODE_STAKE];
let validator_keys: Vec<_> = vec![
let validator_keys: Vec<_> = [
"4qhhXNTbKD1a5vxDDLZcHKj7ELNeiivtUBxn3wUK1F5VRsQVP89VUhfXqSfgiFB14GfuBgtrQ96n9NvWQADVkcCg",
"3kHBzVwie5vTEaY6nFCPeFT8qDpoXzn7dCEioGRNBTnUDpvwnG85w8Wq63gVWpVTP8k2a8cgcWRjSXyUkEygpXWS",
]
@ -1910,7 +1910,7 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
ClusterMode::MasterSlave => vec![DEFAULT_NODE_STAKE * 100, DEFAULT_NODE_STAKE],
};
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
]
@ -2075,7 +2075,7 @@ fn test_hard_fork_invalidates_tower() {
let slots_per_epoch = 2048;
let node_stakes = vec![60 * DEFAULT_NODE_STAKE, 40 * DEFAULT_NODE_STAKE];
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
]
@ -2244,7 +2244,7 @@ fn test_hard_fork_with_gap_in_roots() {
let slots_per_epoch = 2048;
let node_stakes = vec![60, 40];
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
]
@ -2411,7 +2411,7 @@ fn test_restart_tower_rollback() {
let slots_per_epoch = 2048;
let node_stakes = vec![DEFAULT_NODE_STAKE * 100, DEFAULT_NODE_STAKE];
let validator_strings = vec![
let validator_strings = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
];
@ -2566,7 +2566,7 @@ fn test_rpc_block_subscribe() {
let mut validator_config = ValidatorConfig::default_for_test();
validator_config.enable_default_rpc_block_subscribe();
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
]
.iter()
@ -2642,7 +2642,7 @@ fn test_oc_bad_signatures() {
..ValidatorConfig::default_for_test()
};
validator_config.enable_default_rpc_block_subscribe();
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
]
@ -2991,7 +2991,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
// First set up the cluster with 2 nodes
let slots_per_epoch = 2048;
let node_stakes = vec![51 * DEFAULT_NODE_STAKE, 50 * DEFAULT_NODE_STAKE];
let validator_keys: Vec<_> = vec![
let validator_keys: Vec<_> = [
"4qhhXNTbKD1a5vxDDLZcHKj7ELNeiivtUBxn3wUK1F5VRsQVP89VUhfXqSfgiFB14GfuBgtrQ96n9NvWQADVkcCg",
"3kHBzVwie5vTEaY6nFCPeFT8qDpoXzn7dCEioGRNBTnUDpvwnG85w8Wq63gVWpVTP8k2a8cgcWRjSXyUkEygpXWS",
]
@ -3126,7 +3126,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// C can avoid NoPropagatedConfirmation errors and continue to generate blocks
// 2) Provide gossip discovery for `A` when it restarts because `A` will restart
// at a different gossip port than the entrypoint saved in C's gossip table
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
"4mx9yoFBeYasDKBGDWCTWGJdWuJCKbgqmuP8bN9umybCh5Jzngw7KQxe99Rf5uzfyzgba1i65rJW4Wqk7Ab5S8ye",
@ -4345,14 +4345,14 @@ fn test_slot_hash_expiry() {
let slots_per_epoch = 2048;
let node_stakes = vec![60 * DEFAULT_NODE_STAKE, 40 * DEFAULT_NODE_STAKE];
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
]
.iter()
.map(|s| (Arc::new(Keypair::from_base58_string(s)), true))
.collect::<Vec<_>>();
let node_vote_keys = vec![
let node_vote_keys = [
"3NDQ3ud86RTVg8hTy2dDWnS4P8NfjhZ2gDgQAJbr3heaKaUVS1FW3sTLKA1GmDrY9aySzsa4QxpDkbLv47yHxzr3",
"46ZHpHE6PEvXYPu3hf9iQqjBk2ZNDaJ9ejqKWHEjxaQjpAGasKaWKbKHbP3646oZhfgDRzx95DH9PCBKKsoCVngk",
]
@ -4574,7 +4574,7 @@ fn test_duplicate_with_pruned_ancestor() {
let num_nodes = node_stakes.len();
let validator_keys = vec![
let validator_keys = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
"4mx9yoFBeYasDKBGDWCTWGJdWuJCKbgqmuP8bN9umybCh5Jzngw7KQxe99Rf5uzfyzgba1i65rJW4Wqk7Ab5S8ye",
@ -5162,7 +5162,7 @@ fn test_duplicate_shreds_switch_failure() {
}
solana_logger::setup_with_default(RUST_LOG_FILTER);
let validator_keypairs = vec![
let validator_keypairs = [
"28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4",
"2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8",
"4mx9yoFBeYasDKBGDWCTWGJdWuJCKbgqmuP8bN9umybCh5Jzngw7KQxe99Rf5uzfyzgba1i65rJW4Wqk7Ab5S8ye",

View File

@ -20,7 +20,7 @@ mod common;
#[tokio::test]
async fn test_close_lookup_table() {
let mut context = setup_test_context().await;
overwrite_slot_hashes_with_slots(&mut context, &[]);
overwrite_slot_hashes_with_slots(&context, &[]);
let lookup_table_address = Pubkey::new_unique();
let authority_keypair = Keypair::new();

View File

@ -91,7 +91,7 @@ pub async fn add_lookup_table_account(
account
}
pub fn overwrite_slot_hashes_with_slots(context: &mut ProgramTestContext, slots: &[Slot]) {
pub fn overwrite_slot_hashes_with_slots(context: &ProgramTestContext, slots: &[Slot]) {
let mut slot_hashes = SlotHashes::default();
for slot in slots {
slot_hashes.add(*slot, Hash::new_unique());

View File

@ -29,7 +29,7 @@ async fn test_create_lookup_table_idempotent() {
let mut context = setup_test_context().await;
let test_recent_slot = 123;
overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]);
overwrite_slot_hashes_with_slots(&context, &[test_recent_slot]);
let client = &mut context.banks_client;
let payer = &context.payer;
@ -89,7 +89,7 @@ async fn test_create_lookup_table_not_idempotent() {
let mut context = setup_test_context_without_authority_feature().await;
let test_recent_slot = 123;
overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]);
overwrite_slot_hashes_with_slots(&context, &[test_recent_slot]);
let client = &mut context.banks_client;
let payer = &context.payer;
@ -129,7 +129,7 @@ async fn test_create_lookup_table_use_payer_as_authority() {
let mut context = setup_test_context().await;
let test_recent_slot = 123;
overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]);
overwrite_slot_hashes_with_slots(&context, &[test_recent_slot]);
let client = &mut context.banks_client;
let payer = &context.payer;
@ -188,7 +188,7 @@ async fn test_create_lookup_table_not_recent_slot() {
async fn test_create_lookup_table_pda_mismatch() {
let mut context = setup_test_context().await;
let test_recent_slot = 123;
overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]);
overwrite_slot_hashes_with_slots(&context, &[test_recent_slot]);
let payer = &context.payer;
let authority_address = Pubkey::new_unique();

View File

@ -329,8 +329,7 @@ mod tests {
serialized_table_1.resize(LOOKUP_TABLE_META_SIZE, 0);
let address_table = AddressLookupTable::new_for_tests(meta, 0);
let mut serialized_table_2 = Vec::new();
serialized_table_2.resize(LOOKUP_TABLE_META_SIZE, 0);
let mut serialized_table_2 = vec![0; LOOKUP_TABLE_META_SIZE];
AddressLookupTable::overwrite_meta_data(&mut serialized_table_2, address_table.meta)
.unwrap();

View File

@ -18,7 +18,7 @@ pub fn recurse(data: &mut [u8]) {
#[no_mangle]
pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 {
msg!("Call depth");
let depth = *(input.add(16) as *mut u8);
let depth = *input.add(16);
sol_log_64(line!() as u64, 0, 0, 0, depth as u64);
let mut data = Vec::with_capacity(depth as usize);
for i in 0_u8..depth {

View File

@ -1468,7 +1468,7 @@ fn assert_instruction_count() {
}
#[test]
#[cfg(any(feature = "sbf_rust"))]
#[cfg(feature = "sbf_rust")]
fn test_program_sbf_instruction_introspection() {
solana_logger::setup();
@ -3718,7 +3718,7 @@ fn test_program_sbf_realloc_invoke() {
}
#[test]
#[cfg(any(feature = "sbf_rust"))]
#[cfg(feature = "sbf_rust")]
fn test_program_sbf_processed_inner_instruction() {
solana_logger::setup();

View File

@ -7372,7 +7372,7 @@ pub mod tests {
let bank = Arc::new(Bank::default_for_tests());
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blockstore.set_roots(vec![0, 1].iter()).unwrap();
blockstore.set_roots([0, 1].iter()).unwrap();
// Build BlockCommitmentCache with rooted slots
let mut cache0 = BlockCommitment::default();
cache0.increase_rooted_stake(50);

View File

@ -2963,7 +2963,7 @@ impl Bank {
reward_calculate_params: &EpochRewardCalculateParamInfo,
rewards: u64,
thread_pool: &ThreadPool,
metrics: &mut RewardsMetrics,
metrics: &RewardsMetrics,
) -> Option<PointValue> {
let EpochRewardCalculateParamInfo {
stake_history,
@ -3023,7 +3023,7 @@ impl Bank {
rewards: u64,
stake_history: &StakeHistory,
thread_pool: &ThreadPool,
metrics: &mut RewardsMetrics,
metrics: &RewardsMetrics,
) -> Option<PointValue> {
let (points, measure) = measure!(thread_pool.install(|| {
vote_with_stake_delegations_map
@ -3291,7 +3291,7 @@ impl Bank {
&self,
thread_pool: &ThreadPool,
stake_rewards: &[StakeReward],
metrics: &mut RewardsMetrics,
metrics: &RewardsMetrics,
) {
// store stake account even if stake_reward is 0
// because credits observed has changed
@ -3352,7 +3352,7 @@ impl Bank {
fn store_vote_accounts_partitioned(
&self,
vote_account_rewards: VoteRewardsAccounts,
metrics: &mut RewardsMetrics,
metrics: &RewardsMetrics,
) -> Vec<(Pubkey, RewardInfo)> {
let (_, measure_us) = measure_us!({
// reformat data to make it not sparse.
@ -3378,7 +3378,7 @@ impl Bank {
fn store_vote_accounts(
&self,
vote_account_rewards: VoteRewards,
metrics: &mut RewardsMetrics,
metrics: &RewardsMetrics,
) -> Vec<(Pubkey, RewardInfo)> {
let (vote_rewards, measure) = measure!(vote_account_rewards
.into_iter()
@ -8447,7 +8447,7 @@ pub mod test_utils {
solana_sdk::{hash::hashv, pubkey::Pubkey},
solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions},
};
pub fn goto_end_of_slot(bank: &mut Bank) {
pub fn goto_end_of_slot(bank: &Bank) {
let mut tick_hash = bank.last_blockhash();
loop {
tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);

View File

@ -517,7 +517,7 @@ fn test_credit_debit_rent_no_side_effect_on_hash() {
fn store_accounts_for_rent_test(
bank: &Bank,
keypairs: &mut [Keypair],
keypairs: &[Keypair],
mock_program_id: Pubkey,
generic_rent_due_for_system_account: u64,
) {
@ -1294,7 +1294,7 @@ fn test_rent_complex() {
store_accounts_for_rent_test(
&bank,
&mut keypairs,
&keypairs,
mock_program_id,
generic_rent_due_for_system_account,
);
@ -2720,7 +2720,7 @@ fn test_bank_tx_fee() {
let (expected_fee_collected, expected_fee_burned) =
genesis_config.fee_rate_governor.burn(expected_fee_paid);
let mut bank = Bank::new_for_tests(&genesis_config);
let bank = Bank::new_for_tests(&genesis_config);
let capitalization = bank.capitalization();
@ -2741,7 +2741,7 @@ fn test_bank_tx_fee() {
);
assert_eq!(bank.get_balance(&leader), initial_balance);
goto_end_of_slot(&mut bank);
goto_end_of_slot(&bank);
assert_eq!(bank.signature_count(), 1);
assert_eq!(
bank.get_balance(&leader),
@ -2769,7 +2769,7 @@ fn test_bank_tx_fee() {
);
// Verify that an InstructionError collects fees, too
let mut bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash());
// Create a bogus instruction to system_program to cause an instruction error
tx.message.instructions[0].data[0] = 40;
@ -2781,7 +2781,7 @@ fn test_bank_tx_fee() {
bank.get_balance(&mint_keypair.pubkey()),
mint - arbitrary_transfer_amount - 2 * expected_fee_paid
); // mint_keypair still pays a fee
goto_end_of_slot(&mut bank);
goto_end_of_slot(&bank);
assert_eq!(bank.signature_count(), 1);
// Profit! 2 transaction signatures processed at 3 lamports each
@ -2834,7 +2834,7 @@ fn test_bank_tx_compute_unit_fee() {
let (expected_fee_collected, expected_fee_burned) =
genesis_config.fee_rate_governor.burn(expected_fee_paid);
let mut bank = Bank::new_for_tests(&genesis_config);
let bank = Bank::new_for_tests(&genesis_config);
let capitalization = bank.capitalization();
@ -2854,7 +2854,7 @@ fn test_bank_tx_compute_unit_fee() {
);
assert_eq!(bank.get_balance(&leader), initial_balance);
goto_end_of_slot(&mut bank);
goto_end_of_slot(&bank);
assert_eq!(bank.signature_count(), 1);
assert_eq!(
bank.get_balance(&leader),
@ -2882,7 +2882,7 @@ fn test_bank_tx_compute_unit_fee() {
);
// Verify that an InstructionError collects fees, too
let mut bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash());
// Create a bogus instruction to system_program to cause an instruction error
tx.message.instructions[0].data[0] = 40;
@ -2894,7 +2894,7 @@ fn test_bank_tx_compute_unit_fee() {
bank.get_balance(&mint_keypair.pubkey()),
mint - arbitrary_transfer_amount - 2 * expected_fee_paid
); // mint_keypair still pays a fee
goto_end_of_slot(&mut bank);
goto_end_of_slot(&bank);
assert_eq!(bank.signature_count(), 1);
// Profit! 2 transaction signatures processed at 3 lamports each
@ -2932,14 +2932,14 @@ fn test_bank_blockhash_fee_structure() {
.target_lamports_per_signature = 5000;
genesis_config.fee_rate_governor.target_signatures_per_slot = 0;
let mut bank = Bank::new_for_tests(&genesis_config);
goto_end_of_slot(&mut bank);
let bank = Bank::new_for_tests(&genesis_config);
goto_end_of_slot(&bank);
let cheap_blockhash = bank.last_blockhash();
let cheap_lamports_per_signature = bank.get_lamports_per_signature();
assert_eq!(cheap_lamports_per_signature, 0);
let mut bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
goto_end_of_slot(&mut bank);
let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
goto_end_of_slot(&bank);
let expensive_blockhash = bank.last_blockhash();
let expensive_lamports_per_signature = bank.get_lamports_per_signature();
assert!(cheap_lamports_per_signature < expensive_lamports_per_signature);
@ -2984,14 +2984,14 @@ fn test_bank_blockhash_compute_unit_fee_structure() {
.target_lamports_per_signature = 1000;
genesis_config.fee_rate_governor.target_signatures_per_slot = 1;
let mut bank = Bank::new_for_tests(&genesis_config);
goto_end_of_slot(&mut bank);
let bank = Bank::new_for_tests(&genesis_config);
goto_end_of_slot(&bank);
let cheap_blockhash = bank.last_blockhash();
let cheap_lamports_per_signature = bank.get_lamports_per_signature();
assert_eq!(cheap_lamports_per_signature, 0);
let mut bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
goto_end_of_slot(&mut bank);
let bank = Bank::new_from_parent(Arc::new(bank), &leader, 1);
goto_end_of_slot(&bank);
let expensive_blockhash = bank.last_blockhash();
let expensive_lamports_per_signature = bank.get_lamports_per_signature();
assert!(cheap_lamports_per_signature < expensive_lamports_per_signature);
@ -5000,7 +5000,7 @@ fn get_nonce_blockhash(bank: &Bank, nonce_pubkey: &Pubkey) -> Option<Hash> {
}
fn nonce_setup(
bank: &mut Arc<Bank>,
bank: &Arc<Bank>,
mint_keypair: &Keypair,
custodian_lamports: u64,
nonce_lamports: u64,
@ -5057,7 +5057,7 @@ where
}
let (custodian_keypair, nonce_keypair) = nonce_setup(
&mut bank,
&bank,
&mint_keypair,
custodian_lamports,
nonce_lamports,
@ -5756,7 +5756,7 @@ fn test_nonce_fee_calculator_updates() {
// Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees
let (custodian_keypair, nonce_keypair) =
nonce_setup(&mut bank, &mint_keypair, 500_000, 100_000, None).unwrap();
nonce_setup(&bank, &mint_keypair, 500_000, 100_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
@ -5824,7 +5824,7 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() {
// Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees
let (custodian_keypair, nonce_keypair) =
nonce_setup(&mut bank, &mint_keypair, 500_000, 100_000, None).unwrap();
nonce_setup(&bank, &mint_keypair, 500_000, 100_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
@ -9192,35 +9192,29 @@ fn test_vote_epoch_panic() {
let stake_keypair = keypair_from_seed(&[2u8; 32]).unwrap();
let mut setup_ixs = Vec::new();
setup_ixs.extend(
vote_instruction::create_account_with_config(
&mint_keypair.pubkey(),
&vote_keypair.pubkey(),
&VoteInit {
node_pubkey: mint_keypair.pubkey(),
authorized_voter: vote_keypair.pubkey(),
authorized_withdrawer: mint_keypair.pubkey(),
commission: 0,
},
1_000_000_000,
vote_instruction::CreateVoteAccountConfig {
space: VoteStateVersions::vote_state_size_of(true) as u64,
..vote_instruction::CreateVoteAccountConfig::default()
},
)
.into_iter(),
);
setup_ixs.extend(
stake_instruction::create_account_and_delegate_stake(
&mint_keypair.pubkey(),
&stake_keypair.pubkey(),
&vote_keypair.pubkey(),
&Authorized::auto(&mint_keypair.pubkey()),
&Lockup::default(),
1_000_000_000_000,
)
.into_iter(),
);
setup_ixs.extend(vote_instruction::create_account_with_config(
&mint_keypair.pubkey(),
&vote_keypair.pubkey(),
&VoteInit {
node_pubkey: mint_keypair.pubkey(),
authorized_voter: vote_keypair.pubkey(),
authorized_withdrawer: mint_keypair.pubkey(),
commission: 0,
},
1_000_000_000,
vote_instruction::CreateVoteAccountConfig {
space: VoteStateVersions::vote_state_size_of(true) as u64,
..vote_instruction::CreateVoteAccountConfig::default()
},
));
setup_ixs.extend(stake_instruction::create_account_and_delegate_stake(
&mint_keypair.pubkey(),
&stake_keypair.pubkey(),
&vote_keypair.pubkey(),
&Authorized::auto(&mint_keypair.pubkey()),
&Lockup::default(),
1_000_000_000_000,
));
setup_ixs.push(vote_instruction::withdraw(
&vote_keypair.pubkey(),
&mint_keypair.pubkey(),
@ -9519,7 +9513,7 @@ fn test_get_largest_accounts() {
}
// Exclude more, and non-sequential, accounts
let exclude: HashSet<_> = vec![pubkeys[0], pubkeys[2], pubkeys[4]]
let exclude: HashSet<_> = [pubkeys[0], pubkeys[2], pubkeys[4]]
.iter()
.cloned()
.collect();
@ -12109,7 +12103,7 @@ fn test_runtime_feature_enable_with_program_cache() {
genesis_config
.accounts
.remove(&feature_set::reject_callx_r10::id());
let mut root_bank = Bank::new_for_tests(&genesis_config);
let root_bank = Bank::new_for_tests(&genesis_config);
// Test a basic transfer
let amount = genesis_config.rent.minimum_balance(0);
@ -12138,7 +12132,7 @@ fn test_runtime_feature_enable_with_program_cache() {
let transaction1 = Transaction::new(&signers1, message1, root_bank.last_blockhash());
// Advance the bank so the next transaction can be submitted.
goto_end_of_slot(&mut root_bank);
goto_end_of_slot(&root_bank);
let mut bank = new_from_parent(Arc::new(root_bank));
// Compose second instruction using the same program with a different block hash
@ -12334,7 +12328,7 @@ fn test_rewards_point_calculation() {
let (bank, _, _) = create_reward_bank(expected_num_delegations);
let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let mut rewards_metrics = RewardsMetrics::default();
let rewards_metrics = RewardsMetrics::default();
let expected_rewards = 100_000_000_000;
let stakes: RwLockReadGuard<Stakes<StakeAccount<Delegation>>> = bank.stakes_cache.stakes();
@ -12344,7 +12338,7 @@ fn test_rewards_point_calculation() {
&reward_calculate_param,
expected_rewards,
&thread_pool,
&mut rewards_metrics,
&rewards_metrics,
);
assert!(point_value.is_some());
@ -12361,7 +12355,7 @@ fn test_rewards_point_calculation_empty() {
let bank = Bank::new_for_tests(&genesis_config);
let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let mut rewards_metrics: RewardsMetrics = RewardsMetrics::default();
let rewards_metrics: RewardsMetrics = RewardsMetrics::default();
let expected_rewards = 100_000_000_000;
let stakes: RwLockReadGuard<Stakes<StakeAccount<Delegation>>> = bank.stakes_cache.stakes();
let reward_calculate_param = bank.get_epoch_reward_calculate_param_info(&stakes);
@ -12370,7 +12364,7 @@ fn test_rewards_point_calculation_empty() {
&reward_calculate_param,
expected_rewards,
&thread_pool,
&mut rewards_metrics,
&rewards_metrics,
);
assert!(point_value.is_none());
@ -13071,10 +13065,9 @@ fn test_store_vote_accounts_partitioned() {
}
});
let mut metrics = RewardsMetrics::default();
let metrics = RewardsMetrics::default();
let stored_vote_accounts =
bank.store_vote_accounts_partitioned(vote_rewards_account, &mut metrics);
let stored_vote_accounts = bank.store_vote_accounts_partitioned(vote_rewards_account, &metrics);
assert_eq!(expected_vote_rewards_num, stored_vote_accounts.len());
// load accounts to make sure they were stored correctly
@ -13094,9 +13087,9 @@ fn test_store_vote_accounts_partitioned_empty() {
let expected = 0;
let vote_rewards = VoteRewardsAccounts::default();
let mut metrics = RewardsMetrics::default();
let metrics = RewardsMetrics::default();
let stored_vote_accounts = bank.store_vote_accounts_partitioned(vote_rewards, &mut metrics);
let stored_vote_accounts = bank.store_vote_accounts_partitioned(vote_rewards, &metrics);
assert_eq!(expected, stored_vote_accounts.len());
}

View File

@ -425,7 +425,7 @@ mod tests {
// update fee cache is asynchronous, this test helper blocks until update is completed.
fn sync_update<'a>(
prioritization_fee_cache: &mut PrioritizationFeeCache,
prioritization_fee_cache: &PrioritizationFeeCache,
bank: Arc<Bank>,
txs: impl Iterator<Item = &'a SanitizedTransaction>,
) {
@ -449,7 +449,7 @@ mod tests {
// finalization is asynchronous, this test helper blocks until finalization is completed.
fn sync_finalize_priority_fee_for_test(
prioritization_fee_cache: &mut PrioritizationFeeCache,
prioritization_fee_cache: &PrioritizationFeeCache,
slot: Slot,
) {
prioritization_fee_cache.finalize_priority_fee(slot);
@ -489,8 +489,8 @@ mod tests {
let bank = Arc::new(Bank::default_for_tests());
let slot = bank.slot();
let mut prioritization_fee_cache = PrioritizationFeeCache::default();
sync_update(&mut prioritization_fee_cache, bank, txs.iter());
let prioritization_fee_cache = PrioritizationFeeCache::default();
sync_update(&prioritization_fee_cache, bank, txs.iter());
// assert block minimum fee and account a, b, c fee accordingly
{
@ -511,7 +511,7 @@ mod tests {
// assert after prune, account a and c should be removed from cache to save space
{
sync_finalize_priority_fee_for_test(&mut prioritization_fee_cache, slot);
sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot);
let fee = PrioritizationFeeCache::get_prioritization_fee(
prioritization_fee_cache.cache.clone(),
&slot,
@ -571,7 +571,7 @@ mod tests {
let bank2 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 2));
let bank3 = Arc::new(Bank::new_from_parent(bank, &collector, 3));
let mut prioritization_fee_cache = PrioritizationFeeCache::default();
let prioritization_fee_cache = PrioritizationFeeCache::default();
// Assert no minimum fee from empty cache
assert!(prioritization_fee_cache
@ -603,7 +603,7 @@ mod tests {
&Pubkey::new_unique(),
),
];
sync_update(&mut prioritization_fee_cache, bank1, txs.iter());
sync_update(&prioritization_fee_cache, bank1, txs.iter());
// before block is marked as completed
assert!(prioritization_fee_cache
.get_prioritization_fees(&[])
@ -624,7 +624,7 @@ mod tests {
.get_prioritization_fees(&[write_account_a, write_account_b, write_account_c])
.is_empty());
// after block is completed
sync_finalize_priority_fee_for_test(&mut prioritization_fee_cache, 1);
sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 1);
assert_eq!(
hashmap_of(vec![(1, 1)]),
prioritization_fee_cache.get_prioritization_fees(&[])
@ -666,7 +666,7 @@ mod tests {
&Pubkey::new_unique(),
),
];
sync_update(&mut prioritization_fee_cache, bank2, txs.iter());
sync_update(&prioritization_fee_cache, bank2, txs.iter());
// before block is marked as completed
assert_eq!(
hashmap_of(vec![(1, 1)]),
@ -698,7 +698,7 @@ mod tests {
])
);
// after block is completed
sync_finalize_priority_fee_for_test(&mut prioritization_fee_cache, 2);
sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2);
assert_eq!(
hashmap_of(vec![(2, 3), (1, 1)]),
prioritization_fee_cache.get_prioritization_fees(&[]),
@ -740,7 +740,7 @@ mod tests {
&Pubkey::new_unique(),
),
];
sync_update(&mut prioritization_fee_cache, bank3, txs.iter());
sync_update(&prioritization_fee_cache, bank3, txs.iter());
// before block is marked as completed
assert_eq!(
hashmap_of(vec![(2, 3), (1, 1)]),
@ -772,7 +772,7 @@ mod tests {
]),
);
// after block is completed
sync_finalize_priority_fee_for_test(&mut prioritization_fee_cache, 3);
sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 3);
assert_eq!(
hashmap_of(vec![(3, 5), (2, 3), (1, 1)]),
prioritization_fee_cache.get_prioritization_fees(&[]),

View File

@ -268,7 +268,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
.or_insert((slot, key_index, HashMap::new()));
hash_map.0 = std::cmp::max(slot, hash_map.0);
let forks = hash_map.2.entry(key_slice).or_insert_with(Vec::new);
let forks = hash_map.2.entry(key_slice).or_default();
forks.push((slot, res.clone()));
let slot_deltas = self.slot_deltas.entry(slot).or_default();
let mut fork_entry = slot_deltas.lock().unwrap();

View File

@ -461,7 +461,7 @@ impl SendTransactionService {
.fetch_add(transactions.len() as u64, Ordering::Relaxed);
Self::send_transactions_in_batch(
&tpu_address,
&mut transactions,
&transactions,
leader_info_provider.lock().unwrap().get_leader_info(),
&connection_cache,
&config,
@ -558,7 +558,7 @@ impl SendTransactionService {
/// Process transactions in batch.
fn send_transactions_in_batch<T: TpuInfo>(
tpu_address: &SocketAddr,
transactions: &mut HashMap<Signature, TransactionInfo>,
transactions: &HashMap<Signature, TransactionInfo>,
leader_info: Option<&T>,
connection_cache: &Arc<ConnectionCache>,
config: &Config,

View File

@ -1026,7 +1026,7 @@ impl ConnectionTable {
last_update: u64,
max_connections_per_peer: usize,
) -> Option<(Arc<AtomicU64>, Arc<AtomicBool>)> {
let connection_entry = self.table.entry(key).or_insert_with(Vec::new);
let connection_entry = self.table.entry(key).or_default();
let has_connection_capacity = connection_entry
.len()
.checked_add(1)
@ -1243,14 +1243,7 @@ pub mod test {
let conn2 = make_client_endpoint(&server_address, None).await;
let mut s1 = conn1.open_uni().await.unwrap();
let s2 = conn2.open_uni().await;
if s2.is_err() {
// It has been noticed if there is already connection open against the server, this open_uni can fail
// with ApplicationClosed(ApplicationClose) error due to CONNECTION_CLOSE_CODE_TOO_MANY before writing to
// the stream -- expect it.
let s2 = s2.err().unwrap();
assert!(matches!(s2, quinn::ConnectionError::ApplicationClosed(_)));
} else {
let mut s2 = s2.unwrap();
if let Ok(mut s2) = s2 {
s1.write_all(&[0u8]).await.unwrap();
s1.finish().await.unwrap();
// Send enough data to create more than 1 chunks.
@ -1263,6 +1256,12 @@ pub mod test {
s2.finish()
.await
.expect_err("shouldn't be able to open 2 connections");
} else {
// It has been noticed if there is already connection open against the server, this open_uni can fail
// with ApplicationClosed(ApplicationClose) error due to CONNECTION_CLOSE_CODE_TOO_MANY before writing to
// the stream -- expect it.
let s2 = s2.err().unwrap();
assert!(matches!(s2, quinn::ConnectionError::ApplicationClosed(_)));
}
}

View File

@ -197,7 +197,7 @@ pub(crate) fn check_output_file(path: &str, db: &PickleDb) {
new_stake_account_address: info
.new_stake_account_address
.map(|x| x.to_string())
.unwrap_or_else(|| "".to_string()),
.unwrap_or_default(),
finalized_date: info.finalized_date,
signature: info.transaction.signatures[0].to_string(),
})

View File

@ -25,7 +25,6 @@ pub(in crate::parse_token) fn parse_interest_bearing_mint_instruction(
} = *decode_instruction_data(instruction_data).map_err(|_| {
ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken)
})?;
let rate_authority = rate_authority;
let rate_authority: Option<Pubkey> = rate_authority.into();
Ok(ParsedInstructionEnum {
instruction_type: "initializeInterestBearingConfig".to_string(),