This commit is contained in:
Greg Fitzgerald 2021-03-04 23:16:53 -08:00 committed by GitHub
parent f035b9c7cd
commit edd159e7d7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 23 additions and 23 deletions

View File

@ -354,7 +354,7 @@ impl AccountsBackgroundService {
} else {
// under sustained writes, shrink can lag behind so cap to
// SHRUNKEN_ACCOUNT_PER_INTERVAL (which is based on INTERVAL_MS,
// which in turn roughly asscociated block time)
// which in turn roughly associated block time)
consumed_budget = bank
.process_stale_slot_with_budget(
consumed_budget,

View File

@ -1861,7 +1861,7 @@ impl AccountsDb {
// `store_accounts_frozen()` above may have purged accounts from some
// other storage entries (the ones that were just overwritten by this
// new storage entry). This means some of those stores might have caused
// this slot to be readded to `self.shrink_candidate_slots`, so delete
// this slot to be re-added to `self.shrink_candidate_slots`, so delete
// those here
self.shrink_candidate_slots.lock().unwrap().remove(&slot);
@ -3231,7 +3231,7 @@ impl AccountsDb {
(num_new_roots, num_roots_flushed)
}
// `should_flush_f` is an optional closure that determines wehther a given
// `should_flush_f` is an optional closure that determines whether a given
// account should be flushed. Passing `None` will by default flush all
// accounts
fn flush_slot_cache(

View File

@ -548,7 +548,7 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
F: FnMut(&Pubkey, (&T, Slot)),
{
for pubkey in index.get(index_key) {
// Maybe these reads from the AccountsIndex can be batched everytime it
// Maybe these reads from the AccountsIndex can be batched every time it
// grabs the read lock as well...
if let Some((list_r, index)) = self.get(&pubkey, Some(ancestors), max_root) {
func(
@ -975,8 +975,8 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
max_clean_root: Option<Slot>,
account_indexes: &HashSet<AccountIndex>,
) {
let roots_traker = &self.roots_tracker.read().unwrap();
let max_root = Self::get_max_root(&roots_traker.roots, &list, max_clean_root);
let roots_tracker = &self.roots_tracker.read().unwrap();
let max_root = Self::get_max_root(&roots_tracker.roots, &list, max_clean_root);
let mut purged_slots: HashSet<Slot> = HashSet::new();
list.retain(|(slot, value)| {

View File

@ -118,7 +118,7 @@ type TransactionAccountDepRefCells = Vec<(Pubkey, RefCell<Account>)>;
type TransactionLoaderRefCells = Vec<Vec<(Pubkey, RefCell<Account>)>>;
// Eager rent collection repeats in cyclic manner.
// Each cycle is composed of <partiion_count> number of tiny pubkey subranges
// Each cycle is composed of <partition_count> number of tiny pubkey subranges
// to scan, which is always multiple of the number of slots in epoch.
type PartitionIndex = u64;
type PartitionsPerCycle = u64;
@ -3612,8 +3612,8 @@ impl Bank {
let cycle_params = self.determine_collection_cycle_params(epoch);
let (_, _, in_multi_epoch_cycle, _, _, partition_count) = cycle_params;
// use common code-path for both very-likely and very-unlikely for the sake of minimized
// risk of any mis-calculation instead of neligilbe faster computation per slot for the
// use common codepath for both very likely and very unlikely for the sake of minimized
// risk of any miscalculation instead of negligibly faster computation per slot for the
// likely case.
let mut start_partition_index =
Self::partition_index_from_slot_index(start_slot_index, cycle_params);
@ -3625,7 +3625,7 @@ impl Bank {
let in_middle_of_cycle = start_partition_index > 0;
if in_multi_epoch_cycle && is_special_new_epoch && in_middle_of_cycle {
// Adjust slot indexes so that the final partition ranges are continuous!
// This is neeed because the caller gives us off-by-one indexes when
// This is needed because the caller gives us off-by-one indexes when
// an epoch boundary is crossed.
// Usually there is no need for this adjustment because cycles are aligned
// with epochs. But for multi-epoch cycles, adjust the indexes if it
@ -3950,7 +3950,7 @@ impl Bank {
let rent_fix_enabled = self.cumulative_rent_related_fixes_enabled();
// don't collect rents if we're in the new behavior;
// in genral, it's not worthwhile to account for rents outside the runtime (transactions)
// in general, it's not worthwhile to account for rents outside the runtime (transactions)
// there are too many and subtly nuanced modification codepaths
if !rent_fix_enabled {
// previously we're too much collecting rents as if it existed since epoch 0...
@ -6359,7 +6359,7 @@ pub(crate) mod tests {
}
#[test]
fn test_rent_eager_under_fixed_cycle_for_developemnt() {
fn test_rent_eager_under_fixed_cycle_for_development() {
solana_logger::setup();
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
@ -6505,11 +6505,11 @@ pub(crate) mod tests {
let max_exact = 64;
// Make sure `max_exact` divides evenly when calculating `calculate_partition_width`
assert!(should_cause_overflow(max_exact));
// Make sure `max_unexact` doesn't divide evenly when calculating `calculate_partition_width`
let max_unexact = 10;
assert!(!should_cause_overflow(max_unexact));
// Make sure `max_inexact` doesn't divide evenly when calculating `calculate_partition_width`
let max_inexact = 10;
assert!(!should_cause_overflow(max_inexact));
for max in &[max_exact, max_unexact] {
for max in &[max_exact, max_inexact] {
let range = Bank::pubkey_range_from_partition((max - 1, max - 1, *max));
assert_eq!(
range,

View File

@ -632,7 +632,7 @@ impl MessageProcessor {
) {
ic_msg!(
invoke_context,
"{}'s signer priviledge escalated",
"{}'s signer privilege escalated",
account.pubkey
);
return Err(InstructionError::PrivilegeEscalation);

View File

@ -83,7 +83,7 @@ impl RentCollector {
account.rent_epoch = self.epoch
+ if rent_fix_enabled && exempt {
// Rent isn't collected for the next epoch
// Make sure to check exempt status later in curent epoch again
// Make sure to check exempt status later in current epoch again
0
} else {
// Rent is collected for next epoch

View File

@ -196,7 +196,7 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
// If the inner key was moved to a different primary key, remove
// the previous index entry.
// Check is necessary because anoher thread's writes could feasibly be
// Check is necessary because another thread's writes could feasibly be
// interleaved between `should_insert = { ... slots_map.get(...) ... }` and
// `prev_key = { ... slots_map.insert(...) ... }`
// Currently this isn't possible due to current AccountsIndex's (pubkey, slot)-per-thread
@ -255,7 +255,7 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
// Specifying `slots_to_remove` == Some will only remove keys for those specific slots
// found for the `inner_key` in the reverse index. Otherwise, passing `None`
// will remove all keys that are found for the `inner_key` in the reverse index.
// will remove all keys that are found for the `inner_key` in the reverse index.
// Note passing `None` is dangerous unless you're sure there's no other competing threads
// writing updates to the index for this Pubkey at the same time!

View File

@ -103,7 +103,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
}
} else {
panic!(
"Map for signature must exist if siganture exists in self.slot_deltas, slot: {}",
"Map for signature must exist if signature exists in self.slot_deltas, slot: {}",
slot
)
}

View File

@ -207,7 +207,7 @@ fn transfer(
if from.signer_key().is_none() {
ic_msg!(
invoke_context,
"Transfer: `from` accont {} must sign",
"Transfer: `from` account {} must sign",
from.unsigned_key()
);
return Err(InstructionError::MissingRequiredSignature);
@ -656,7 +656,7 @@ mod tests {
#[test]
fn test_create_with_zero_lamports() {
// create account with zero lamports tranferred
// create account with zero lamports transferred
let new_owner = Pubkey::new(&[9; 32]);
let from = solana_sdk::pubkey::new_rand();
let from_account = Account::new_ref(100, 1, &solana_sdk::pubkey::new_rand()); // not from system account