Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Greg Fitzgerald, 2020-06-17 21:54:52 -06:00 (committed by GitHub)
parent 5b9cd72d8f
commit 0550b893b0
29 changed files with 45 additions and 46 deletions


@@ -96,7 +96,7 @@ impl BroadcastStats for InsertShredsStats {
     }
 }
 
-// Tracks metrics of type `T` acrosss multiple threads
+// Tracks metrics of type `T` across multiple threads
 #[derive(Default)]
 pub(crate) struct BatchCounter<T: BroadcastStats + Default> {
     // The number of batches processed across all threads so far
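
For readers skimming the hunk above: `BatchCounter` is essentially a batch counter shared across worker threads. A minimal standalone sketch of that pattern, assuming plain atomics (the real type also aggregates per-thread stats of type `T`):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // Shared count of batches processed across all threads,
    // analogous to the field documented in the hunk above.
    let num_batches = Arc::new(AtomicUsize::new(0));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let n = Arc::clone(&num_batches);
            // Each worker records the batch it processed.
            thread::spawn(move || n.fetch_add(1, Ordering::Relaxed))
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(num_batches.load(Ordering::Relaxed), 4);
}
```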


@@ -1245,7 +1245,7 @@ pub mod test {
         // The other two validators voted at slots 46, 47, which
         // will only both show up in slot 48, at which point
         // 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
-        // on another fork, so switching should suceed
+        // on another fork, so switching should succeed
         let votes_to_simulate = (46..=48).collect();
         let results = vote_simulator.create_and_vote_new_branch(
             45,
@@ -1377,9 +1377,8 @@ pub mod test {
         pubkey_votes.sort();
         assert_eq!(pubkey_votes, account_latest_votes);
 
-        // Each acccount has 1 vote in it. After simulating a vote in collect_vote_lockouts,
-        // the account will have 2 votes, with lockout 2 + 4 = 6. So expected weight for
-        // two acccounts is 2 * 6 = 12
+        // Each account has 1 vote in it. After simulating a vote in collect_vote_lockouts,
+        // the account will have 2 votes, with lockout 2 + 4 = 6. So expected weight for
+        // two accounts is 2 * 6 = 12
         assert_eq!(bank_weight, 12)
     }
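
The arithmetic in the corrected comment follows from Tower BFT's doubling lockouts: a vote with confirmation count c is locked out for 2^c slots. A standalone sketch of that bookkeeping (not the crate's API):

```rust
// Lockout for a vote with the given confirmation count: 2^c.
fn lockout(confirmation_count: u32) -> u64 {
    2u64.pow(confirmation_count)
}

fn main() {
    // After the simulated vote each account holds two votes with
    // confirmation counts 1 and 2, i.e. lockouts 2 and 4.
    let weight_per_account = lockout(1) + lockout(2); // 2 + 4 = 6
    let bank_weight = 2 * weight_per_account; // two accounts
    assert_eq!(bank_weight, 12);
}
```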


@@ -203,7 +203,7 @@ impl ContactInfo {
     }
 
     /// port must not be 0
-    /// ip must be specified and not mulitcast
+    /// ip must be specified and not multicast
     /// loopback ip is only allowed in tests
     pub fn is_valid_address(addr: &SocketAddr) -> bool {
         (addr.port() != 0) && Self::is_valid_ip(addr.ip())
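
The doc comment's rules map directly onto `std::net` predicates. A minimal sketch, assuming free functions rather than the `ContactInfo` methods (the real `is_valid_ip` also special-cases loopback for tests):

```rust
use std::net::{IpAddr, SocketAddr};

// Port must not be 0; ip must be specified and not multicast.
fn is_valid_address(addr: &SocketAddr) -> bool {
    addr.port() != 0 && is_valid_ip(addr.ip())
}

fn is_valid_ip(ip: IpAddr) -> bool {
    !(ip.is_unspecified() || ip.is_multicast())
}

fn main() {
    assert!(is_valid_address(&"8.8.8.8:8000".parse().unwrap()));
    assert!(!is_valid_address(&"8.8.8.8:0".parse().unwrap())); // port 0
    assert!(!is_valid_address(&"0.0.0.0:8000".parse().unwrap())); // unspecified
    assert!(!is_valid_address(&"224.0.0.1:8000".parse().unwrap())); // multicast
}
```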


@@ -156,7 +156,7 @@ impl Crds {
         }
     }
 
-    /// Update the timestamp's of all the labels that are assosciated with Pubkey
+    /// Update the timestamp's of all the labels that are associated with Pubkey
     pub fn update_record_timestamp(&mut self, pubkey: &Pubkey, now: u64) {
         for label in &CrdsValue::record_labels(pubkey) {
             self.update_label_timestamp(label, now);


@@ -415,7 +415,7 @@ mod test {
         assert!(stats.propagated_validators.contains(&vote_pubkey));
         assert_eq!(stats.propagated_validators_stake, 1);
 
-        // Addding another pubkey should succeed
+        // Adding another pubkey should succeed
         vote_pubkey = Pubkey::new_rand();
         stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
         assert!(stats.propagated_validators.contains(&vote_pubkey));
@@ -475,7 +475,7 @@ mod test {
             staked_vote_accounts as u64
         );
 
-        // Addding another pubkey with same vote accounts should succeed, but stake
+        // Adding another pubkey with same vote accounts should succeed, but stake
         // shouldn't increase
         node_pubkey = Pubkey::new_rand();
         stats.add_node_pubkey_internal(
@@ -494,7 +494,7 @@ mod test {
             3
         );
 
-        // Addding another pubkey with different vote accounts should succeed
+        // Adding another pubkey with different vote accounts should succeed
         // and increase stake
         node_pubkey = Pubkey::new_rand();
         let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)


@@ -1052,7 +1052,7 @@ mod test {
         // Should not be able to find signature for slot 9 for the tx
         assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_none());
 
-        // Getting balance should return the old balance (acounts were cleared)
+        // Getting balance should return the old balance (accounts were cleared)
         assert_eq!(
             bank9.get_balance(&keypairs.node_keypair.pubkey()),
             old_balance


@@ -1373,7 +1373,7 @@ impl ReplayStage {
         // 2) The best "selected" bank is on a different fork,
         //    switch_threshold fails
         // 3) The best "selected" bank is on a different fork,
-        //    switch_threshold succceeds
+        //    switch_threshold succeeds
         let mut failure_reasons = vec![];
         let selected_fork = {
             let switch_fork_decision = tower.check_switch_threshold(
@@ -1551,9 +1551,9 @@ impl ReplayStage {
         // Remove the vote/node pubkeys that we already know voted for this
         // slot. These vote accounts/validator identities are safe to drop
-        // because they don't to be ported back any further because earler
+        // because they don't to be ported back any further because earlier
         // parents must have:
-        // 1) Also recorded these pubkeyss already, or
+        // 1) Also recorded these pubkeys already, or
         // 2) Already reached the propagation threshold, in which case
         //    they no longer need to track the set of propagated validators
         newly_voted_pubkeys.retain(|vote_pubkey| {
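
The `retain` that opens here is a set-difference over pubkeys: anything an earlier parent already recorded can be dropped. A sketch with simplified types (`String` standing in for `Pubkey`):

```rust
use std::collections::HashSet;

// Keep only the pubkeys no earlier parent has already recorded.
fn prune_already_voted(newly_voted: &mut Vec<String>, already_voted: &HashSet<String>) {
    newly_voted.retain(|pk| !already_voted.contains(pk));
}

fn main() {
    let already: HashSet<String> = ["a".to_string()].into_iter().collect();
    let mut newly = vec!["a".to_string(), "b".to_string()];
    prune_already_voted(&mut newly, &already);
    assert_eq!(newly, vec!["b".to_string()]);
}
```
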
@@ -1941,7 +1941,7 @@ pub(crate) mod tests {
             .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
             .is_some());
 
-        // // There are 20 equally staked acccounts, of which 3 have built
+        // // There are 20 equally staked accounts, of which 3 have built
         // banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
         // we should see 3 validators in bank 1's propagated_validator set.
         let expected_leader_slots = vec![
@@ -3388,7 +3388,7 @@ pub(crate) mod tests {
         let mut parent_slot = 3;
 
         // Set up the progress map to show that the last leader slot of 4 is 3,
-        // which means 3 and 4 are consecutiive leader slots
+        // which means 3 and 4 are consecutive leader slots
         progress_map.insert(
             3,
             ForkProgress::new(
@@ -3519,7 +3519,7 @@ pub(crate) mod tests {
         );
 
         // Result should be equivalent to removing slot from BankForks
-        // and regeneratinig the `ancestor` `descendant` maps
+        // and regenerating the `ancestor` `descendant` maps
         for d in slot_2_descendants {
             bank_forks.write().unwrap().remove(d);
         }


@@ -179,7 +179,7 @@ mod tests {
         // Make tarball from packageable snapshot
         snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap();
 
-        // before we compare, stick an empty status_cache in this dir so that the package comparision works
+        // before we compare, stick an empty status_cache in this dir so that the package comparison works
         // This is needed since the status_cache is added by the packager and is not collected from
         // the source dir for snapshots
         let dummy_slot_deltas: Vec<BankSlotDelta> = vec![];


@@ -312,7 +312,7 @@ impl Validator {
         );
 
         if config.dev_halt_at_slot.is_some() {
-            // Simulate a confirmed root to avoid RPC errors with CommitmentmentConfig::max() and
+            // Simulate a confirmed root to avoid RPC errors with CommitmentConfig::max() and
             // to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
             block_commitment_cache
                 .write()


@@ -170,7 +170,7 @@ mod tests {
 
     #[test]
     fn test_bank_forks_snapshot_n() {
-        // create banks upto slot 4 and create 1 new account in each bank. test that bank 4 snapshots
+        // create banks up to slot 4 and create 1 new account in each bank. test that bank 4 snapshots
         // and restores correctly
         run_bank_forks_snapshot_n(
             4,
@@ -333,7 +333,7 @@ mod tests {
         // Check the archive we cached the state for earlier was generated correctly
 
-        // before we compare, stick an empty status_cache in this dir so that the package comparision works
+        // before we compare, stick an empty status_cache in this dir so that the package comparison works
         // This is needed since the status_cache is added by the packager and is not collected from
         // the source dir for snapshots
         let dummy_slot_deltas: Vec<BankSlotDelta> = vec![];
@@ -401,7 +401,7 @@ mod tests {
 
     #[test]
     fn test_bank_forks_status_cache_snapshot_n() {
-        // create banks upto slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
+        // create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
         // this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves
         // ahead. Also tests the status_cache purge and status cache snapshotting.
         // Makes sure that the last bank is restored correctly


@@ -495,7 +495,7 @@ fn test_no_partitions() {
 /// * num_partitions - 1 to 100 partitions
 /// * fail_rate - 0 to 1.0 rate of packet receive failure
 /// * delay_count - number of forks to observe before voting
-/// * parasite_rate - number of parasite nodes that vote oposite the greedy choice
+/// * parasite_rate - number of parasite nodes that vote opposite the greedy choice
 fn test_with_partitions(
     num_partitions: usize,
     fail_rate: f64,


@@ -1176,7 +1176,7 @@ impl Blockstore {
             buffer_offset += shred_len;
             last_index = index;
             // All shreds are of the same length.
-            // Let's check if we have scope to accomodate another shred
+            // Let's check if we have scope to accommodate another shred
             // If not, let's break right away, as it'll save on 1 DB read
             if buffer.len().saturating_sub(buffer_offset) < shred_len {
                 break;
@@ -2123,7 +2123,7 @@ impl Blockstore {
             .expect("fetch from DuplicateSlots column family failed")
     }
 
-    // `new_shred` is asssumed to have slot and index equal to the given slot and index.
+    // `new_shred` is assumed to have slot and index equal to the given slot and index.
     // Returns the existing shred if `new_shred` is not equal to the existing shred at the
     // given slot and index as this implies the leader generated two different shreds with
     // the same slot and index
@@ -2674,7 +2674,7 @@ pub fn create_new_ledger(
     }
 
     // ensure the genesis archive can be unpacked and it is under
-    // max_genesis_archive_unpacked_size, immedately after creating it above.
+    // max_genesis_archive_unpacked_size, immediately after creating it above.
     {
         let temp_dir = tempfile::TempDir::new().unwrap();
         // unpack into a temp dir, while completely discarding the unpacked files


@@ -1482,7 +1482,7 @@ pub mod tests {
         let rooted_slots: Vec<_> = (0..=last_slot).collect();
         blockstore.set_roots(&rooted_slots).unwrap();
 
-        // Set a root on the next slot of the confrimed epoch
+        // Set a root on the next slot of the confirmed epoch
         blockstore.set_roots(&[last_slot + 1]).unwrap();
 
         // Check that we can properly restart the ledger / leader scheduler doesn't fail


@@ -738,7 +738,7 @@ mod tests {
         let tx0 = system_transaction::transfer(&keypair, &keypair.pubkey(), 0, zero);
         let tx1 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, zero);
 
-        // Verify entry with 2 transctions
+        // Verify entry with 2 transactions
         let mut e0 = vec![Entry::new(&zero, 0, vec![tx0, tx1])];
         assert!(e0.verify(&zero));


@@ -51,7 +51,7 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
             .map(|r| {
                 self.blockstore
                     .meta(r)
-                    .expect("Database failure, couldnt fetch SlotMeta")
+                    .expect("Database failure, couldn't fetch SlotMeta")
             })
             .unwrap_or(None);


@@ -1584,7 +1584,7 @@ pub mod tests {
         });
 
         coding_shreds.iter().enumerate().for_each(|(i, s)| {
-            // There'll be half the number of coding shreds, as FEC rate is 0.5
+            // There will be half the number of coding shreds, as FEC rate is 0.5
             // So multiply i with 2
             let expected_fec_set_index =
                 start_index + ((i * 2 / max_per_block) * max_per_block) as u32;
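
The index arithmetic above can be sanity-checked in isolation: at an FEC rate of 0.5 the i-th coding shred pairs with data shred i * 2, and the FEC set index is that data index rounded down to a multiple of the per-block maximum. A sketch, assuming a block size of 32 for illustration:

```rust
fn expected_fec_set_index(start_index: u32, i: usize, max_per_block: usize) -> u32 {
    // Data index of the i-th coding shred is i * 2 (FEC rate 0.5),
    // rounded down to the start of its FEC set.
    start_index + ((i * 2 / max_per_block) * max_per_block) as u32
}

fn main() {
    let (start, max_per_block) = (0u32, 32usize);
    assert_eq!(expected_fec_set_index(start, 0, max_per_block), 0);
    assert_eq!(expected_fec_set_index(start, 15, max_per_block), 0); // data index 30
    assert_eq!(expected_fec_set_index(start, 16, max_per_block), 32); // data index 32
}
```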


@@ -143,7 +143,7 @@ fn test_multi_fec_block_different_size_coding() {
 
         // Necessary in order to ensure the last shred in the slot
         // is part of the recovered set, and that the below `index`
-        // cacluation in the loop is correct
+        // calcuation in the loop is correct
         assert!(fec_data_shreds.len() % 2 == 0);
         for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
             let index = first_data_index + (i * 2) + 1;


@@ -1,5 +1,5 @@
 use log::*;
-/// Cluster independant integration tests
+/// Cluster independent integration tests
 ///
 /// All tests must start from an entry point and a funding keypair and
 /// discover the rest of the network.


@@ -76,7 +76,7 @@ fn test_ledger_cleanup_service() {
             .slot_meta_iterator(0)
             .unwrap()
             .for_each(|_| slots += 1);
-        // with 3 nodes upto 3 slots can be in progress and not complete so max slots in blockstore should be upto 103
+        // with 3 nodes up to 3 slots can be in progress and not complete so max slots in blockstore should be up to 103
         assert!(slots <= 103, "got {}", slots);
     }
 }
@@ -363,7 +363,7 @@ fn test_kill_heaviest_partition() {
     // This test:
     // 1) Spins up four partitions, the heaviest being the first with more stake
     // 2) Schedules the other validators for sufficient slots in the schedule
-    //    so that they will still be locked out of voting for the major partitoin
+    //    so that they will still be locked out of voting for the major partition
     //    when the partition resolves
     // 3) Kills the most staked partition. Validators are locked out, but should all
     //    eventually choose the major partition


@@ -81,7 +81,7 @@ impl MerkleTree {
         // this cause the total nodes number increased by tree height, we use this
         // condition as the max nodes consuming case.
         // n is current leaf nodes number
-        // asuming n-1 is a full balanced binary tree, n-1 tree nodes number will be
+        // assuming n-1 is a full balanced binary tree, n-1 tree nodes number will be
         // 2(n-1) - 1, n tree height is closed to log2(n) + 1
         // so the max nodes number is 2(n-1) - 1 + log2(n) + 1, finally we can use
         // 2n + log2(n+1) as a safe capacity value.
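
The bound derived in that comment is easy to check numerically. A sketch of the resulting capacity formula (hypothetical free function, not the `MerkleTree` API):

```rust
// Safe node capacity for n leaves, per the derivation above:
// 2n + log2(n + 1).
fn calculate_capacity(leaf_count: usize) -> usize {
    2 * leaf_count + ((leaf_count + 1) as f64).log2().ceil() as usize
}

fn main() {
    assert_eq!(calculate_capacity(1), 3); // 2 + ceil(log2(2)) = 3
    assert_eq!(calculate_capacity(7), 17); // 14 + ceil(log2(8)) = 17
}
```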


@@ -81,7 +81,7 @@ impl From<RemoteWalletError> for SignerError {
     }
 }
 
-/// Collection of conntected RemoteWallets
+/// Collection of connected RemoteWallets
 pub struct RemoteWalletManager {
     usb: Arc<Mutex<hidapi::HidApi>>,
     devices: RwLock<Vec<Device>>,


@@ -60,7 +60,7 @@ pub type TransactionLoaders = Vec<Vec<(Pubkey, Account)>>;
 pub type TransactionLoadResult = (TransactionAccounts, TransactionLoaders, TransactionRent);
 
 pub enum AccountAddressFilter {
-    Exclude, // exclude all addresses matching the fiter
+    Exclude, // exclude all addresses matching the filter
     Include, // only include addresses matching the filter
 }
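
A minimal sketch of how a caller might apply this enum when scanning addresses, with `&str` standing in for `Pubkey` (not the actual accounts API):

```rust
use std::collections::HashSet;

pub enum AccountAddressFilter {
    Exclude, // exclude all addresses matching the filter
    Include, // only include addresses matching the filter
}

// Decide whether `addr` passes the filter set under the given mode.
fn passes(addr: &str, filter_set: &HashSet<&str>, filter: &AccountAddressFilter) -> bool {
    match filter {
        AccountAddressFilter::Exclude => !filter_set.contains(addr),
        AccountAddressFilter::Include => filter_set.contains(addr),
    }
}

fn main() {
    let set: HashSet<&str> = ["addr1"].iter().copied().collect();
    assert!(!passes("addr1", &set, &AccountAddressFilter::Exclude));
    assert!(passes("addr1", &set, &AccountAddressFilter::Include));
}
```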


@@ -748,7 +748,7 @@ impl AccountsDB {
         }
     }
 
-    // Atomicallly process reclaims and new dead_slots in this thread, gauranteeing
+    // Atomically process reclaims and new dead_slots in this thread, guaranteeing
     // complete data removal for slots in reclaims.
     fn handle_reclaims_ensure_cleanup(&self, reclaims: SlotSlice<AccountInfo>) {
         let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts");
@@ -3683,7 +3683,7 @@ pub mod tests {
         let mut current_slot = 0;
         let accounts = AccountsDB::new_single();
 
-        // create intermidiate updates to purged_pubkey1 so that
+        // create intermediate updates to purged_pubkey1 so that
         // generate_index must add slots as root last at once
         current_slot += 1;
         accounts.store(current_slot, &[(&pubkey, &account)]);


@@ -696,7 +696,7 @@ pub mod tests {
         account.set_data_len_unsafe(crafted_data_len);
         assert_eq!(account.meta.data_len, crafted_data_len);
 
-        // Reload accoutns and observe crafted_data_len
+        // Reload accounts and observe crafted_data_len
         let accounts = av.accounts(0);
         let account = accounts.first().unwrap();
         assert_eq!(account.meta.data_len, crafted_data_len);
@@ -763,7 +763,7 @@ pub mod tests {
         {
             let executable_bool: &bool = &account.account_meta.executable;
             // Depending on use, *executable_bool can be truthy or falsy due to direct memory manipulation
-            // assert_eq! thinks *exeutable_bool is equal to false but the if condition thinks it's not, contradictly.
+            // assert_eq! thinks *executable_bool is equal to false but the if condition thinks it's not, contradictorily.
             assert_eq!(*executable_bool, false);
             const FALSE: bool = false; // keep clippy happy
             if *executable_bool == FALSE {


@@ -1,4 +1,4 @@
-//! The `bank_forks` module implments BankForks a DAG of checkpointed Banks
+//! The `bank_forks` module implements BankForks a DAG of checkpointed Banks
 
 use crate::snapshot_package::{AccountsPackageSendError, AccountsPackageSender};
 use crate::snapshot_utils::{self, SnapshotError};


@@ -464,7 +464,7 @@ mod tests {
 
     #[test]
     fn test_create_with_zero_lamports() {
-        // create account with zero lamports tranferred
+        // create account with zero lamports transferred
         let new_owner = Pubkey::new(&[9; 32]);
         let from = Pubkey::new_rand();
         let from_account = Account::new_ref(100, 1, &Pubkey::new_rand()); // not from system account


@@ -861,7 +861,7 @@ mod tests {
                 .lamports(1, 2)
                 .verify(),
             Err(InstructionError::ExecutableLamportChange),
-            "owner should not be able to add lamports once makred executable"
+            "owner should not be able to add lamports once marked executable"
         );
         assert_eq!(
             Change::new(&owner, &owner)
@@ -1045,7 +1045,7 @@ mod tests {
                 .data(vec![0], vec![0, 0])
                 .verify(),
             Ok(()),
-            "system program should be able to change acount data size"
+            "system program should be able to change account data size"
         );
     }


@@ -62,7 +62,7 @@ impl AbiDigester {
         }
     }
 
-    // must create separate instances because we can't pass the single instnace to
+    // must create separate instances because we can't pass the single instance to
     // `.serialize()` multiple times
     pub fn create_new(&self) -> Self {
         Self {


@@ -83,7 +83,7 @@ mod tests {
     #[test]
     fn slot_history_test1() {
         solana_logger::setup();
-        // should be divisable by 64 since the clear logic works on blocks
+        // should be divisible by 64 since the clear logic works on blocks
         assert_eq!(MAX_ENTRIES % 64, 0);
         let mut slot_history = SlotHistory::default();
         info!("add 2");
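
MAX_ENTRIES must divide evenly by 64 because the history is a bit vector stored in 64-bit words, and the clear logic wipes a whole word (block) at a time. A simplified sketch of that block logic (hypothetical type, not the `SlotHistory` internals):

```rust
struct Bits {
    words: Vec<u64>, // one 64-bit block per word
}

impl Bits {
    fn new(num_bits: usize) -> Self {
        assert_eq!(num_bits % 64, 0, "clear logic works on 64-bit blocks");
        Bits { words: vec![0; num_bits / 64] }
    }
    fn set(&mut self, i: usize) {
        self.words[i / 64] |= 1u64 << (i % 64);
    }
    fn clear_block(&mut self, word_index: usize) {
        self.words[word_index] = 0; // wipe a whole 64-bit block at once
    }
}

fn main() {
    let mut bits = Bits::new(128);
    bits.set(2);
    bits.clear_block(0);
    assert_eq!(bits.words[0], 0);
}
```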