From 03386cc7b9ad14ac631ec67e44fd3d17c18a9fa5 Mon Sep 17 00:00:00 2001 From: GoodDaisy <90915921+GoodDaisy@users.noreply.github.com> Date: Fri, 22 Dec 2023 04:06:00 +0800 Subject: [PATCH] Fix typos (#34459) * Fix typos * Fix typos * fix typo --- accounts-db/src/account_info.rs | 2 +- accounts-db/src/accounts.rs | 2 +- accounts-db/src/accounts_hash.rs | 2 +- accounts-db/src/tiered_storage/byte_block.rs | 2 +- bench-tps/src/bench.rs | 2 +- bucket_map/src/restart.rs | 2 +- ci/buildkite-pipeline-in-disk.sh | 2 +- ci/buildkite-pipeline.sh | 2 +- ci/buildkite-solana-private.sh | 2 +- ci/test-stable.sh | 2 +- cli/src/cluster_query.rs | 4 ++-- cli/src/spend_utils.rs | 2 +- client/src/connection_cache.rs | 2 +- core/src/banking_stage/latest_unprocessed_votes.rs | 2 +- core/src/banking_stage/leader_slot_metrics.rs | 4 ++-- core/src/consensus/heaviest_subtree_fork_choice.rs | 4 ++-- core/src/repair/duplicate_repair_status.rs | 2 +- core/src/repair/repair_weight.rs | 10 +++++----- core/src/repair/serve_repair.rs | 2 +- core/tests/snapshots.rs | 2 +- gossip/src/cluster_info.rs | 4 ++-- gossip/src/contact_info.rs | 2 +- gossip/src/crds_value.rs | 2 +- ledger/src/blockstore.rs | 8 ++++---- ledger/src/blockstore_cleanup_service.rs | 2 +- local-cluster/tests/local_cluster.rs | 8 ++++---- perf/src/recycler.rs | 2 +- poh/src/poh_recorder.rs | 2 +- pubsub-client/src/nonblocking/pubsub_client.rs | 2 +- pubsub-client/src/pubsub_client.rs | 2 +- quic-client/src/quic_client.rs | 2 +- quic-client/tests/quic_client.rs | 2 +- rpc-client/src/mock_sender.rs | 2 +- rpc-client/src/nonblocking/rpc_client.rs | 10 +++++----- rpc-client/src/rpc_client.rs | 6 +++--- runtime/src/accounts/mod.rs | 2 +- runtime/src/bank.rs | 4 ++-- runtime/src/snapshot_bank_utils.rs | 2 +- runtime/src/snapshot_minimizer.rs | 2 +- runtime/src/snapshot_utils.rs | 8 ++++---- scripts/coverage-in-disk.sh | 2 +- scripts/coverage.sh | 2 +- scripts/sed-i-all-rs-files-for-rust-analyzer.sh | 2 +- scripts/system-stats.sh | 6 +++--- sdk/program/src/instruction.rs | 2 +- sdk/program/src/loader_v4_instruction.rs | 4 ++-- sdk/program/src/secp256k1_recover.rs | 2 +- sdk/src/transaction/mod.rs | 2 +- sdk/src/transaction_context.rs | 2 +- storage-bigtable/src/lib.rs | 2 +- system-test/testnet-automation.sh | 2 +- transaction-dos/src/main.rs | 2 +- transaction-status/src/parse_address_lookup_table.rs | 2 +- turbine/src/cluster_nodes.rs | 2 +- zk-token-sdk/src/zk_token_elgamal/ops.rs | 2 +- 55 files changed, 82 insertions(+), 82 deletions(-) diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs index a26122823..67c02282f 100644 --- a/accounts-db/src/account_info.rs +++ b/accounts-db/src/account_info.rs @@ -76,7 +76,7 @@ const CACHED_OFFSET: OffsetReduced = (1 << (OffsetReduced::BITS - 1)) - 1; #[repr(C)] #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] pub struct PackedOffsetAndFlags { - /// this provides 2^31 bits, which when multipled by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec + /// this provides 2^31 bits, which when multiplied by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec offset_reduced: B31, /// use 1 bit to specify that the entry is zero lamport is_zero_lamport: bool, diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 6782459a0..8eb070296 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -2108,7 +2108,7 @@ mod tests { let accounts = Accounts::new(Arc::new(accounts_db)); /* This test assumes pubkey0 < pubkey1 < pubkey2. 
- * But the keys created with new_unique() does not gurantee this + * But the keys created with new_unique() does not guarantee this * order because of the endianness. new_unique() calls add 1 at each * key generaration as the little endian integer. A pubkey stores its * value in a 32-byte array bytes, and its eq-partial trait considers diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 72f74be6f..78662a041 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1230,7 +1230,7 @@ pub enum ZeroLamportAccounts { pub struct AccountHash(pub Hash); // Ensure the newtype wrapper never changes size from the underlying Hash -// This also ensures there are no padding bytes, which is requried to safely implement Pod +// This also ensures there are no padding bytes, which is required to safely implement Pod const _: () = assert!(std::mem::size_of::<AccountHash>() == std::mem::size_of::<Hash>()); /// Hash of accounts diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 28e32815b..869036251 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -85,7 +85,7 @@ impl ByteBlockWriter { /// Write all the Some fields of the specified AccountMetaOptionalFields. /// - /// Note that the existance of each optional field is stored separately in + /// Note that the existence of each optional field is stored separately in /// AccountMetaFlags. pub fn write_optional_fields( &mut self, diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index f33b69241..bddce402a 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -238,7 +238,7 @@ where // Move on to next chunk self.chunk_index = (self.chunk_index + 1) % self.account_chunks.source.len(); - // Switch directions after transfering for each "chunk" + // Switch directions after transferring for each "chunk" if self.chunk_index == 0 { self.reclaim_lamports_back_to_source_account = !self.reclaim_lamports_back_to_source_account; diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index aae4d455f..fd921401c 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -79,7 +79,7 @@ impl RestartableBucket { bucket.random = random; } } - /// retreive the file_name and random that were used prior to the current restart. + /// retrieve the file_name and random that were used prior to the current restart. /// This was written into the restart file on the prior run by `set_file`. pub(crate) fn get(&self) -> Option<(u128, u64)> { self.restart.as_ref().map(|restart| { diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index 113b009aa..ad12e1fc0 100755 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -48,7 +48,7 @@ affects() { # the worse (affected) return 0 fi - # Assume everyting needs to be tested when any Dockerfile changes + # Assume everything needs to be tested when any Dockerfile changes for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do if [[ ${pattern:0:1} = "!"
]]; then for file in "${affected_files[@]}"; do diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 91ffe0e3a..fb6b6f90b 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -30,7 +30,7 @@ annotate() { fi } -# Assume everyting needs to be tested when this file or any Dockerfile changes +# Assume everything needs to be tested when this file or any Dockerfile changes mandatory_affected_files=() mandatory_affected_files+=(^ci/buildkite-pipeline.sh) mandatory_affected_files+=(^ci/docker-rust/Dockerfile) diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 03bbccfd2..eeb087d32 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -48,7 +48,7 @@ affects() { # the worse (affected) return 0 fi - # Assume everyting needs to be tested when any Dockerfile changes + # Assume everything needs to be tested when any Dockerfile changes for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do if [[ ${pattern:0:1} = "!" ]]; then for file in "${affected_files[@]}"; do diff --git a/ci/test-stable.sh b/ci/test-stable.sh index f521a6c17..40ee0ae2c 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -107,7 +107,7 @@ test-stable-sbf) _ cargo test \ --manifest-path programs/sbf/Cargo.toml \ --no-default-features --features=sbf_c,sbf_rust assert_instruction_count \ - -- --nocapture &> "${sbf_target_path}"/deploy/instuction_counts.txt + -- --nocapture &> "${sbf_target_path}"/deploy/instruction_counts.txt sbf_dump_archive="sbf-dumps.tar.bz2" rm -f "$sbf_dump_archive" diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index ccde037a0..a5162e949 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -755,7 +755,7 @@ pub fn process_catchup( if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default { // go to new line to leave this message on console println!( - "Prefering explicitly given rpc ({}) as us, although --our-localhost is given\n", + "Preferring explicitly given rpc ({}) as us, although --our-localhost is given\n", node_json_rpc_url.as_ref().unwrap() ); } else { @@ -771,7 +771,7 @@ pub fn process_catchup( (if node_pubkey.is_some() && node_pubkey != guessed_default { // go to new line to leave this message on console println!( - "Prefering explicitly given node pubkey ({}) as us, although --our-localhost \ + "Preferring explicitly given node pubkey ({}) as us, although --our-localhost \ is given\n", node_pubkey.unwrap() ); diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index c9ca3356a..62f7a8dfd 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -161,7 +161,7 @@ where dummy_message.recent_blockhash = *blockhash; get_fee_for_messages(rpc_client, &[&dummy_message])? 
} - None => 0, // Offline, cannot calulate fee + None => 0, // Offline, cannot calculate fee }; match amount { diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 36820ba44..216687aec 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -76,7 +76,7 @@ impl ConnectionCache { Self::new_with_client_options(name, connection_pool_size, None, None, None) } - /// Create a quic conneciton_cache with more client options + /// Create a quic connection_cache with more client options pub fn new_with_client_options( name: &'static str, connection_pool_size: usize, diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 03184ae12..a62e5bf9b 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -26,7 +26,7 @@ pub enum VoteSource { Tpu, } -/// Holds deserialized vote messages as well as their source, foward status and slot +/// Holds deserialized vote messages as well as their source, forward status and slot #[derive(Debug, Clone)] pub struct LatestValidatorVotePacket { vote_source: VoteSource, diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index b36200d86..449ff7801 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -47,7 +47,7 @@ pub(crate) struct ProcessTransactionsSummary { // Total amount of time spent running the cost model pub cost_model_us: u64, - // Breakdown of time spent executing and comitting transactions + // Breakdown of time spent executing and committing transactions pub execute_and_commit_timings: LeaderExecuteAndCommitTimings, // Breakdown of all the transaction errors from transactions passed for execution @@ -104,7 +104,7 @@ struct LeaderSlotPacketCountMetrics { // total number of transactions that were executed, but failed to be committed into the Poh stream because // the block ended. Some of these may be already counted in `nonretryable_errored_transactions_count` if they - // then hit the age limit after failing to be comitted. + // then hit the age limit after failing to be committed. executed_transactions_failed_commit_count: u64, // total number of transactions that were excluded from the block because there were concurrent write locks active. diff --git a/core/src/consensus/heaviest_subtree_fork_choice.rs b/core/src/consensus/heaviest_subtree_fork_choice.rs index 8afebae2b..2c9ea97b3 100644 --- a/core/src/consensus/heaviest_subtree_fork_choice.rs +++ b/core/src/consensus/heaviest_subtree_fork_choice.rs @@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice { let mut update_operations: UpdateOperations = BTreeMap::new(); // Insert aggregate operations up to the root self.insert_aggregate_operations(&mut update_operations, *slot_hash_key); - // Remove child link so that this slot cannot be choosen as best or deepest + // Remove child link so that this slot cannot be chosen as best or deepest assert!(self .fork_infos .get_mut(&parent) @@ -1308,7 +1308,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice { // be for a slot that we currently do not have in our bank forks, so we // return None. 
// - // We are guarenteed that we will eventually repair a duplicate confirmed version + // We are guaranteed that we will eventually repair a duplicate confirmed version // of this slot because the state machine will never dump a slot unless it has // observed a duplicate confirmed version of the slot. // diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs index 9d58a5c68..53c2bd647 100644 --- a/core/src/repair/duplicate_repair_status.rs +++ b/core/src/repair/duplicate_repair_status.rs @@ -1123,7 +1123,7 @@ pub mod tests { let request_slot = 100; let mut test_setup = setup_add_response_test_pruned(request_slot, 10); - // Insert all the correct ancestory + // Insert all the correct ancestry let tree = test_setup .correct_ancestors_response .iter() diff --git a/core/src/repair/repair_weight.rs b/core/src/repair/repair_weight.rs index 430a02850..7e65cfaa2 100644 --- a/core/src/repair/repair_weight.rs +++ b/core/src/repair/repair_weight.rs @@ -338,7 +338,7 @@ impl RepairWeight { } Some(TreeRoot::PrunedRoot(subtree_root)) => { // Even if these orphaned slots were previously pruned, they should be added back to - // `self.trees` as we are no longer sure of their ancestory. + // `self.trees` as we are no longer sure of their ancestry. // After they are repaired there is a chance that they are now part of the rooted path. // This is possible for a duplicate slot with multiple ancestors, if the // version we had pruned before had the wrong ancestor, and the correct version is @@ -892,7 +892,7 @@ impl RepairWeight { ); } - /// Finds any ancestors avaiable from `blockstore` for `slot`. + /// Finds any ancestors available from `blockstore` for `slot`. /// Ancestor search is stopped when finding one that chains to any /// tree in `self.trees` or `self.pruned_trees` or if the ancestor is < self.root. /// @@ -2201,21 +2201,21 @@ mod test { let (blockstore, _, mut repair_weight) = setup_orphan_repair_weight(); // Ancestor of slot 4 is slot 2, with an existing subtree rooted at 0 - // because there wass a vote for a descendant + // because there was a vote for a descendant assert_eq!( repair_weight.find_ancestor_subtree_of_slot(&blockstore, 4), (VecDeque::from([2]), Some(TreeRoot::Root(0))) ); // Ancestors of 5 are [1, 3], with an existing subtree rooted at 0 - // because there wass a vote for a descendant + // because there was a vote for a descendant assert_eq!( repair_weight.find_ancestor_subtree_of_slot(&blockstore, 5), (VecDeque::from([1, 3]), Some(TreeRoot::Root(0))) ); // Ancestors of slot 23 are [20, 22], with an existing subtree of 20 - // because there wass a vote for 20 + // because there was a vote for 20 assert_eq!( repair_weight.find_ancestor_subtree_of_slot(&blockstore, 23), (VecDeque::from([20, 22]), Some(TreeRoot::Root(20))) diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 2662d487f..a12848f2e 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -965,7 +965,7 @@ impl ServeRepair { stats.dropped_requests_outbound_bandwidth += 1; continue; } - // Bypass ping/pong check for requests comming from QUIC endpoint. + // Bypass ping/pong check for requests coming from QUIC endpoint. 
if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() { let (check, ping_pkt) = Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 7709e393a..c348b1f48 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -73,7 +73,7 @@ struct SnapshotTestConfig { full_snapshot_archives_dir: TempDir, bank_snapshots_dir: TempDir, accounts_dir: PathBuf, - // as the underscore prefix indicates, this isn't explictly used; but it's needed to keep + // as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep // TempDir::drop from running to retain that dir for the duration of test _accounts_tmp_dir: TempDir, } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 9f76523be..d79e0ac99 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1218,7 +1218,7 @@ impl ClusterInfo { } /// Returns epoch-slots inserted since the given cursor. - /// Excludes entries from nodes with unkown or different shred version. + /// Excludes entries from nodes with unknown or different shred version. pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> { let self_shred_version = Some(self.my_shred_version()); let gossip_crds = self.gossip.crds.read().unwrap(); @@ -1752,7 +1752,7 @@ impl ClusterInfo { match gossip_crds.trim(cap, &keep, stakes, timestamp()) { Err(err) => { self.stats.trim_crds_table_failed.add_relaxed(1); - // TODO: Stakes are comming from the root-bank. Debug why/when + // TODO: Stakes are coming from the root-bank. Debug why/when // they are empty/zero. debug!("crds table trim failed: {:?}", err); } diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index b09957f2c..b3ca9c94a 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -350,7 +350,7 @@ impl ContactInfo { } // Removes the IP address at the given index if - // no socket entry refrences that index. + // no socket entry references that index. fn maybe_remove_addr(&mut self, index: u8) { if !self.sockets.iter().any(|entry| entry.index == index) { self.addrs.remove(usize::from(index)); } diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 4bd1939ef..61d916e76 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -1066,7 +1066,7 @@ mod test { assert!(!other.check_duplicate(&node_crds)); assert_eq!(node.overrides(&other_crds), None); assert_eq!(other.overrides(&node_crds), None); - // Differnt crds value is not a duplicate. + // Different crds value is not a duplicate.
let other = LegacyContactInfo::new_rand(&mut rng, Some(pubkey)); let other = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(other)); assert!(!node.check_duplicate(&other)); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index edd13893c..a32af8522 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -6341,7 +6341,7 @@ pub mod tests { assert_eq!( blockstore.find_missing_data_indexes( slot, - 0, // first_timestmap + 0, // first_timestamp 0, // defer_threshold_ticks 0, // start_index gap - 1, // end_index @@ -6352,7 +6352,7 @@ pub mod tests { assert_eq!( blockstore.find_missing_data_indexes( slot, - 0, // first_timestmap + 0, // first_timestamp 0, // defer_threshold_ticks gap - 2, // start_index gap, // end_index @@ -9951,7 +9951,7 @@ pub mod tests { } #[test] - fn test_rewards_protobuf_backward_compatability() { + fn test_rewards_protobuf_backward_compatibility() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -9994,7 +9994,7 @@ pub mod tests { // ledger archives, but typically those require contemporaraneous software for other reasons. // However, we are persisting the test since the apis still exist in `blockstore_db`. #[test] - fn test_transaction_status_protobuf_backward_compatability() { + fn test_transaction_status_protobuf_backward_compatibility() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); diff --git a/ledger/src/blockstore_cleanup_service.rs b/ledger/src/blockstore_cleanup_service.rs index dbd8e64e6..d9212bf6d 100644 --- a/ledger/src/blockstore_cleanup_service.rs +++ b/ledger/src/blockstore_cleanup_service.rs @@ -303,7 +303,7 @@ mod tests { fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore { // The find_slots_to_clean() routine uses a method that queries data // from RocksDB SST files. On a running validator, these are created - // fairly reguarly as new data is coming in and contents of memory are + // fairly regularly as new data is coming in and contents of memory are // pushed to disk. In a unit test environment, we aren't pushing nearly // enough data for this to happen organically. So, instead open and // close the Blockstore which will perform the flush to SSTs. diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 892ca38c9..955123df3 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2383,7 +2383,7 @@ fn test_hard_fork_with_gap_in_roots() { .reversed_rooted_slot_iterator(common_root) .unwrap() .collect::<Vec<_>>(); - // artifically restore the forcibly purged genesis only for the validator A just for the sake of + // artificially restore the forcibly purged genesis only for the validator A just for the sake of // the final assertions. slots_a.push(genesis_slot); roots_a.push(genesis_slot); @@ -4286,7 +4286,7 @@ fn test_leader_failure_4() { // // Validator A (60%) // Validator B (40%) -// / --- 10 --- [..] --- 16 (B is voting, due to network issues is initally not able to see the other fork at all) +// / --- 10 --- [..] --- 16 (B is voting, due to network issues is initially not able to see the other fork at all) // / // 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 (A votes 1 - 9 votes are landing normally.
B does the same however votes are not landing) // \ @@ -4482,7 +4482,7 @@ fn test_slot_hash_expiry() { ); } -// This test simulates a case where a leader sends a duplicate block with different ancestory. One +// This test simulates a case where a leader sends a duplicate block with different ancestry. One // version builds off of the rooted path, however the other version builds off a pruned branch. The // validators that receive the pruned version will need to repair in order to continue, which // requires an ancestor hashes repair. @@ -4511,7 +4511,7 @@ fn test_slot_hash_expiry() { // reached as minority cannot pass threshold otherwise). // 4) Let minority produce forks on pruned forks until out of leader slots then kill. // 5) Truncate majority ledger past fork slot so it starts building off of fork slot. -// 6) Restart majority and wait untill it starts producing blocks on main fork and roots something +// 6) Restart majority and wait until it starts producing blocks on main fork and roots something // past the fork slot. // 7) Construct our ledger by copying majority ledger and copying blocks from minority for the pruned path. // 8) In our node's ledger, change the parent of the latest slot in majority fork to be the latest diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 87c44399e..f940dc36c 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -241,7 +241,7 @@ mod tests { let count = rng.gen_range(1..128); let _packets: Vec<_> = repeat_with(|| recycler.allocate("")).take(count).collect(); } - // Assert that the gc size has shrinked. + // Assert that the gc size has shrunk. assert_eq!( recycler.recycler.gc.lock().unwrap().len(), RECYCLER_SHRINK_SIZE diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index f0d37e24c..49c2d4dc3 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -1608,7 +1608,7 @@ mod tests { assert!(poh_recorder.working_bank.is_some()); // Drop entry receiver, and try to tick again. Because - // the reciever is closed, the ticks will not be drained from the cache, + // the receiver is closed, the ticks will not be drained from the cache, // and the working bank will be cleared drop(entry_receiver); poh_recorder.tick(); diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index 4bbd3a42c..b80241283 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -519,7 +519,7 @@ impl PubsubClient { /// Receives messages of type [`SlotUpdate`] when various updates to a slot occur. /// /// Note that this method operates differently than other subscriptions: - /// instead of sending the message to a reciever on a channel, it accepts a + /// instead of sending the message to a receiver on a channel, it accepts a /// `handler` callback that processes the message directly. This processing /// occurs on another thread. /// diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index 6b88d4310..36bb99158 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -766,7 +766,7 @@ impl PubsubClient { /// Receives messages of type [`SlotUpdate`] when various updates to a slot occur. 
/// /// Note that this method operates differently than other subscriptions: - /// instead of sending the message to a reciever on a channel, it accepts a + /// instead of sending the message to a receiver on a channel, it accepts a /// `handler` callback that processes the message directly. This processing /// occurs on another thread. /// diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs index c2a8e862b..f057980c7 100644 --- a/quic-client/src/quic_client.rs +++ b/quic-client/src/quic_client.rs @@ -26,7 +26,7 @@ const SEND_DATA_TIMEOUT: Duration = Duration::from_secs(10); /// A semaphore used for limiting the number of asynchronous tasks spawn to the /// runtime. Before spawnning a task, use acquire. After the task is done (be it -/// succsess or failure), call release. +/// success or failure), call release. struct AsyncTaskSemaphore { /// Keep the counter info about the usage counter: Mutex<u64>, diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index b874261de..246eb9b92 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -193,7 +193,7 @@ mod tests { fn test_quic_bi_direction() { /// This tests bi-directional quic communication. There are the following components /// The request receiver -- responsible for receiving requests - /// The request sender -- responsible sending requests to the request reciever using quic + /// The request sender -- responsible for sending requests to the request receiver using quic /// The response receiver -- responsible for receiving the responses to the requests /// The response sender -- responsible for sending responses to the response receiver. /// In this we demonstrate that the request sender and the response receiver use the diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs index de8f8cddd..44ab26359 100644 --- a/rpc-client/src/mock_sender.rs +++ b/rpc-client/src/mock_sender.rs @@ -66,7 +66,7 @@ pub struct MockSender { /// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`. /// /// It is customary to set the `url` to "succeeds" for mocks that should -/// return sucessfully, though this value is not actually interpreted. +/// return successfully, though this value is not actually interpreted. /// /// Other possible values of `url` are specific to different `RpcRequest` /// values. Read the implementation for specifics. diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index 21350938a..a4fd13c98 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -341,7 +341,7 @@ impl RpcClient { /// behavior in specific scenarios: /// /// - It is customary to set the `url` to "succeeds" for mocks that should - /// return sucessfully, though this value is not actually interpreted. + /// return successfully, though this value is not actually interpreted. /// /// - If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`. /// @@ -396,7 +396,7 @@ impl RpcClient { /// scenarios. /// /// It is customary to set the `url` to "succeeds" for mocks that should - /// return sucessfully, though this value is not actually interpreted. + /// return successfully, though this value is not actually interpreted. /// /// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
/// @@ -424,7 +424,7 @@ impl RpcClient { /// # use solana_rpc_client::nonblocking::rpc_client::RpcClient; /// # use std::collections::HashMap; /// # use serde_json::json; - /// // Create a mock with a custom repsonse to the `GetBalance` request + /// // Create a mock with a custom response to the `GetBalance` request /// let account_balance = 50; /// let account_balance_response = json!(Response { /// context: RpcResponseContext { slot: 1, api_version: None }, @@ -1480,7 +1480,7 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference @@ -1700,7 +1700,7 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference diff --git a/rpc-client/src/rpc_client.rs b/rpc-client/src/rpc_client.rs index afccd7af0..95d473f9f 100644 --- a/rpc-client/src/rpc_client.rs +++ b/rpc-client/src/rpc_client.rs @@ -492,7 +492,7 @@ impl RpcClient { /// # use solana_rpc_client::rpc_client::RpcClient; /// # use std::collections::HashMap; /// # use serde_json::json; - /// // Create a mock with a custom repsonse to the `GetBalance` request + /// // Create a mock with a custom response to the `GetBalance` request /// let account_balance = 50; /// let account_balance_response = json!(Response { /// context: RpcResponseContext { slot: 1, api_version: None }, @@ -1205,7 +1205,7 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference @@ -1406,7 +1406,7 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs index cea2af674..28343a056 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/accounts/mod.rs @@ -1662,7 +1662,7 @@ mod tests { } } - // If payer account has insufficent balance, expect InsufficientFundsForFee error + // If payer account has insufficient balance, expect InsufficientFundsForFee error // regardless feature gate status, or if payer is nonce account. 
{ for (is_nonce, min_balance) in [(true, min_balance), (false, 0)] { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 14643c3aa..cf72333f6 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5701,7 +5701,7 @@ impl Bank { /// Calculates (and returns) skipped rewrites for this bank /// /// Refer to `rebuild_skipped_rewrites()` for more documentation. - /// This implementaion is purposely separate to facilitate testing. + /// This implementation is purposely separate to facilitate testing. /// /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the /// specific account is *not* already in the accounts delta hash. If an account is not in @@ -7258,7 +7258,7 @@ impl Bank { /// This should only be used for developing purposes. pub fn set_capitalization(&self) -> u64 { let old = self.capitalization(); - // We cannot debug verify the hash calculation here becuase calculate_capitalization will use the index calculation due to callers using the write cache. + // We cannot debug verify the hash calculation here because calculate_capitalization will use the index calculation due to callers using the write cache. // debug_verify only exists as an extra debugging step under the assumption that this code path is only used for tests. But, this is used by ledger-tool create-snapshot // for example. let debug_verify = false; diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index ed0a4dab8..67464230c 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1734,7 +1734,7 @@ mod tests { /// - take an incremental snapshot /// - ensure deserializing from this snapshot is equal to this bank /// slot 3: - /// - remove Account2's reference back to slot 2 by transfering from the mint to Account2 + /// - remove Account2's reference back to slot 2 by transferring from the mint to Account2 /// slot 4: /// - ensure `clean_accounts()` has run and that Account1 is gone /// - take another incremental snapshot diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 37db9eea7..4e7d576f0 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -43,7 +43,7 @@ pub struct SnapshotMinimizer<'a> { impl<'a> SnapshotMinimizer<'a> { /// Removes all accounts not necessary for replaying slots in the range [starting_slot, ending_slot]. /// `transaction_account_set` should contain accounts used in transactions in the slot range [starting_slot, ending_slot]. - /// This function will accumulate other accounts (rent colleciton, builtins, etc) necessary to replay transactions. + /// This function will accumulate other accounts (rent collection, builtins, etc) necessary to replay transactions. /// /// This function will modify accounts_db by removing accounts not needed to replay [starting_slot, ending_slot], /// and update the bank's capitalization. 
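The SnapshotMinimizer doc comment above boils down to a set union over account categories. A minimal sketch of that accumulation step in plain Rust (the helper and parameter names here are hypothetical stand-ins, not the real accounts-db API):

    use std::collections::HashSet;

    // Stand-in for solana_sdk::pubkey::Pubkey; for illustration only.
    type Pubkey = [u8; 32];

    /// Union the account categories the comment describes: accounts used by
    /// transactions in [starting_slot, ending_slot], plus the extra sets
    /// (rent collection, builtins, etc) required to replay that range.
    fn accumulate_minimized_accounts(
        transaction_account_set: &HashSet<Pubkey>,
        rent_collection_accounts: &HashSet<Pubkey>,
        builtin_accounts: &HashSet<Pubkey>,
    ) -> HashSet<Pubkey> {
        let mut minimized = transaction_account_set.clone();
        minimized.extend(rent_collection_accounts.iter().copied());
        minimized.extend(builtin_accounts.iter().copied());
        minimized
    }

Any account outside the resulting set is a candidate for removal, which is what lets the minimizer shrink accounts_db while keeping the slot range replayable.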
diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 88be65216..da9ee3595 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -715,7 +715,7 @@ pub fn archive_snapshot_package( .map_err(|err| SnapshotError::IoWithSource(err, "create staging snapshots path"))?; let src_snapshot_dir = &snapshot_package.bank_snapshot_dir; - // To be a source for symlinking and archiving, the path need to be an aboslute path + // To be a source for symlinking and archiving, the path needs to be an absolute path let src_snapshot_dir = src_snapshot_dir .canonicalize() .map_err(|_e| SnapshotError::InvalidSnapshotDirPath(src_snapshot_dir.clone()))?; @@ -2024,7 +2024,7 @@ pub fn verify_snapshot_archive( // The new the status_cache file is inside the slot directory together with the snapshot file. // When unpacking an archive, the status_cache file from the archive is one-level up outside of - // the slot direcotry. + // the slot directory. // The unpacked status_cache file need to be put back into the slot directory for the directory // comparison to pass. let existing_unpacked_status_cache_file = @@ -3031,7 +3031,7 @@ mod tests { } // Ensure the remaining incremental snapshots are at the right slot - let expected_remaing_incremental_snapshot_archive_slots = + let expected_remaining_incremental_snapshot_archive_slots = (latest_full_snapshot_archive_slot..) .step_by(incremental_snapshot_interval) .take(num_incremental_snapshots_per_full_snapshot) @@ -3048,7 +3048,7 @@ mod tests { .collect::<HashSet<_>>(); assert_eq!( actual_remaining_incremental_snapshot_archive_slots, - expected_remaing_incremental_snapshot_archive_slots + expected_remaining_incremental_snapshot_archive_slots ); } diff --git a/scripts/coverage-in-disk.sh b/scripts/coverage-in-disk.sh index 50d3e0ac9..a6d8e3481 100755 --- a/scripts/coverage-in-disk.sh +++ b/scripts/coverage-in-disk.sh @@ -34,7 +34,7 @@ fi coverageFlags=() coverageFlags+=(-Zprofile) # Enable coverage -coverageFlags+=("-Aincomplete_features") # Supress warnings due to frozen abi, which is harmless for it +coverageFlags+=("-Aincomplete_features") # Suppress warnings due to frozen abi, which is harmless for it if [[ $(uname) != Darwin ]]; then # macOS skipped due to https://github.com/rust-lang/rust/issues/63047 coverageFlags+=("-Clink-dead-code") # Dead code should appear red in the report fi diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 93a9afbe3..d1ed8c752 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -34,7 +34,7 @@ fi coverageFlags=() coverageFlags+=(-Zprofile) # Enable coverage -coverageFlags+=("-Aincomplete_features") # Supress warnings due to frozen abi, which is harmless for it +coverageFlags+=("-Aincomplete_features") # Suppress warnings due to frozen abi, which is harmless for it if [[ $(uname) != Darwin ]]; then # macOS skipped due to https://github.com/rust-lang/rust/issues/63047 coverageFlags+=("-Clink-dead-code") # Dead code should appear red in the report fi diff --git a/scripts/sed-i-all-rs-files-for-rust-analyzer.sh b/scripts/sed-i-all-rs-files-for-rust-analyzer.sh index 4c14819a6..c8b4d7d17 100755 --- a/scripts/sed-i-all-rs-files-for-rust-analyzer.sh +++ b/scripts/sed-i-all-rs-files-for-rust-analyzer.sh @@ -6,7 +6,7 @@ set -e # so, here's some wild hack from ryoqun!
if [[ $1 = "doit" ]]; then - # it's true that we put true just for truely-aligned lines + # it's true that we put true just for truly-aligned lines # shellcheck disable=SC2046 # our rust files are sanely named with no need to escape true && sed -i -e 's/#\[cfg(test)\]/#[cfg(escaped_cfg_test)]/g' $(git ls-files :**.rs :^**/build.rs) && diff --git a/scripts/system-stats.sh b/scripts/system-stats.sh index 08e27506b..12c72ee1e 100755 --- a/scripts/system-stats.sh +++ b/scripts/system-stats.sh @@ -12,11 +12,11 @@ source scripts/configure-metrics.sh while true; do # collect top twice because the first time is inaccurate - top_ouput="$(top -bn2 -d1)" + top_output="$(top -bn2 -d1)" # collect the total cpu usage by subtracting idle usage from 100% - cpu_usage=$(echo "${top_ouput}" | grep '%Cpu(s):' | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | tail -1 | awk '{print 100 - $1}') + cpu_usage=$(echo "${top_output}" | grep '%Cpu(s):' | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | tail -1 | awk '{print 100 - $1}') # collect the total ram usage by dividing used memory / total memory - ram_total_and_usage=$(echo "${top_ouput}" | grep '.*B Mem'| tail -1 | sed "s/.*: *\([0-9.]*\)%* total.*, *\([0-9.]*\)%* used.*/\1 \2/") + ram_total_and_usage=$(echo "${top_output}" | grep '.*B Mem'| tail -1 | sed "s/.*: *\([0-9.]*\)%* total.*, *\([0-9.]*\)%* used.*/\1 \2/") read -r total used <<< "$ram_total_and_usage" ram_usage=$(awk "BEGIN {print $used / $total * 100}") cpu_report="cpu_usage=$cpu_usage,ram_usage=$ram_usage" diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index 21b3e774a..8eec4746a 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -347,7 +347,7 @@ impl Instruction { /// `program_id` is the address of the program that will execute the instruction. /// `accounts` contains a description of all accounts that may be accessed by the program. /// - /// Borsh serialization is often prefered over bincode as it has a stable + /// Borsh serialization is often preferred over bincode as it has a stable /// [specification] and an [implementation in JavaScript][jsb], neither of /// which are true of bincode. /// diff --git a/sdk/program/src/loader_v4_instruction.rs b/sdk/program/src/loader_v4_instruction.rs index 66d868fe4..d2e0e041c 100644 --- a/sdk/program/src/loader_v4_instruction.rs +++ b/sdk/program/src/loader_v4_instruction.rs @@ -24,7 +24,7 @@ pub enum LoaderV4Instruction { /// Decreasing to size zero closes the program account and resets it /// into an uninitialized state. /// Providing additional lamports upfront might be necessary to reach rent exemption. - /// Superflous funds are transfered to the recipient account. + /// Superflous funds are transferred to the recipient account. /// /// # Account references /// 0. `[(signer), writable]` The program account to change the size of. @@ -51,7 +51,7 @@ pub enum LoaderV4Instruction { /// Undo the deployment of a program account. /// - /// The program is no longer executable and goes into maintainance. + /// The program is no longer executable and goes into maintenance. /// Necessary for writing data and truncating. /// /// # Account references diff --git a/sdk/program/src/secp256k1_recover.rs b/sdk/program/src/secp256k1_recover.rs index 8e2e3be05..f688e7d48 100644 --- a/sdk/program/src/secp256k1_recover.rs +++ b/sdk/program/src/secp256k1_recover.rs @@ -109,7 +109,7 @@ impl Secp256k1Pubkey { /// arbitrary message, signed by some public key. 
/// /// The recovery ID is a value in the range [0, 3] that is generated during -/// signing, and allows the recovery process to be more efficent. Note that the +/// signing, and allows the recovery process to be more efficient. Note that the /// `recovery_id` here does not directly correspond to an Ethereum recovery ID /// as used in `ecrecover`. This function accepts recovery IDs in the range of /// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 072c7baa9..1d9facfac 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -147,7 +147,7 @@ pub enum TransactionVerificationMode { pub type Result<T> = result::Result<T, TransactionError>; -/// An atomically-commited sequence of instructions. +/// An atomically-committed sequence of instructions. /// /// While [`Instruction`]s are the basic unit of computation in Solana, /// they are submitted by clients in [`Transaction`]s containing one or diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index e73f1c2ed..8922a01f4 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -995,7 +995,7 @@ impl<'a> BorrowedAccount<'a> { // about to write into it. Make the account mutable by copying it in a // buffer with MAX_PERMITTED_DATA_INCREASE capacity so that if the // transaction reallocs, we don't have to copy the whole account data a - // second time to fullfill the realloc. + // second time to fulfill the realloc. // // NOTE: The account memory region CoW code in bpf_loader::create_vm() implements the same // logic and must be kept in sync. diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs index 1feba4d93..9f6667cec 100644 --- a/storage-bigtable/src/lib.rs +++ b/storage-bigtable/src/lib.rs @@ -823,7 +823,7 @@ impl LedgerStorage { .unwrap_or(0); // Return the next tx-by-addr data of amount `limit` plus extra to account for the largest - // number that might be flitered out + // number that might be filtered out let tx_by_addr_data = bigtable .get_row_data( "tx-by-addr", diff --git a/system-test/testnet-automation.sh b/system-test/testnet-automation.sh index 9f4cbc192..e4256b7a7 100755 --- a/system-test/testnet-automation.sh +++ b/system-test/testnet-automation.sh @@ -12,7 +12,7 @@ function cleanup_testnet { Test failed during step: ${STEP} -Failure occured when running the following command: +Failure occurred when running the following command: $*" fi diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index afbae094e..94fecf5e6 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -514,7 +514,7 @@ fn main() { .long("batch-sleep-ms") .takes_value(true) .value_name("NUM") - .help("Sleep for this long the num outstanding transctions is greater than the batch size."), + .help("Sleep for this long when the num outstanding transactions is greater than the batch size."), ) .arg( Arg::with_name("check_gossip") diff --git a/transaction-status/src/parse_address_lookup_table.rs b/transaction-status/src/parse_address_lookup_table.rs index 94127c8e0..a76b0138b 100644 --- a/transaction-status/src/parse_address_lookup_table.rs +++ b/transaction-status/src/parse_address_lookup_table.rs @@ -126,7 +126,7 @@ mod test { #[test] fn test_parse_create_address_lookup_table_ix() { let from_pubkey = Pubkey::new_unique(); - // use explicit key to have predicatble bump_seed + // use explicit key to have predictable bump_seed let authority =
Pubkey::from_str("HkxY6vXdrKzoCQLmdJ3cYo9534FdZQxzBNWTyrJzzqJM").unwrap(); let slot = 42; diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 124d6d8c9..8079178cf 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -311,7 +311,7 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap) -> Vec( fanout: usize, - index: usize, // Local node's index withing the nodes slice. + index: usize, // Local node's index within the nodes slice. nodes: &[T], ) -> impl Iterator + '_ { // Node's index within its neighborhood. diff --git a/zk-token-sdk/src/zk_token_elgamal/ops.rs b/zk-token-sdk/src/zk_token_elgamal/ops.rs index bbca56f8e..10db117c4 100644 --- a/zk-token-sdk/src/zk_token_elgamal/ops.rs +++ b/zk-token-sdk/src/zk_token_elgamal/ops.rs @@ -254,7 +254,7 @@ mod tests { source_pk.encrypt_with(22_u64, &final_source_open).into(); assert_eq!(expected_source, final_source_spendable); - // program arithemtic for the destination account + // program arithmetic for the destination account let dest_lo_ct: pod::ElGamalCiphertext = (comm_lo, handle_dest_lo).into(); let dest_hi_ct: pod::ElGamalCiphertext = (comm_hi, handle_dest_hi).into();