* Fix typos

* Fix typos

* fix typo
GoodDaisy 2023-12-22 04:06:00 +08:00 committed by GitHub
parent 5247be17a9
commit 03386cc7b9
55 changed files with 82 additions and 82 deletions


@@ -76,7 +76,7 @@ const CACHED_OFFSET: OffsetReduced = (1 << (OffsetReduced::BITS - 1)) - 1;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub struct PackedOffsetAndFlags {
-/// this provides 2^31 bits, which when multipled by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
+/// this provides 2^31 bits, which when multiplied by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
offset_reduced: B31,
/// use 1 bit to specify that the entry is zero lamport
is_zero_lamport: bool,


@@ -2108,7 +2108,7 @@ mod tests {
let accounts = Accounts::new(Arc::new(accounts_db));
/* This test assumes pubkey0 < pubkey1 < pubkey2.
-* But the keys created with new_unique() does not gurantee this
+* But the keys created with new_unique() does not guarantee this
* order because of the endianness. new_unique() calls add 1 at each
* key generaration as the little endian integer. A pubkey stores its
* value in a 32-byte array bytes, and its eq-partial trait considers


@@ -1230,7 +1230,7 @@ pub enum ZeroLamportAccounts {
pub struct AccountHash(pub Hash);
// Ensure the newtype wrapper never changes size from the underlying Hash
-// This also ensures there are no padding bytes, which is requried to safely implement Pod
+// This also ensures there are no padding bytes, which is required to safely implement Pod
const _: () = assert!(std::mem::size_of::<AccountHash>() == std::mem::size_of::<Hash>());
/// Hash of accounts


@@ -85,7 +85,7 @@ impl ByteBlockWriter {
/// Write all the Some fields of the specified AccountMetaOptionalFields.
///
-/// Note that the existance of each optional field is stored separately in
+/// Note that the existence of each optional field is stored separately in
/// AccountMetaFlags.
pub fn write_optional_fields(
&mut self,


@@ -238,7 +238,7 @@ where
// Move on to next chunk
self.chunk_index = (self.chunk_index + 1) % self.account_chunks.source.len();
-// Switch directions after transfering for each "chunk"
+// Switch directions after transferring for each "chunk"
if self.chunk_index == 0 {
self.reclaim_lamports_back_to_source_account =
!self.reclaim_lamports_back_to_source_account;


@@ -79,7 +79,7 @@ impl RestartableBucket {
bucket.random = random;
}
}
-/// retreive the file_name and random that were used prior to the current restart.
+/// retrieve the file_name and random that were used prior to the current restart.
/// This was written into the restart file on the prior run by `set_file`.
pub(crate) fn get(&self) -> Option<(u128, u64)> {
self.restart.as_ref().map(|restart| {


@@ -48,7 +48,7 @@ affects() {
# the worse (affected)
return 0
fi
-# Assume everyting needs to be tested when any Dockerfile changes
+# Assume everything needs to be tested when any Dockerfile changes
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${affected_files[@]}"; do


@@ -30,7 +30,7 @@ annotate() {
fi
}
-# Assume everyting needs to be tested when this file or any Dockerfile changes
+# Assume everything needs to be tested when this file or any Dockerfile changes
mandatory_affected_files=()
mandatory_affected_files+=(^ci/buildkite-pipeline.sh)
mandatory_affected_files+=(^ci/docker-rust/Dockerfile)


@@ -48,7 +48,7 @@ affects() {
# the worse (affected)
return 0
fi
-# Assume everyting needs to be tested when any Dockerfile changes
+# Assume everything needs to be tested when any Dockerfile changes
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${affected_files[@]}"; do


@@ -107,7 +107,7 @@ test-stable-sbf)
_ cargo test \
--manifest-path programs/sbf/Cargo.toml \
--no-default-features --features=sbf_c,sbf_rust assert_instruction_count \
--- --nocapture &> "${sbf_target_path}"/deploy/instuction_counts.txt
+-- --nocapture &> "${sbf_target_path}"/deploy/instruction_counts.txt
sbf_dump_archive="sbf-dumps.tar.bz2"
rm -f "$sbf_dump_archive"


@@ -755,7 +755,7 @@ pub fn process_catchup(
if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default {
// go to new line to leave this message on console
println!(
-"Prefering explicitly given rpc ({}) as us, although --our-localhost is given\n",
+"Preferring explicitly given rpc ({}) as us, although --our-localhost is given\n",
node_json_rpc_url.as_ref().unwrap()
);
} else {
@@ -771,7 +771,7 @@ pub fn process_catchup(
(if node_pubkey.is_some() && node_pubkey != guessed_default {
// go to new line to leave this message on console
println!(
-"Prefering explicitly given node pubkey ({}) as us, although --our-localhost \
+"Preferring explicitly given node pubkey ({}) as us, although --our-localhost \
is given\n",
node_pubkey.unwrap()
);


@@ -161,7 +161,7 @@ where
dummy_message.recent_blockhash = *blockhash;
get_fee_for_messages(rpc_client, &[&dummy_message])?
}
-None => 0, // Offline, cannot calulate fee
+None => 0, // Offline, cannot calculate fee
};
match amount {


@@ -76,7 +76,7 @@ impl ConnectionCache {
Self::new_with_client_options(name, connection_pool_size, None, None, None)
}
-/// Create a quic conneciton_cache with more client options
+/// Create a quic connection_cache with more client options
pub fn new_with_client_options(
name: &'static str,
connection_pool_size: usize,


@@ -26,7 +26,7 @@ pub enum VoteSource {
Tpu,
}
-/// Holds deserialized vote messages as well as their source, foward status and slot
+/// Holds deserialized vote messages as well as their source, forward status and slot
#[derive(Debug, Clone)]
pub struct LatestValidatorVotePacket {
vote_source: VoteSource,


@@ -47,7 +47,7 @@ pub(crate) struct ProcessTransactionsSummary {
// Total amount of time spent running the cost model
pub cost_model_us: u64,
-// Breakdown of time spent executing and comitting transactions
+// Breakdown of time spent executing and committing transactions
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
// Breakdown of all the transaction errors from transactions passed for execution
@@ -104,7 +104,7 @@ struct LeaderSlotPacketCountMetrics {
// total number of transactions that were executed, but failed to be committed into the Poh stream because
// the block ended. Some of these may be already counted in `nonretryable_errored_transactions_count` if they
-// then hit the age limit after failing to be comitted.
+// then hit the age limit after failing to be committed.
executed_transactions_failed_commit_count: u64,
// total number of transactions that were excluded from the block because there were concurrent write locks active.


@@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice {
let mut update_operations: UpdateOperations = BTreeMap::new();
// Insert aggregate operations up to the root
self.insert_aggregate_operations(&mut update_operations, *slot_hash_key);
-// Remove child link so that this slot cannot be choosen as best or deepest
+// Remove child link so that this slot cannot be chosen as best or deepest
assert!(self
.fork_infos
.get_mut(&parent)
@@ -1308,7 +1308,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice {
// be for a slot that we currently do not have in our bank forks, so we
// return None.
//
-// We are guarenteed that we will eventually repair a duplicate confirmed version
+// We are guaranteed that we will eventually repair a duplicate confirmed version
// of this slot because the state machine will never dump a slot unless it has
// observed a duplicate confirmed version of the slot.
//


@@ -1123,7 +1123,7 @@ pub mod tests {
let request_slot = 100;
let mut test_setup = setup_add_response_test_pruned(request_slot, 10);
-// Insert all the correct ancestory
+// Insert all the correct ancestry
let tree = test_setup
.correct_ancestors_response
.iter()


@@ -338,7 +338,7 @@ impl RepairWeight {
}
Some(TreeRoot::PrunedRoot(subtree_root)) => {
// Even if these orphaned slots were previously pruned, they should be added back to
-// `self.trees` as we are no longer sure of their ancestory.
+// `self.trees` as we are no longer sure of their ancestry.
// After they are repaired there is a chance that they are now part of the rooted path.
// This is possible for a duplicate slot with multiple ancestors, if the
// version we had pruned before had the wrong ancestor, and the correct version is
@@ -892,7 +892,7 @@ impl RepairWeight {
);
}
-/// Finds any ancestors avaiable from `blockstore` for `slot`.
+/// Finds any ancestors available from `blockstore` for `slot`.
/// Ancestor search is stopped when finding one that chains to any
/// tree in `self.trees` or `self.pruned_trees` or if the ancestor is < self.root.
///
@@ -2201,21 +2201,21 @@ mod test {
let (blockstore, _, mut repair_weight) = setup_orphan_repair_weight();
// Ancestor of slot 4 is slot 2, with an existing subtree rooted at 0
-// because there wass a vote for a descendant
+// because there was a vote for a descendant
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 4),
(VecDeque::from([2]), Some(TreeRoot::Root(0)))
);
// Ancestors of 5 are [1, 3], with an existing subtree rooted at 0
-// because there wass a vote for a descendant
+// because there was a vote for a descendant
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 5),
(VecDeque::from([1, 3]), Some(TreeRoot::Root(0)))
);
// Ancestors of slot 23 are [20, 22], with an existing subtree of 20
-// because there wass a vote for 20
+// because there was a vote for 20
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 23),
(VecDeque::from([20, 22]), Some(TreeRoot::Root(20)))


@@ -965,7 +965,7 @@ impl ServeRepair {
stats.dropped_requests_outbound_bandwidth += 1;
continue;
}
-// Bypass ping/pong check for requests comming from QUIC endpoint.
+// Bypass ping/pong check for requests coming from QUIC endpoint.
if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() {
let (check, ping_pkt) =
Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair);


@@ -73,7 +73,7 @@ struct SnapshotTestConfig {
full_snapshot_archives_dir: TempDir,
bank_snapshots_dir: TempDir,
accounts_dir: PathBuf,
-// as the underscore prefix indicates, this isn't explictly used; but it's needed to keep
+// as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep
// TempDir::drop from running to retain that dir for the duration of test
_accounts_tmp_dir: TempDir,
}


@@ -1218,7 +1218,7 @@ impl ClusterInfo {
}
/// Returns epoch-slots inserted since the given cursor.
-/// Excludes entries from nodes with unkown or different shred version.
+/// Excludes entries from nodes with unknown or different shred version.
pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> {
let self_shred_version = Some(self.my_shred_version());
let gossip_crds = self.gossip.crds.read().unwrap();
@@ -1752,7 +1752,7 @@ impl ClusterInfo {
match gossip_crds.trim(cap, &keep, stakes, timestamp()) {
Err(err) => {
self.stats.trim_crds_table_failed.add_relaxed(1);
-// TODO: Stakes are comming from the root-bank. Debug why/when
+// TODO: Stakes are coming from the root-bank. Debug why/when
// they are empty/zero.
debug!("crds table trim failed: {:?}", err);
}


@@ -350,7 +350,7 @@ impl ContactInfo {
}
// Removes the IP address at the given index if
-// no socket entry refrences that index.
+// no socket entry references that index.
fn maybe_remove_addr(&mut self, index: u8) {
if !self.sockets.iter().any(|entry| entry.index == index) {
self.addrs.remove(usize::from(index));


@@ -1066,7 +1066,7 @@ mod test {
assert!(!other.check_duplicate(&node_crds));
assert_eq!(node.overrides(&other_crds), None);
assert_eq!(other.overrides(&node_crds), None);
-// Differnt crds value is not a duplicate.
+// Different crds value is not a duplicate.
let other = LegacyContactInfo::new_rand(&mut rng, Some(pubkey));
let other = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(other));
assert!(!node.check_duplicate(&other));


@@ -6341,7 +6341,7 @@ pub mod tests {
assert_eq!(
blockstore.find_missing_data_indexes(
slot,
-0, // first_timestmap
+0, // first_timestamp
0, // defer_threshold_ticks
0, // start_index
gap - 1, // end_index
@@ -6352,7 +6352,7 @@ pub mod tests {
assert_eq!(
blockstore.find_missing_data_indexes(
slot,
-0, // first_timestmap
+0, // first_timestamp
0, // defer_threshold_ticks
gap - 2, // start_index
gap, // end_index
@@ -9951,7 +9951,7 @@ pub mod tests {
}
#[test]
-fn test_rewards_protobuf_backward_compatability() {
+fn test_rewards_protobuf_backward_compatibility() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
@@ -9994,7 +9994,7 @@ pub mod tests {
// ledger archives, but typically those require contemporaraneous software for other reasons.
// However, we are persisting the test since the apis still exist in `blockstore_db`.
#[test]
-fn test_transaction_status_protobuf_backward_compatability() {
+fn test_transaction_status_protobuf_backward_compatibility() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();


@@ -303,7 +303,7 @@ mod tests {
fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore {
// The find_slots_to_clean() routine uses a method that queries data
// from RocksDB SST files. On a running validator, these are created
-// fairly reguarly as new data is coming in and contents of memory are
+// fairly regularly as new data is coming in and contents of memory are
// pushed to disk. In a unit test environment, we aren't pushing nearly
// enough data for this to happen organically. So, instead open and
// close the Blockstore which will perform the flush to SSTs.


@@ -2383,7 +2383,7 @@ fn test_hard_fork_with_gap_in_roots() {
.reversed_rooted_slot_iterator(common_root)
.unwrap()
.collect::<Vec<_>>();
-// artifically restore the forcibly purged genesis only for the validator A just for the sake of
+// artificially restore the forcibly purged genesis only for the validator A just for the sake of
// the final assertions.
slots_a.push(genesis_slot);
roots_a.push(genesis_slot);
@@ -4286,7 +4286,7 @@ fn test_leader_failure_4() {
//
// Validator A (60%)
// Validator B (40%)
-// / --- 10 --- [..] --- 16 (B is voting, due to network issues is initally not able to see the other fork at all)
+// / --- 10 --- [..] --- 16 (B is voting, due to network issues is initially not able to see the other fork at all)
// /
// 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 (A votes 1 - 9 votes are landing normally. B does the same however votes are not landing)
// \
@@ -4482,7 +4482,7 @@ fn test_slot_hash_expiry() {
);
}
-// This test simulates a case where a leader sends a duplicate block with different ancestory. One
+// This test simulates a case where a leader sends a duplicate block with different ancestry. One
// version builds off of the rooted path, however the other version builds off a pruned branch. The
// validators that receive the pruned version will need to repair in order to continue, which
// requires an ancestor hashes repair.
@@ -4511,7 +4511,7 @@ fn test_slot_hash_expiry() {
// reached as minority cannot pass threshold otherwise).
// 4) Let minority produce forks on pruned forks until out of leader slots then kill.
// 5) Truncate majority ledger past fork slot so it starts building off of fork slot.
-// 6) Restart majority and wait untill it starts producing blocks on main fork and roots something
+// 6) Restart majority and wait until it starts producing blocks on main fork and roots something
// past the fork slot.
// 7) Construct our ledger by copying majority ledger and copying blocks from minority for the pruned path.
// 8) In our node's ledger, change the parent of the latest slot in majority fork to be the latest


@@ -241,7 +241,7 @@ mod tests {
let count = rng.gen_range(1..128);
let _packets: Vec<_> = repeat_with(|| recycler.allocate("")).take(count).collect();
}
-// Assert that the gc size has shrinked.
+// Assert that the gc size has shrunk.
assert_eq!(
recycler.recycler.gc.lock().unwrap().len(),
RECYCLER_SHRINK_SIZE


@@ -1608,7 +1608,7 @@ mod tests {
assert!(poh_recorder.working_bank.is_some());
// Drop entry receiver, and try to tick again. Because
-// the reciever is closed, the ticks will not be drained from the cache,
+// the receiver is closed, the ticks will not be drained from the cache,
// and the working bank will be cleared
drop(entry_receiver);
poh_recorder.tick();


@@ -519,7 +519,7 @@ impl PubsubClient {
/// Receives messages of type [`SlotUpdate`] when various updates to a slot occur.
///
/// Note that this method operates differently than other subscriptions:
-/// instead of sending the message to a reciever on a channel, it accepts a
+/// instead of sending the message to a receiver on a channel, it accepts a
/// `handler` callback that processes the message directly. This processing
/// occurs on another thread.
///

View File

@@ -766,7 +766,7 @@ impl PubsubClient {
/// Receives messages of type [`SlotUpdate`] when various updates to a slot occur.
///
/// Note that this method operates differently than other subscriptions:
-/// instead of sending the message to a reciever on a channel, it accepts a
+/// instead of sending the message to a receiver on a channel, it accepts a
/// `handler` callback that processes the message directly. This processing
/// occurs on another thread.
///

View File

@@ -26,7 +26,7 @@ const SEND_DATA_TIMEOUT: Duration = Duration::from_secs(10);
/// A semaphore used for limiting the number of asynchronous tasks spawn to the
/// runtime. Before spawnning a task, use acquire. After the task is done (be it
-/// succsess or failure), call release.
+/// success or failure), call release.
struct AsyncTaskSemaphore {
/// Keep the counter info about the usage
counter: Mutex<u64>,


@@ -193,7 +193,7 @@ mod tests {
fn test_quic_bi_direction() {
/// This tests bi-directional quic communication. There are the following components
/// The request receiver -- responsible for receiving requests
-/// The request sender -- responsible sending requests to the request reciever using quic
+/// The request sender -- responsible sending requests to the request receiver using quic
/// The response receiver -- responsible for receiving the responses to the requests
/// The response sender -- responsible for sending responses to the response receiver.
/// In this we demonstrate that the request sender and the response receiver use the


@@ -66,7 +66,7 @@ pub struct MockSender {
/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// It is customary to set the `url` to "succeeds" for mocks that should
-/// return sucessfully, though this value is not actually interpreted.
+/// return successfully, though this value is not actually interpreted.
///
/// Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation for specifics.


@@ -341,7 +341,7 @@ impl RpcClient {
/// behavior in specific scenarios:
///
/// - It is customary to set the `url` to "succeeds" for mocks that should
-/// return sucessfully, though this value is not actually interpreted.
+/// return successfully, though this value is not actually interpreted.
///
/// - If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
@@ -396,7 +396,7 @@ impl RpcClient {
/// scenarios.
///
/// It is customary to set the `url` to "succeeds" for mocks that should
-/// return sucessfully, though this value is not actually interpreted.
+/// return successfully, though this value is not actually interpreted.
///
/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
@@ -424,7 +424,7 @@ impl RpcClient {
/// # use solana_rpc_client::nonblocking::rpc_client::RpcClient;
/// # use std::collections::HashMap;
/// # use serde_json::json;
-/// // Create a mock with a custom repsonse to the `GetBalance` request
+/// // Create a mock with a custom response to the `GetBalance` request
/// let account_balance = 50;
/// let account_balance_response = json!(Response {
/// context: RpcResponseContext { slot: 1, api_version: None },
@@ -1480,7 +1480,7 @@ impl RpcClient {
/// recent slots, plus up to
/// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES]
/// rooted slots. To search the full transaction history use the
-/// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
+/// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
/// method.
///
/// # RPC Reference
@@ -1700,7 +1700,7 @@ impl RpcClient {
/// recent slots, plus up to
/// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES]
/// rooted slots. To search the full transaction history use the
-/// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
+/// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
/// method.
///
/// # RPC Reference


@@ -492,7 +492,7 @@ impl RpcClient {
/// # use solana_rpc_client::rpc_client::RpcClient;
/// # use std::collections::HashMap;
/// # use serde_json::json;
-/// // Create a mock with a custom repsonse to the `GetBalance` request
+/// // Create a mock with a custom response to the `GetBalance` request
/// let account_balance = 50;
/// let account_balance_response = json!(Response {
/// context: RpcResponseContext { slot: 1, api_version: None },
@@ -1205,7 +1205,7 @@ impl RpcClient {
/// recent slots, plus up to
/// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES]
/// rooted slots. To search the full transaction history use the
-/// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
+/// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
/// method.
///
/// # RPC Reference
@@ -1406,7 +1406,7 @@ impl RpcClient {
/// recent slots, plus up to
/// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES]
/// rooted slots. To search the full transaction history use the
-/// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
+/// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history]
/// method.
///
/// # RPC Reference


@@ -1662,7 +1662,7 @@ mod tests {
}
}
-// If payer account has insufficent balance, expect InsufficientFundsForFee error
+// If payer account has insufficient balance, expect InsufficientFundsForFee error
// regardless feature gate status, or if payer is nonce account.
{
for (is_nonce, min_balance) in [(true, min_balance), (false, 0)] {


@@ -5701,7 +5701,7 @@ impl Bank {
/// Calculates (and returns) skipped rewrites for this bank
///
/// Refer to `rebuild_skipped_rewrites()` for more documentation.
-/// This implementaion is purposely separate to facilitate testing.
+/// This implementation is purposely separate to facilitate testing.
///
/// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the
/// specific account is *not* already in the accounts delta hash. If an account is not in
@@ -7258,7 +7258,7 @@ impl Bank {
/// This should only be used for developing purposes.
pub fn set_capitalization(&self) -> u64 {
let old = self.capitalization();
-// We cannot debug verify the hash calculation here becuase calculate_capitalization will use the index calculation due to callers using the write cache.
+// We cannot debug verify the hash calculation here because calculate_capitalization will use the index calculation due to callers using the write cache.
// debug_verify only exists as an extra debugging step under the assumption that this code path is only used for tests. But, this is used by ledger-tool create-snapshot
// for example.
let debug_verify = false;


@@ -1734,7 +1734,7 @@ mod tests {
/// - take an incremental snapshot
/// - ensure deserializing from this snapshot is equal to this bank
/// slot 3:
-/// - remove Account2's reference back to slot 2 by transfering from the mint to Account2
+/// - remove Account2's reference back to slot 2 by transferring from the mint to Account2
/// slot 4:
/// - ensure `clean_accounts()` has run and that Account1 is gone
/// - take another incremental snapshot


@@ -43,7 +43,7 @@ pub struct SnapshotMinimizer<'a> {
impl<'a> SnapshotMinimizer<'a> {
/// Removes all accounts not necessary for replaying slots in the range [starting_slot, ending_slot].
/// `transaction_account_set` should contain accounts used in transactions in the slot range [starting_slot, ending_slot].
-/// This function will accumulate other accounts (rent colleciton, builtins, etc) necessary to replay transactions.
+/// This function will accumulate other accounts (rent collection, builtins, etc) necessary to replay transactions.
///
/// This function will modify accounts_db by removing accounts not needed to replay [starting_slot, ending_slot],
/// and update the bank's capitalization.


@@ -715,7 +715,7 @@ pub fn archive_snapshot_package(
.map_err(|err| SnapshotError::IoWithSource(err, "create staging snapshots path"))?;
let src_snapshot_dir = &snapshot_package.bank_snapshot_dir;
-// To be a source for symlinking and archiving, the path need to be an aboslute path
+// To be a source for symlinking and archiving, the path need to be an absolute path
let src_snapshot_dir = src_snapshot_dir
.canonicalize()
.map_err(|_e| SnapshotError::InvalidSnapshotDirPath(src_snapshot_dir.clone()))?;
@@ -2024,7 +2024,7 @@ pub fn verify_snapshot_archive(
// The new the status_cache file is inside the slot directory together with the snapshot file.
// When unpacking an archive, the status_cache file from the archive is one-level up outside of
-// the slot direcotry.
+// the slot directory.
// The unpacked status_cache file need to be put back into the slot directory for the directory
// comparison to pass.
let existing_unpacked_status_cache_file =
@@ -3031,7 +3031,7 @@ mod tests {
}
// Ensure the remaining incremental snapshots are at the right slot
-let expected_remaing_incremental_snapshot_archive_slots =
+let expected_remaining_incremental_snapshot_archive_slots =
(latest_full_snapshot_archive_slot..)
.step_by(incremental_snapshot_interval)
.take(num_incremental_snapshots_per_full_snapshot)
@@ -3048,7 +3048,7 @@ mod tests {
.collect::<HashSet<_>>();
assert_eq!(
actual_remaining_incremental_snapshot_archive_slots,
-expected_remaing_incremental_snapshot_archive_slots
+expected_remaining_incremental_snapshot_archive_slots
);
}


@@ -34,7 +34,7 @@ fi
coverageFlags=()
coverageFlags+=(-Zprofile) # Enable coverage
-coverageFlags+=("-Aincomplete_features") # Supress warnings due to frozen abi, which is harmless for it
+coverageFlags+=("-Aincomplete_features") # Suppress warnings due to frozen abi, which is harmless for it
if [[ $(uname) != Darwin ]]; then # macOS skipped due to https://github.com/rust-lang/rust/issues/63047
coverageFlags+=("-Clink-dead-code") # Dead code should appear red in the report
fi


@@ -34,7 +34,7 @@ fi
coverageFlags=()
coverageFlags+=(-Zprofile) # Enable coverage
-coverageFlags+=("-Aincomplete_features") # Supress warnings due to frozen abi, which is harmless for it
+coverageFlags+=("-Aincomplete_features") # Suppress warnings due to frozen abi, which is harmless for it
if [[ $(uname) != Darwin ]]; then # macOS skipped due to https://github.com/rust-lang/rust/issues/63047
coverageFlags+=("-Clink-dead-code") # Dead code should appear red in the report
fi


@@ -6,7 +6,7 @@ set -e
# so, here's some wild hack from ryoqun!
if [[ $1 = "doit" ]]; then
-# it's true that we put true just for truely-aligned lines
+# it's true that we put true just for truly-aligned lines
# shellcheck disable=SC2046 # our rust files are sanely named with no need to escape
true &&
sed -i -e 's/#\[cfg(test)\]/#[cfg(escaped_cfg_test)]/g' $(git ls-files :**.rs :^**/build.rs) &&


@@ -12,11 +12,11 @@ source scripts/configure-metrics.sh
while true; do
# collect top twice because the first time is inaccurate
-top_ouput="$(top -bn2 -d1)"
+top_output="$(top -bn2 -d1)"
# collect the total cpu usage by subtracting idle usage from 100%
-cpu_usage=$(echo "${top_ouput}" | grep '%Cpu(s):' | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | tail -1 | awk '{print 100 - $1}')
+cpu_usage=$(echo "${top_output}" | grep '%Cpu(s):' | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | tail -1 | awk '{print 100 - $1}')
# collect the total ram usage by dividing used memory / total memory
-ram_total_and_usage=$(echo "${top_ouput}" | grep '.*B Mem'| tail -1 | sed "s/.*: *\([0-9.]*\)%* total.*, *\([0-9.]*\)%* used.*/\1 \2/")
+ram_total_and_usage=$(echo "${top_output}" | grep '.*B Mem'| tail -1 | sed "s/.*: *\([0-9.]*\)%* total.*, *\([0-9.]*\)%* used.*/\1 \2/")
read -r total used <<< "$ram_total_and_usage"
ram_usage=$(awk "BEGIN {print $used / $total * 100}")
cpu_report="cpu_usage=$cpu_usage,ram_usage=$ram_usage"


@@ -347,7 +347,7 @@ impl Instruction {
/// `program_id` is the address of the program that will execute the instruction.
/// `accounts` contains a description of all accounts that may be accessed by the program.
///
-/// Borsh serialization is often prefered over bincode as it has a stable
+/// Borsh serialization is often preferred over bincode as it has a stable
/// [specification] and an [implementation in JavaScript][jsb], neither of
/// which are true of bincode.
///


@@ -24,7 +24,7 @@ pub enum LoaderV4Instruction {
/// Decreasing to size zero closes the program account and resets it
/// into an uninitialized state.
/// Providing additional lamports upfront might be necessary to reach rent exemption.
-/// Superflous funds are transfered to the recipient account.
+/// Superflous funds are transferred to the recipient account.
///
/// # Account references
/// 0. `[(signer), writable]` The program account to change the size of.
@@ -51,7 +51,7 @@ pub enum LoaderV4Instruction {
/// Undo the deployment of a program account.
///
-/// The program is no longer executable and goes into maintainance.
+/// The program is no longer executable and goes into maintenance.
/// Necessary for writing data and truncating.
///
/// # Account references


@@ -109,7 +109,7 @@ impl Secp256k1Pubkey {
/// arbitrary message, signed by some public key.
///
/// The recovery ID is a value in the range [0, 3] that is generated during
-/// signing, and allows the recovery process to be more efficent. Note that the
+/// signing, and allows the recovery process to be more efficient. Note that the
/// `recovery_id` here does not directly correspond to an Ethereum recovery ID
/// as used in `ecrecover`. This function accepts recovery IDs in the range of
/// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert


@@ -147,7 +147,7 @@ pub enum TransactionVerificationMode {
pub type Result<T> = result::Result<T, TransactionError>;
-/// An atomically-commited sequence of instructions.
+/// An atomically-committed sequence of instructions.
///
/// While [`Instruction`]s are the basic unit of computation in Solana,
/// they are submitted by clients in [`Transaction`]s containing one or


@@ -995,7 +995,7 @@ impl<'a> BorrowedAccount<'a> {
// about to write into it. Make the account mutable by copying it in a
// buffer with MAX_PERMITTED_DATA_INCREASE capacity so that if the
// transaction reallocs, we don't have to copy the whole account data a
-// second time to fullfill the realloc.
+// second time to fulfill the realloc.
//
// NOTE: The account memory region CoW code in bpf_loader::create_vm() implements the same
// logic and must be kept in sync.


@@ -823,7 +823,7 @@ impl LedgerStorage {
.unwrap_or(0);
// Return the next tx-by-addr data of amount `limit` plus extra to account for the largest
-// number that might be flitered out
+// number that might be filtered out
let tx_by_addr_data = bigtable
.get_row_data(
"tx-by-addr",


@@ -12,7 +12,7 @@ function cleanup_testnet {
Test failed during step:
${STEP}
-Failure occured when running the following command:
+Failure occurred when running the following command:
$*"
fi


@@ -514,7 +514,7 @@ fn main() {
.long("batch-sleep-ms")
.takes_value(true)
.value_name("NUM")
-.help("Sleep for this long the num outstanding transctions is greater than the batch size."),
+.help("Sleep for this long the num outstanding transactions is greater than the batch size."),
)
.arg(
Arg::with_name("check_gossip")


@@ -126,7 +126,7 @@ mod test {
#[test]
fn test_parse_create_address_lookup_table_ix() {
let from_pubkey = Pubkey::new_unique();
-// use explicit key to have predicatble bump_seed
+// use explicit key to have predictable bump_seed
let authority = Pubkey::from_str("HkxY6vXdrKzoCQLmdJ3cYo9534FdZQxzBNWTyrJzzqJM").unwrap();
let slot = 42;


@@ -311,7 +311,7 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<N
// fanout + k, 2*fanout + k, ..., fanout*fanout + k
fn get_retransmit_peers<T: Copy>(
fanout: usize,
-index: usize, // Local node's index withing the nodes slice.
+index: usize, // Local node's index within the nodes slice.
nodes: &[T],
) -> impl Iterator<Item = T> + '_ {
// Node's index within its neighborhood.


@@ -254,7 +254,7 @@ mod tests {
source_pk.encrypt_with(22_u64, &final_source_open).into();
assert_eq!(expected_source, final_source_spendable);
-// program arithemtic for the destination account
+// program arithmetic for the destination account
let dest_lo_ct: pod::ElGamalCiphertext = (comm_lo, handle_dest_lo).into();
let dest_hi_ct: pod::ElGamalCiphertext = (comm_hi, handle_dest_hi).into();