diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 0b06715121..8ff0f3d3ce 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -19,7 +19,7 @@ use solana_ledger::{ use solana_measure::measure::Measure; use solana_perf::packet::to_packets_chunked; use solana_runtime::{ - accounts_background_service::ABSRequestSender, bank::Bank, bank_forks::BankForks, + accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, }; use solana_sdk::{ hash::Hash, @@ -326,7 +326,7 @@ fn main() { poh_recorder.lock().unwrap().set_bank(&bank); assert!(poh_recorder.lock().unwrap().bank().is_some()); if bank.slot() > 32 { - bank_forks.set_root(root, &ABSRequestSender::default(), None); + bank_forks.set_root(root, &AbsRequestSender::default(), None); root += 1; } debug!( diff --git a/cli/src/program.rs b/cli/src/program.rs index 4993538049..15c067d330 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -10,7 +10,7 @@ use bincode::serialize; use bip39::{Language, Mnemonic, MnemonicType, Seed}; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use log::*; -use solana_bpf_loader_program::{bpf_verifier, BPFError, ThisInstructionMeter}; +use solana_bpf_loader_program::{bpf_verifier, BpfError, ThisInstructionMeter}; use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}; use solana_cli_output::{ display::new_spinner_progress_bar, CliProgramAccountType, CliProgramAuthority, @@ -1372,7 +1372,7 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box::from_elf( + Executable::::from_elf( &program_data, Some(|x| bpf_verifier::check(x)), Config::default(), diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 0f202f55db..3257af2215 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -592,7 +592,7 @@ impl BankingStage { processed_transactions_indexes, ); } - Err(e) => panic!(format!("Poh recorder returned unexpected error: {:?}", e)), + Err(e) => panic!("Poh recorder returned unexpected error: {:?}", e), } poh_record.stop(); } diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 8c182e057f..b018600367 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -402,7 +402,7 @@ pub fn broadcast_shreds( match send_mmsg(s, &packets[sent..]) { Ok(n) => sent += n, Err(e) => { - return Err(Error::IO(e)); + return Err(Error::Io(e)); } } } diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 375dd15d8b..6268d0bb87 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -1507,7 +1507,7 @@ impl ClusterInfo { 1 ); error!("retransmit result {:?}", e); - return Err(Error::IO(e)); + return Err(Error::Io(e)); } } } diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 6249d1f57b..d597385ad6 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -249,7 +249,7 @@ mod tests { use super::*; use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_runtime::{ - accounts_background_service::ABSRequestSender, + accounts_background_service::AbsRequestSender, bank_forks::BankForks, genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs}, }; @@ -534,7 +534,7 @@ mod tests { &working_bank, ); for x in 0..root { - bank_forks.set_root(x, &ABSRequestSender::default(), None); + bank_forks.set_root(x, &AbsRequestSender::default(), None); } // Add an additional bank/vote that will root slot 2 @@ -573,7 +573,7 @@ mod tests 
{ .highest_confirmed_root(); bank_forks.set_root( root, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(highest_confirmed_root), ); let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root); @@ -642,7 +642,7 @@ mod tests { .highest_confirmed_root(); bank_forks.set_root( root, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(highest_confirmed_root), ); let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 75fbb8a098..9efb151a60 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -845,10 +845,9 @@ impl Tower { assert!( self.last_vote == Vote::default() && self.lockouts.votes.is_empty() || self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(), - format!( - "last vote: {:?} lockouts.votes: {:?}", - self.last_vote, self.lockouts.votes - ) + "last vote: {:?} lockouts.votes: {:?}", + self.last_vote, + self.lockouts.votes ); if let Some(last_voted_slot) = self.last_voted_slot() { @@ -1131,7 +1130,7 @@ impl Tower { #[derive(Error, Debug)] pub enum TowerError { #[error("IO Error: {0}")] - IOError(#[from] std::io::Error), + IoError(#[from] std::io::Error), #[error("Serialization Error: {0}")] SerializeError(#[from] bincode::Error), @@ -1157,7 +1156,7 @@ pub enum TowerError { impl TowerError { pub fn is_file_missing(&self) -> bool { - if let TowerError::IOError(io_err) = &self { + if let TowerError::IoError(io_err) = &self { io_err.kind() == std::io::ErrorKind::NotFound } else { false @@ -1246,7 +1245,7 @@ pub mod test { }; use solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path}; use solana_runtime::{ - accounts_background_service::ABSRequestSender, + accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, genesis_utils::{ @@ -1417,7 +1416,7 @@ pub mod test { new_root, &self.bank_forks, &mut self.progress, - &ABSRequestSender::default(), + &AbsRequestSender::default(), None, &mut self.heaviest_subtree_fork_choice, ) @@ -2704,7 +2703,7 @@ pub mod test { remove_file(path).unwrap(); }, ); - assert_matches!(loaded, Err(TowerError::IOError(_))) + assert_matches!(loaded, Err(TowerError::IoError(_))) } #[test] diff --git a/core/src/non_circulating_supply.rs b/core/src/non_circulating_supply.rs index bebd2ac6e2..df4a0ae466 100644 --- a/core/src/non_circulating_supply.rs +++ b/core/src/non_circulating_supply.rs @@ -31,7 +31,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc) -> NonCirculatingSuppl bank.get_filtered_indexed_accounts( &IndexKey::ProgramId(solana_stake_program::id()), // The program-id account index checks for Account owner on inclusion. However, due to - // the current AccountsDB implementation, an account may remain in storage as a + // the current AccountsDb implementation, an account may remain in storage as a // zero-lamport Account::Default() after being wiped and reinitialized in later // updates. We include the redundant filter here to avoid returning these accounts. 
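The banking_stage.rs panic above, the consensus.rs assert above, and the poh_service.rs assert below all drop a redundant format! wrapper: panic! and assert! are format macros themselves, so the format string and its arguments can be passed straight through, and for panic! newer toolchains additionally warn when the message is not a string literal. A minimal sketch of the pattern (the function names are illustrative, and std::io::Error stands in for the actual error type in the patch):

fn check_tick(num_hashes: u64, hashes_per_tick: u64) {
    // was: assert!(num_hashes <= hashes_per_tick, format!("{} <= {}", num_hashes, hashes_per_tick));
    // The macro formats the message itself, and only when the assertion fails.
    assert!(
        num_hashes <= hashes_per_tick,
        "{} <= {}",
        num_hashes,
        hashes_per_tick
    );
}

fn report(e: std::io::Error) {
    // was: panic!(format!("Poh recorder returned unexpected error: {:?}", e));
    panic!("Poh recorder returned unexpected error: {:?}", e);
}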
|account| account.owner == solana_stake_program::id(), diff --git a/core/src/optimistically_confirmed_bank_tracker.rs b/core/src/optimistically_confirmed_bank_tracker.rs index 523712dc34..b8f54e9e36 100644 --- a/core/src/optimistically_confirmed_bank_tracker.rs +++ b/core/src/optimistically_confirmed_bank_tracker.rs @@ -168,7 +168,7 @@ mod tests { use super::*; use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_runtime::{ - accounts_background_service::ABSRequestSender, commitment::BlockCommitmentCache, + accounts_background_service::AbsRequestSender, commitment::BlockCommitmentCache, }; use solana_sdk::pubkey::Pubkey; @@ -284,7 +284,7 @@ mod tests { bank_forks .write() .unwrap() - .set_root(7, &ABSRequestSender::default(), None); + .set_root(7, &AbsRequestSender::default(), None); OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(6), &bank_forks, diff --git a/core/src/poh_service.rs b/core/src/poh_service.rs index 8d94fbeb59..9dc915749a 100644 --- a/core/src/poh_service.rs +++ b/core/src/poh_service.rs @@ -248,11 +248,9 @@ mod tests { if entry.is_tick() { assert!( entry.num_hashes <= poh_config.hashes_per_tick.unwrap(), - format!( - "{} <= {}", - entry.num_hashes, - poh_config.hashes_per_tick.unwrap() - ) + "{} <= {}", + entry.num_hashes, + poh_config.hashes_per_tick.unwrap() ); if entry.num_hashes == poh_config.hashes_per_tick.unwrap() { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index a35d42b318..04f42e6674 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -28,7 +28,7 @@ use solana_ledger::{ use solana_measure::{measure::Measure, thread_mem_usage}; use solana_metrics::inc_new_counter_info; use solana_runtime::{ - accounts_background_service::ABSRequestSender, bank::Bank, bank_forks::BankForks, + accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache, vote_sender_types::ReplayVoteSender, }; use solana_sdk::{ @@ -98,7 +98,7 @@ pub struct ReplayStageConfig { pub subscriptions: Arc, pub leader_schedule_cache: Arc, pub latest_root_senders: Vec>, - pub accounts_background_request_sender: ABSRequestSender, + pub accounts_background_request_sender: AbsRequestSender, pub block_commitment_cache: Arc>, pub transaction_status_sender: Option, pub rewards_recorder_sender: Option, @@ -1070,7 +1070,7 @@ impl ReplayStage { blockstore: &Arc, leader_schedule_cache: &Arc, lockouts_sender: &Sender, - accounts_background_request_sender: &ABSRequestSender, + accounts_background_request_sender: &AbsRequestSender, latest_root_senders: &[Sender], subscriptions: &Arc, block_commitment_cache: &Arc>, @@ -1835,7 +1835,7 @@ impl ReplayStage { new_root: Slot, bank_forks: &RwLock, progress: &mut ProgressMap, - accounts_background_request_sender: &ABSRequestSender, + accounts_background_request_sender: &AbsRequestSender, highest_confirmed_root: Option, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, ) { @@ -2013,7 +2013,7 @@ pub(crate) mod tests { }, }; use solana_runtime::{ - accounts_background_service::ABSRequestSender, + accounts_background_service::AbsRequestSender, commitment::BlockCommitment, genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}, }; @@ -2249,7 +2249,7 @@ pub(crate) mod tests { root, &bank_forks, &mut progress, - &ABSRequestSender::default(), + &AbsRequestSender::default(), None, &mut heaviest_subtree_fork_choice, ); @@ -2293,7 +2293,7 @@ pub(crate) mod tests { root, &bank_forks, &mut 
progress, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(confirmed_root), &mut heaviest_subtree_fork_choice, ); @@ -3245,7 +3245,7 @@ pub(crate) mod tests { bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 9)); let bank9 = bank_forks.get(9).unwrap().clone(); bank_forks.insert(Bank::new_from_parent(&bank9, &Pubkey::default(), 10)); - bank_forks.set_root(9, &ABSRequestSender::default(), None); + bank_forks.set_root(9, &AbsRequestSender::default(), None); let total_epoch_stake = bank0.total_epoch_stake(); // Insert new ForkProgress for slot 10 and its @@ -3336,7 +3336,7 @@ pub(crate) mod tests { .get_propagated_stats_mut(0) .unwrap() .is_leader_slot = true; - bank_forks.set_root(0, &ABSRequestSender::default(), None); + bank_forks.set_root(0, &AbsRequestSender::default(), None); let total_epoch_stake = bank_forks.root_bank().total_epoch_stake(); // Insert new ForkProgress representing a slot for all slots 1..=num_banks. Only @@ -3416,7 +3416,7 @@ pub(crate) mod tests { .get_propagated_stats_mut(0) .unwrap() .is_leader_slot = true; - bank_forks.set_root(0, &ABSRequestSender::default(), None); + bank_forks.set_root(0, &AbsRequestSender::default(), None); let total_epoch_stake = num_validators as u64 * stake_per_validator; @@ -3761,7 +3761,7 @@ pub(crate) mod tests { bank_forks .write() .unwrap() - .set_root(3, &ABSRequestSender::default(), None); + .set_root(3, &AbsRequestSender::default(), None); let mut descendants = bank_forks.read().unwrap().descendants().clone(); let mut ancestors = bank_forks.read().unwrap().ancestors(); let slot_3_descendants = descendants.get(&3).unwrap().clone(); diff --git a/core/src/result.rs b/core/src/result.rs index bca8f79544..ac9907bfbc 100644 --- a/core/src/result.rs +++ b/core/src/result.rs @@ -10,8 +10,8 @@ use std::any::Any; #[derive(Debug)] pub enum Error { - IO(std::io::Error), - JSON(serde_json::Error), + Io(std::io::Error), + Json(serde_json::Error), AddrParse(std::net::AddrParseError), JoinError(Box), RecvError(std::sync::mpsc::RecvError), @@ -108,7 +108,7 @@ impl std::convert::From> for Error { } impl std::convert::From for Error { fn from(e: std::io::Error) -> Error { - Error::IO(e) + Error::Io(e) } } impl std::convert::From for Error { @@ -118,7 +118,7 @@ impl std::convert::From for Error { } impl std::convert::From for Error { fn from(e: serde_json::Error) -> Error { - Error::JSON(e) + Error::Json(e) } } impl std::convert::From for Error { @@ -199,7 +199,7 @@ mod tests { assert_matches!(send_error(), Err(Error::SendError)); assert_matches!(join_error(), Err(Error::JoinError(_))); let ioe = io::Error::new(io::ErrorKind::NotFound, "hi"); - assert_matches!(Error::from(ioe), Error::IO(_)); + assert_matches!(Error::from(ioe), Error::Io(_)); } #[test] fn fmt_test() { diff --git a/core/src/rpc.rs b/core/src/rpc.rs index dddd8180aa..8c43c5bef3 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -1402,7 +1402,7 @@ impl JsonRpcRequestProcessor { { bank.get_filtered_indexed_accounts(&IndexKey::ProgramId(*program_id), |account| { // The program-id account index checks for Account owner on inclusion. However, due - // to the current AccountsDB implementation, an account may remain in storage as a + // to the current AccountsDb implementation, an account may remain in storage as a // zero-lamport Account::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these // accounts. 
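In the core/src/result.rs hunk above only the variant names change (IO → Io, JSON → Json); the From impls are updated in the same hunk, so every call site that relies on ? for conversion keeps compiling unchanged, and the diff only has to touch explicit constructions and matches such as the Err(Error::Io(e)) returns in broadcast_stage.rs and cluster_info.rs. A trimmed sketch of that pattern (two variants only; the read_config helper is illustrative and not part of the patch):

use std::io;

#[derive(Debug)]
pub enum Error {
    Io(io::Error),
    Json(serde_json::Error),
}

impl From<io::Error> for Error {
    fn from(e: io::Error) -> Error {
        Error::Io(e)
    }
}

impl From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Error {
        Error::Json(e)
    }
}

pub fn read_config(path: &str) -> Result<serde_json::Value, Error> {
    // Both `?`s go through the From impls above, so the variant rename never
    // shows up here; only explicit `Error::Io(..)` / `Error::Json(..)`
    // constructions and matches elsewhere need editing.
    let text = std::fs::read_to_string(path)?;
    Ok(serde_json::from_str(&text)?)
}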
@@ -1421,7 +1421,7 @@ impl JsonRpcRequestProcessor { mut filters: Vec, ) -> Vec<(Pubkey, Account)> { // The by-owner accounts index checks for Token Account state and Owner address on - // inclusion. However, due to the current AccountsDB implementation, an account may remain + // inclusion. However, due to the current AccountsDb implementation, an account may remain // in storage as a zero-lamport Account::Default() after being wiped and reinitialized in // later updates. We include the redundant filters here to avoid returning these accounts. // @@ -1461,7 +1461,7 @@ impl JsonRpcRequestProcessor { mut filters: Vec, ) -> Vec<(Pubkey, Account)> { // The by-mint accounts index checks for Token Account state and Mint address on inclusion. - // However, due to the current AccountsDB implementation, an account may remain in storage + // However, due to the current AccountsDb implementation, an account may remain in storage // as be zero-lamport Account::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these accounts. // @@ -3035,7 +3035,7 @@ pub mod tests { genesis_utils::{create_genesis_config, GenesisConfigInfo}, }; use solana_runtime::{ - accounts_background_service::ABSRequestSender, commitment::BlockCommitment, + accounts_background_service::AbsRequestSender, commitment::BlockCommitment, }; use solana_sdk::{ clock::MAX_RECENT_BLOCKHASHES, @@ -3152,7 +3152,7 @@ pub mod tests { bank_forks .write() .unwrap() - .set_root(*root, &ABSRequestSender::default(), Some(0)); + .set_root(*root, &AbsRequestSender::default(), Some(0)); let mut stakes = HashMap::new(); stakes.insert(leader_vote_keypair.pubkey(), (1, Account::default())); let block_time = bank_forks diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 20bb624cbd..8d4e1c2a7b 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -30,7 +30,7 @@ use solana_ledger::{ }; use solana_runtime::{ accounts_background_service::{ - ABSRequestHandler, ABSRequestSender, AccountsBackgroundService, SendDroppedBankCallback, + AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, SendDroppedBankCallback, SnapshotRequestHandler, }, bank_forks::{BankForks, SnapshotConfig}, @@ -228,9 +228,9 @@ impl Tvu { )))); } - let accounts_background_request_sender = ABSRequestSender::new(snapshot_request_sender); + let accounts_background_request_sender = AbsRequestSender::new(snapshot_request_sender); - let accounts_background_request_handler = ABSRequestHandler { + let accounts_background_request_handler = AbsRequestHandler { snapshot_request_handler, pruned_banks_receiver, }; diff --git a/core/src/validator.rs b/core/src/validator.rs index 8d94f50741..ab19f28520 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1048,7 +1048,7 @@ fn new_banks_from_ledger( )); bank_forks.set_root( warp_slot, - &solana_runtime::accounts_background_service::ABSRequestSender::default(), + &solana_runtime::accounts_background_service::AbsRequestSender::default(), Some(warp_slot), ); leader_schedule_cache.set_root(&bank_forks.root_bank()); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 9196437b3d..5a8be3eae0 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -45,7 +45,7 @@ mod tests { snapshot_packager_service::{PendingSnapshotPackage, SnapshotPackagerService}, }; use solana_runtime::{ - accounts_background_service::{ABSRequestSender, SnapshotRequestHandler}, + accounts_background_service::{AbsRequestSender, SnapshotRequestHandler}, accounts_db, 
bank::{Bank, BankSlotDelta}, bank_forks::{ArchiveFormat, BankForks, SnapshotConfig}, @@ -203,7 +203,7 @@ mod tests { let (s, snapshot_request_receiver) = unbounded(); let (accounts_package_sender, _r) = channel(); - let request_sender = ABSRequestSender::new(Some(s)); + let request_sender = AbsRequestSender::new(Some(s)); let snapshot_request_handler = SnapshotRequestHandler { snapshot_config: snapshot_test_config.snapshot_config.clone(), snapshot_request_receiver, @@ -518,7 +518,7 @@ mod tests { (*add_root_interval * num_set_roots * 2) as u64, ); let mut current_bank = snapshot_test_config.bank_forks[0].clone(); - let request_sender = ABSRequestSender::new(Some(snapshot_sender)); + let request_sender = AbsRequestSender::new(Some(snapshot_sender)); for _ in 0..num_set_roots { for _ in 0..*add_root_interval { let new_slot = current_bank.slot() + 1; diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index 7365d195b8..c233a29867 100644 --- a/frozen-abi/src/abi_digester.rs +++ b/frozen-abi/src/abi_digester.rs @@ -1,15 +1,11 @@ use crate::abi_example::{normalize_type_name, AbiEnumVisitor}; use crate::hash::{Hash, Hasher}; - use log::*; - use serde::ser::Error as SerdeError; use serde::ser::*; use serde::{Serialize, Serializer}; - use std::any::type_name; use std::io::Write; - use thiserror::Error; #[derive(Debug)] @@ -561,21 +557,21 @@ mod tests { #[frozen_abi(digest = "GttWH8FAY3teUjTaSds9mL3YbiDQ7qWw7WAvDXKd4ZzX")] type TestUnitStruct = std::marker::PhantomData; - #[frozen_abi(digest = "2zvXde11f8sNnFbc9E6ZZeFxV7D2BTVLKEZmNTsCDBpS")] + #[frozen_abi(digest = "6kj3mPXbzWTwZho48kZWxZjuseLU2oiqhbpqca4DmcRq")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestEnum { - VARIANT1, - VARIANT2, + Variant1, + Variant2, } - #[frozen_abi(digest = "6keb3v7GXLahhL6zoinzCWwSvB3KhmvZMB3tN2mamAm3")] + #[frozen_abi(digest = "3WqYwnbQEdu6iPZi5LJa2b5kw55hxBtZdqFqiViFCKPo")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestTupleVariant { - VARIANT1(u8, u16), - VARIANT2(u8, u16), + Variant1(u8, u16), + Variant2(u8, u16), } - #[frozen_abi(digest = "DywMfwKq8HZCbUfTwnemHWMN8LvMZCvipQuLddQ2ywwG")] + #[frozen_abi(digest = "4E9gJjvKiETBeZ8dybZPAQ7maaHTHFucmLqgX2m6yrBh")] #[derive(Serialize, AbiExample)] struct TestVecEnum { enums: Vec, @@ -642,21 +638,21 @@ mod tests { _skipped_test_field: i8, } - #[frozen_abi(digest = "2zvXde11f8sNnFbc9E6ZZeFxV7D2BTVLKEZmNTsCDBpS")] + #[frozen_abi(digest = "6kj3mPXbzWTwZho48kZWxZjuseLU2oiqhbpqca4DmcRq")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestEnum { - VARIANT1, - VARIANT2, + Variant1, + Variant2, #[serde(skip)] #[allow(dead_code)] - VARIANT3, + Variant3, } - #[frozen_abi(digest = "6keb3v7GXLahhL6zoinzCWwSvB3KhmvZMB3tN2mamAm3")] + #[frozen_abi(digest = "3WqYwnbQEdu6iPZi5LJa2b5kw55hxBtZdqFqiViFCKPo")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestTupleVariant { - VARIANT1(u8, u16), - VARIANT2(u8, u16, #[serde(skip)] u32), + Variant1(u8, u16), + Variant2(u8, u16, #[serde(skip)] u32), } } } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 45e6b8f102..fdb87f95fe 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -45,7 +45,7 @@ use std::{ cmp, collections::{HashMap, HashSet}, fs, - io::{Error as IOError, ErrorKind}, + io::{Error as IoError, ErrorKind}, path::{Path, PathBuf}, rc::Rc, sync::{ @@ -2082,7 +2082,7 @@ impl Blockstore { Some((slot, _)) => { let confirmed_block = self.get_confirmed_block(slot, false).map_err(|err| { - BlockstoreError::IO(IOError::new( + 
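The new digests in the frozen-abi hunks above are the expected consequence of renaming the variants: the digester walks example values with a recording serde Serializer, and enum variant names are part of serde's data model, so VARIANT1 → Variant1 alone produces a different hash. The bincode encoding used for actual data identifies variants by index, so serialized bytes are unaffected by the rename. A small illustration of that distinction, using serde_json and bincode directly rather than the frozen-abi hasher:

use serde::Serialize;

#[derive(Serialize)]
enum TestEnum {
    Variant1,
    Variant2,
}

fn main() {
    // The serde data model exposes the variant *name*, which is what the ABI
    // digest is sensitive to: VARIANT1 -> Variant1 changes this string.
    assert_eq!(
        serde_json::to_string(&TestEnum::Variant1).unwrap(),
        "\"Variant1\""
    );
    // bincode (1.x defaults) encodes the variant *index* (as a u32 here), so
    // on-the-wire data is identical before and after the rename.
    assert_eq!(
        bincode::serialize(&TestEnum::Variant2).unwrap(),
        bincode::serialize(&1u32).unwrap()
    );
}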
BlockstoreError::Io(IoError::new( ErrorKind::Other, format!("Unable to get confirmed block: {}", err), )) @@ -2133,7 +2133,7 @@ impl Blockstore { Some((slot, _)) => { let confirmed_block = self.get_confirmed_block(slot, false).map_err(|err| { - BlockstoreError::IO(IOError::new( + BlockstoreError::Io(IoError::new( ErrorKind::Other, format!("Unable to get confirmed block: {}", err), )) @@ -3256,7 +3256,7 @@ pub fn create_new_ledger( error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?")); error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?")); - return Err(BlockstoreError::IO(IOError::new( + return Err(BlockstoreError::Io(IoError::new( ErrorKind::Other, format!( "Error trying to generate snapshot archive: {}", @@ -3303,7 +3303,7 @@ pub fn create_new_ledger( error_messages += &format!("/failed to stash problematic rocksdb: {}", e) }); - return Err(BlockstoreError::IO(IOError::new( + return Err(BlockstoreError::Io(IoError::new( ErrorKind::Other, format!( "Error checking to unpack genesis archive: {}{}", diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index b7e8c238f9..14466a2333 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -64,7 +64,7 @@ pub enum BlockstoreError { RocksDb(#[from] rocksdb::Error), SlotNotRooted, DeadSlot, - IO(#[from] std::io::Error), + Io(#[from] std::io::Error), Serialize(#[from] Box), FsExtraError(#[from] fs_extra::error::Error), SlotCleanedUp, diff --git a/ledger/src/entry.rs b/ledger/src/entry.rs index 6a03b42e01..1cde091107 100644 --- a/ledger/src/entry.rs +++ b/ledger/src/entry.rs @@ -214,8 +214,8 @@ pub struct GpuVerificationData { } pub enum DeviceVerificationData { - CPU(), - GPU(GpuVerificationData), + Cpu(), + Gpu(GpuVerificationData), } pub struct EntryVerificationState { @@ -257,7 +257,7 @@ impl EntryVerificationState { pub fn finish_verify(&mut self, entries: &[Entry]) -> bool { match &mut self.device_verification_data { - DeviceVerificationData::GPU(verification_state) => { + DeviceVerificationData::Gpu(verification_state) => { let gpu_time_us = verification_state.thread_h.take().unwrap().join().unwrap(); let mut verify_check_time = Measure::start("verify_check"); @@ -297,7 +297,7 @@ impl EntryVerificationState { }; res } - DeviceVerificationData::CPU() => { + DeviceVerificationData::Cpu() => { self.verification_status == EntryVerificationStatus::Success } } @@ -380,7 +380,7 @@ impl EntrySlice for [Entry] { }, poh_duration_us, transaction_duration_us: 0, - device_verification_data: DeviceVerificationData::CPU(), + device_verification_data: DeviceVerificationData::Cpu(), } } @@ -464,7 +464,7 @@ impl EntrySlice for [Entry] { }, poh_duration_us, transaction_duration_us: 0, - device_verification_data: DeviceVerificationData::CPU(), + device_verification_data: DeviceVerificationData::Cpu(), } } @@ -527,7 +527,7 @@ impl EntrySlice for [Entry] { verification_status: EntryVerificationStatus::Failure, transaction_duration_us, poh_duration_us: 0, - device_verification_data: DeviceVerificationData::CPU(), + device_verification_data: DeviceVerificationData::Cpu(), }; } @@ -607,7 +607,7 @@ impl EntrySlice for [Entry] { }) }); - let device_verification_data = DeviceVerificationData::GPU(GpuVerificationData { + let device_verification_data = DeviceVerificationData::Gpu(GpuVerificationData { thread_h: Some(gpu_verify_thread), tx_hashes, hashes: Some(hashes), diff --git a/log-analyzer/src/main.rs b/log-analyzer/src/main.rs index 8d0852b02a..a8eaf41bc5 100644 --- a/log-analyzer/src/main.rs +++ 
b/log-analyzer/src/main.rs @@ -11,7 +11,7 @@ use std::ops::Sub; use std::path::PathBuf; #[derive(Deserialize, Serialize, Debug)] -struct IPAddrMapping { +struct IpAddrMapping { private: String, public: String, } @@ -90,7 +90,7 @@ impl Sub for &LogLine { } } -fn map_ip_address(mappings: &[IPAddrMapping], target: String) -> String { +fn map_ip_address(mappings: &[IpAddrMapping], target: String) -> String { for mapping in mappings { if target.contains(&mapping.private) { return target.replace(&mapping.private, mapping.public.as_str()); @@ -100,7 +100,7 @@ fn map_ip_address(mappings: &[IPAddrMapping], target: String) -> String { } fn process_iftop_logs(matches: &ArgMatches) { - let mut map_list: Vec = vec![]; + let mut map_list: Vec = vec![]; if let ("map-IP", Some(args_matches)) = matches.subcommand() { let mut list = args_matches .value_of("list") diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index ad93ec9ad4..e55fe3cb91 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -873,7 +873,7 @@ impl ProgramTestContext { )); bank_forks.set_root( pre_warp_slot, - &solana_runtime::accounts_background_service::ABSRequestSender::default(), + &solana_runtime::accounts_background_service::AbsRequestSender::default(), Some(warp_slot), ); diff --git a/programs/bpf/benches/bpf_loader.rs b/programs/bpf/benches/bpf_loader.rs index 97bcbc9ea5..23e784c9d1 100644 --- a/programs/bpf/benches/bpf_loader.rs +++ b/programs/bpf/benches/bpf_loader.rs @@ -7,7 +7,7 @@ extern crate solana_bpf_loader_program; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use solana_bpf_loader_program::{ - create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BPFError, + create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BpfError, ThisInstructionMeter, }; use solana_measure::measure::Measure; @@ -76,7 +76,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) { bencher.iter(|| { let _ = - Executable::::from_elf(&elf, None, Config::default()) + Executable::::from_elf(&elf, None, Config::default()) .unwrap(); }); } @@ -95,7 +95,7 @@ fn bench_program_alu(bencher: &mut Bencher) { let elf = load_elf("bench_alu").unwrap(); let mut executable = - Executable::::from_elf(&elf, None, Config::default()) + Executable::::from_elf(&elf, None, Config::default()) .unwrap(); executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); executable.jit_compile().unwrap(); @@ -221,7 +221,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let elf = load_elf("tuner").unwrap(); let mut executable = - Executable::::from_elf(&elf, None, Config::default()) + Executable::::from_elf(&elf, None, Config::default()) .unwrap(); executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); let compute_meter = invoke_context.get_compute_meter(); diff --git a/programs/bpf_loader/src/allocator_bump.rs b/programs/bpf_loader/src/allocator_bump.rs index 406bb1481d..05d68ba912 100644 --- a/programs/bpf_loader/src/allocator_bump.rs +++ b/programs/bpf_loader/src/allocator_bump.rs @@ -4,14 +4,14 @@ use alloc::{Alloc, AllocErr}; use std::alloc::Layout; #[derive(Debug)] -pub struct BPFAllocator { +pub struct BpfAllocator { heap: Vec, start: u64, len: u64, pos: u64, } -impl BPFAllocator { +impl BpfAllocator { pub fn new(heap: Vec, virtual_address: u64) -> Self { let len = heap.len() as u64; Self { @@ -23,7 +23,7 @@ impl BPFAllocator { } } -impl Alloc for BPFAllocator { +impl Alloc for BpfAllocator { fn alloc(&mut self, layout: 
Layout) -> Result { let bytes_to_align = (self.pos as *const u8).align_offset(layout.align()) as u64; if self diff --git a/programs/bpf_loader/src/bpf_verifier.rs b/programs/bpf_loader/src/bpf_verifier.rs index c87b5c0dea..69eb987035 100644 --- a/programs/bpf_loader/src/bpf_verifier.rs +++ b/programs/bpf_loader/src/bpf_verifier.rs @@ -1,4 +1,6 @@ -use crate::BPFError; +#![allow(clippy::upper_case_acronyms)] + +use crate::BpfError; use solana_rbpf::ebpf; use thiserror::Error; @@ -58,7 +60,7 @@ fn adj_insn_ptr(insn_ptr: usize) -> usize { insn_ptr + ebpf::ELF_INSN_DUMP_OFFSET } -fn check_prog_len(prog: &[u8]) -> Result<(), BPFError> { +fn check_prog_len(prog: &[u8]) -> Result<(), BpfError> { if prog.len() % ebpf::INSN_SIZE != 0 { return Err(VerifierError::ProgramLengthNotMultiple.into()); } @@ -72,21 +74,21 @@ fn check_prog_len(prog: &[u8]) -> Result<(), BPFError> { Ok(()) } -fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BPFError> { +fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BpfError> { if insn.imm == 0 { return Err(VerifierError::DivisionByZero(adj_insn_ptr(insn_ptr)).into()); } Ok(()) } -fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BPFError> { +fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BpfError> { match insn.imm { 16 | 32 | 64 => Ok(()), _ => Err(VerifierError::UnsupportedLEBEArgument(adj_insn_ptr(insn_ptr)).into()), } } -fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { +fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BpfError> { if insn_ptr + 1 >= (prog.len() / ebpf::INSN_SIZE) { // Last instruction cannot be LD_DW because there would be no 2nd DW return Err(VerifierError::LDDWCannotBeLast.into()); @@ -98,7 +100,7 @@ fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { Ok(()) } -fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { +fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BpfError> { let insn = ebpf::get_insn(prog, insn_ptr); // if insn.off == -1 { // return Err(VerifierError::InfiniteLoop(adj_insn_ptr(insn_ptr)).into()); @@ -121,7 +123,7 @@ fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { Ok(()) } -fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), BPFError> { +fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), BpfError> { if insn.src > 10 { return Err(VerifierError::InvalidSourceRegister(adj_insn_ptr(insn_ptr)).into()); } @@ -149,7 +151,7 @@ fn check_imm_register(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Verifier } #[rustfmt::skip] -pub fn check(prog: &[u8]) -> Result<(), BPFError> { +pub fn check(prog: &[u8]) -> Result<(), BpfError> { check_prog_len(prog)?; let mut insn_ptr: usize = 0; diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 42ebe7e4a3..8f0837f9d3 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -50,17 +50,17 @@ solana_sdk::declare_builtin!( /// Errors returned by functions the BPF Loader registers with the VM #[derive(Debug, Error, PartialEq)] -pub enum BPFError { +pub enum BpfError { #[error("{0}")] VerifierError(#[from] VerifierError), #[error("{0}")] SyscallError(#[from] SyscallError), } -impl UserDefinedError for BPFError {} +impl UserDefinedError for BpfError {} fn map_ebpf_error( invoke_context: &mut dyn InvokeContext, - e: EbpfError, + e: EbpfError, ) -> InstructionError { ic_msg!(invoke_context, 
"{}", e); InstructionError::InvalidAccountData @@ -71,9 +71,9 @@ pub fn create_and_cache_executor( data: &[u8], invoke_context: &mut dyn InvokeContext, use_jit: bool, -) -> Result, InstructionError> { +) -> Result, InstructionError> { let bpf_compute_budget = invoke_context.get_bpf_compute_budget(); - let mut program = Executable::::from_elf( + let mut program = Executable::::from_elf( data, None, Config { @@ -100,7 +100,7 @@ pub fn create_and_cache_executor( return Err(InstructionError::ProgramFailedToCompile); } } - let executor = Arc::new(BPFExecutor { program }); + let executor = Arc::new(BpfExecutor { program }); invoke_context.add_executor(key, executor.clone()); Ok(executor) } @@ -138,11 +138,11 @@ const DEFAULT_HEAP_SIZE: usize = 32 * 1024; /// Create the BPF virtual machine pub fn create_vm<'a>( loader_id: &'a Pubkey, - program: &'a dyn Executable, + program: &'a dyn Executable, parameter_bytes: &mut [u8], parameter_accounts: &'a [KeyedAccount<'a>], invoke_context: &'a mut dyn InvokeContext, -) -> Result, EbpfError> { +) -> Result, EbpfError> { let heap = vec![0_u8; DEFAULT_HEAP_SIZE]; let heap_region = MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true); let mut vm = EbpfVm::new(program, parameter_bytes, &[heap_region])?; @@ -743,18 +743,18 @@ impl InstructionMeter for ThisInstructionMeter { } /// BPF Loader's Executor implementation -pub struct BPFExecutor { - program: Box>, +pub struct BpfExecutor { + program: Box>, } // Well, implement Debug for solana_rbpf::vm::Executable in solana-rbpf... -impl Debug for BPFExecutor { +impl Debug for BpfExecutor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "BPFExecutor({:p})", self) + write!(f, "BpfExecutor({:p})", self) } } -impl Executor for BPFExecutor { +impl Executor for BpfExecutor { fn execute( &self, loader_id: &Pubkey, @@ -814,7 +814,7 @@ impl Executor for BPFExecutor { } Err(error) => { let error = match error { - EbpfError::UserError(BPFError::SyscallError( + EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(error), )) => error, err => { @@ -884,14 +884,14 @@ mod tests { ]; let input = &mut [0x00]; - let program = Executable::::from_text_bytes( + let program = Executable::::from_text_bytes( program, None, Config::default(), ) .unwrap(); let mut vm = - EbpfVm::::new(program.as_ref(), input, &[]).unwrap(); + EbpfVm::::new(program.as_ref(), input, &[]).unwrap(); let mut instruction_meter = TestInstructionMeter { remaining: 10 }; vm.execute_program_interpreted(&mut instruction_meter) .unwrap(); diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 0ddec47f4f..ca2d028076 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -1,4 +1,4 @@ -use crate::{alloc, BPFError}; +use crate::{alloc, BpfError}; use alloc::Alloc; use curve25519_dalek::{ristretto::RistrettoPoint, scalar::Scalar}; use solana_rbpf::{ @@ -66,17 +66,17 @@ pub enum SyscallError { #[error("Too many accounts passed to inner instruction")] TooManyAccounts, } -impl From for EbpfError { +impl From for EbpfError { fn from(error: SyscallError) -> Self { EbpfError::UserError(error.into()) } } trait SyscallConsume { - fn consume(&mut self, amount: u64) -> Result<(), EbpfError>; + fn consume(&mut self, amount: u64) -> Result<(), EbpfError>; } impl SyscallConsume for Rc> { - fn consume(&mut self, amount: u64) -> Result<(), EbpfError> { + fn consume(&mut self, amount: u64) -> Result<(), EbpfError> { self.try_borrow_mut() .map_err(|_| 
SyscallError::InvokeContextBorrowFailed)? .consume(amount) @@ -91,11 +91,11 @@ impl SyscallConsume for Rc> { /// Only one allocator is currently supported /// Simple bump allocator, never frees -use crate::allocator_bump::BPFAllocator; +use crate::allocator_bump::BpfAllocator; pub fn register_syscalls( invoke_context: &mut dyn InvokeContext, -) -> Result> { +) -> Result> { let mut syscall_registry = SyscallRegistry::default(); syscall_registry.register_syscall_by_name(b"abort", SyscallAbort::call)?; @@ -147,11 +147,11 @@ macro_rules! bind_feature_gated_syscall_context_object { pub fn bind_syscall_context_objects<'a>( loader_id: &'a Pubkey, - vm: &mut EbpfVm<'a, BPFError, crate::ThisInstructionMeter>, + vm: &mut EbpfVm<'a, BpfError, crate::ThisInstructionMeter>, callers_keyed_accounts: &'a [KeyedAccount<'a>], invoke_context: &'a mut dyn InvokeContext, heap: Vec, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { let bpf_compute_budget = invoke_context.get_bpf_compute_budget(); // Syscall functions common across languages @@ -270,7 +270,7 @@ pub fn bind_syscall_context_objects<'a>( vm.bind_syscall_context_object( Box::new(SyscallAllocFree { aligned: *loader_id != bpf_loader_deprecated::id(), - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }), None, )?; @@ -283,8 +283,8 @@ fn translate( access_type: AccessType, vm_addr: u64, len: u64, -) -> Result> { - memory_mapping.map::(access_type, vm_addr, len) +) -> Result> { + memory_mapping.map::(access_type, vm_addr, len) } fn translate_type_inner<'a, T>( @@ -292,7 +292,7 @@ fn translate_type_inner<'a, T>( access_type: AccessType, vm_addr: u64, loader_id: &Pubkey, -) -> Result<&'a mut T, EbpfError> { +) -> Result<&'a mut T, EbpfError> { if loader_id != &bpf_loader_deprecated::id() && (vm_addr as u64 as *mut T).align_offset(align_of::()) != 0 { @@ -310,14 +310,14 @@ fn translate_type_mut<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, loader_id: &Pubkey, -) -> Result<&'a mut T, EbpfError> { +) -> Result<&'a mut T, EbpfError> { translate_type_inner::(memory_mapping, AccessType::Store, vm_addr, loader_id) } fn translate_type<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, loader_id: &Pubkey, -) -> Result<&'a T, EbpfError> { +) -> Result<&'a T, EbpfError> { match translate_type_inner::(memory_mapping, AccessType::Load, vm_addr, loader_id) { Ok(value) => Ok(&*value), Err(e) => Err(e), @@ -330,7 +330,7 @@ fn translate_slice_inner<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, -) -> Result<&'a mut [T], EbpfError> { +) -> Result<&'a mut [T], EbpfError> { if loader_id != &bpf_loader_deprecated::id() && (vm_addr as u64 as *mut T).align_offset(align_of::()) != 0 { @@ -354,7 +354,7 @@ fn translate_slice_mut<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, -) -> Result<&'a mut [T], EbpfError> { +) -> Result<&'a mut [T], EbpfError> { translate_slice_inner::(memory_mapping, AccessType::Store, vm_addr, len, loader_id) } fn translate_slice<'a, T>( @@ -362,7 +362,7 @@ fn translate_slice<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, -) -> Result<&'a [T], EbpfError> { +) -> Result<&'a [T], EbpfError> { match translate_slice_inner::(memory_mapping, AccessType::Load, vm_addr, len, loader_id) { Ok(value) => Ok(&*value), Err(e) => Err(e), @@ -376,8 +376,8 @@ fn translate_string_and_do( addr: u64, len: u64, loader_id: &Pubkey, - work: &mut dyn FnMut(&str) -> Result>, -) -> Result> { + work: &mut dyn FnMut(&str) -> Result>, +) -> Result> { let buf = translate_slice::(memory_mapping, 
addr, len, loader_id)?; let i = match buf.iter().position(|byte| *byte == 0) { Some(i) => i, @@ -394,7 +394,7 @@ fn translate_string_and_do( /// `abort()` is not intended to be called explicitly by the program. /// Causes the BPF program to be halted immediately pub struct SyscallAbort {} -impl SyscallObject for SyscallAbort { +impl SyscallObject for SyscallAbort { fn call( &mut self, _arg1: u64, @@ -403,7 +403,7 @@ impl SyscallObject for SyscallAbort { _arg4: u64, _arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { *result = Err(SyscallError::Abort.into()); } @@ -416,7 +416,7 @@ pub struct SyscallPanic<'a> { compute_meter: Option>>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallPanic<'a> { +impl<'a> SyscallObject for SyscallPanic<'a> { fn call( &mut self, file: u64, @@ -425,7 +425,7 @@ impl<'a> SyscallObject for SyscallPanic<'a> { column: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { if let Some(ref mut compute_meter) = self.compute_meter { question_mark!(compute_meter.consume(len), result); @@ -448,7 +448,7 @@ pub struct SyscallLog<'a> { logger: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallLog<'a> { +impl<'a> SyscallObject for SyscallLog<'a> { fn call( &mut self, addr: u64, @@ -457,7 +457,7 @@ impl<'a> SyscallObject for SyscallLog<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { if self.per_byte_cost { question_mark!(self.compute_meter.consume(len), result); @@ -487,7 +487,7 @@ pub struct SyscallLogU64 { compute_meter: Rc>, logger: Rc>, } -impl SyscallObject for SyscallLogU64 { +impl SyscallObject for SyscallLogU64 { fn call( &mut self, arg1: u64, @@ -496,7 +496,7 @@ impl SyscallObject for SyscallLogU64 { arg4: u64, arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); stable_log::program_log( @@ -516,7 +516,7 @@ pub struct SyscallLogBpfComputeUnits { compute_meter: Rc>, logger: Rc>, } -impl SyscallObject for SyscallLogBpfComputeUnits { +impl SyscallObject for SyscallLogBpfComputeUnits { fn call( &mut self, _arg1: u64, @@ -525,7 +525,7 @@ impl SyscallObject for SyscallLogBpfComputeUnits { _arg4: u64, _arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); let logger = question_mark!( @@ -551,7 +551,7 @@ pub struct SyscallLogPubkey<'a> { logger: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallLogPubkey<'a> { +impl<'a> SyscallObject for SyscallLogPubkey<'a> { fn call( &mut self, pubkey_addr: u64, @@ -560,7 +560,7 @@ impl<'a> SyscallObject for SyscallLogPubkey<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); let pubkey = question_mark!( @@ -580,9 +580,9 @@ impl<'a> SyscallObject for SyscallLogPubkey<'a> { /// to the VM to use for enforcement. 
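The BPF-loader renames in this area (BPFError → BpfError, BPFAllocator → BpfAllocator, BPFExecutor → BpfExecutor) follow Clippy's upper_case_acronyms convention; bpf_verifier.rs additionally gains a file-level #![allow(clippy::upper_case_acronyms)], presumably for VerifierError variants such as LDDWCannotBeLast and UnsupportedLEBEArgument that keep their acronym spelling. The error layering itself is untouched: SyscallError converts into BpfError via #[from], and BpfError reaches the VM wrapped in EbpfError::UserError, which is what the tests below match against. A self-contained sketch of that chain (the EbpfError enum here is a one-variant stand-in for solana_rbpf's type, which is generic over the user-defined error):

use thiserror::Error;

#[derive(Debug, Error, PartialEq)]
pub enum SyscallError {
    #[error("aborted")]
    Abort,
}

#[derive(Debug, Error, PartialEq)]
pub enum BpfError {
    #[error("{0}")]
    SyscallError(#[from] SyscallError),
}

// Stand-in for solana_rbpf's EbpfError<E: UserDefinedError>.
#[derive(Debug, PartialEq)]
pub enum EbpfError<E> {
    UserError(E),
}

impl From<SyscallError> for EbpfError<BpfError> {
    fn from(error: SyscallError) -> Self {
        // Mirrors the impl near the top of syscalls.rs:
        // SyscallError -> BpfError (via #[from]) -> EbpfError::UserError.
        EbpfError::UserError(error.into())
    }
}

fn main() {
    let e: EbpfError<BpfError> = SyscallError::Abort.into();
    assert_eq!(
        e,
        EbpfError::UserError(BpfError::SyscallError(SyscallError::Abort))
    );
}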
pub struct SyscallAllocFree { aligned: bool, - allocator: BPFAllocator, + allocator: BpfAllocator, } -impl SyscallObject for SyscallAllocFree { +impl SyscallObject for SyscallAllocFree { fn call( &mut self, size: u64, @@ -591,7 +591,7 @@ impl SyscallObject for SyscallAllocFree { _arg4: u64, _arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { let align = if self.aligned { align_of::() @@ -623,7 +623,7 @@ fn translate_program_address_inputs<'a>( program_id_addr: u64, memory_mapping: &MemoryMapping, loader_id: &Pubkey, -) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { +) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { let untranslated_seeds = translate_slice::<&[&u8]>(memory_mapping, seeds_addr, seeds_len, loader_id)?; if untranslated_seeds.len() > MAX_SEEDS { @@ -639,7 +639,7 @@ fn translate_program_address_inputs<'a>( loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let program_id = translate_type::(memory_mapping, program_id_addr, loader_id)?; Ok((seeds, program_id)) } @@ -650,7 +650,7 @@ struct SyscallCreateProgramAddress<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { +impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { fn call( &mut self, seeds_addr: u64, @@ -659,7 +659,7 @@ impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { address_addr: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { let (seeds, program_id) = question_mark!( translate_program_address_inputs( @@ -695,7 +695,7 @@ struct SyscallTryFindProgramAddress<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { +impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { fn call( &mut self, seeds_addr: u64, @@ -704,7 +704,7 @@ impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { address_addr: u64, bump_seed_addr: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { let (seeds, program_id) = question_mark!( translate_program_address_inputs( @@ -754,7 +754,7 @@ pub struct SyscallSha256<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallSha256<'a> { +impl<'a> SyscallObject for SyscallSha256<'a> { fn call( &mut self, vals_addr: u64, @@ -763,7 +763,7 @@ impl<'a> SyscallObject for SyscallSha256<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.sha256_base_cost), result); let hash_result = question_mark!( @@ -810,7 +810,7 @@ pub struct SyscallRistrettoMul<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallRistrettoMul<'a> { +impl<'a> SyscallObject for SyscallRistrettoMul<'a> { fn call( &mut self, point_addr: u64, @@ -819,7 +819,7 @@ impl<'a> SyscallObject for SyscallRistrettoMul<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); @@ -859,14 +859,14 @@ type TranslatedAccounts<'a> = ( /// Implemented by language specific data structure translators trait SyscallInvokeSigned<'a> { - fn get_context_mut(&self) -> Result, EbpfError>; - fn get_context(&self) -> Result, EbpfError>; + fn get_context_mut(&self) -> Result, EbpfError>; + fn get_context(&self) -> Result, EbpfError>; fn get_callers_keyed_accounts(&self) -> &'a 
[KeyedAccount<'a>]; fn translate_instruction( &self, addr: u64, memory_mapping: &MemoryMapping, - ) -> Result>; + ) -> Result>; fn translate_accounts( &self, account_keys: &[Pubkey], @@ -874,14 +874,14 @@ trait SyscallInvokeSigned<'a> { account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError>; + ) -> Result, EbpfError>; fn translate_signers( &self, program_id: &Pubkey, signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError>; + ) -> Result, EbpfError>; } /// Cross-program invocation called from Rust @@ -891,12 +891,12 @@ pub struct SyscallInvokeSignedRust<'a> { loader_id: &'a Pubkey, } impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { - fn get_context_mut(&self) -> Result, EbpfError> { + fn get_context_mut(&self) -> Result, EbpfError> { self.invoke_context .try_borrow_mut() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) } - fn get_context(&self) -> Result, EbpfError> { + fn get_context(&self) -> Result, EbpfError> { self.invoke_context .try_borrow() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) @@ -908,7 +908,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, - ) -> Result> { + ) -> Result> { let ix = translate_type::(memory_mapping, addr, self.loader_id)?; check_instruction_size( @@ -945,7 +945,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let invoke_context = self.invoke_context.borrow(); let account_infos = translate_slice::( @@ -964,7 +964,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { self.loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let translate = |account_info: &AccountInfo| { // Translate the account from user space @@ -1052,7 +1052,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let mut signers = Vec::new(); if signers_seeds_len > 0 { let signers_seeds = translate_slice::<&[&[u8]]>( @@ -1087,7 +1087,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { self.loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let signer = Pubkey::create_program_address(&seeds, program_id) .map_err(SyscallError::BadSeeds)?; signers.push(signer); @@ -1098,7 +1098,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { } } } -impl<'a> SyscallObject for SyscallInvokeSignedRust<'a> { +impl<'a> SyscallObject for SyscallInvokeSignedRust<'a> { fn call( &mut self, instruction_addr: u64, @@ -1107,7 +1107,7 @@ impl<'a> SyscallObject for SyscallInvokeSignedRust<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { *result = call( self, @@ -1174,12 +1174,12 @@ pub struct SyscallInvokeSignedC<'a> { loader_id: &'a Pubkey, } impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { - fn get_context_mut(&self) -> Result, EbpfError> { + fn get_context_mut(&self) -> Result, EbpfError> { self.invoke_context .try_borrow_mut() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) } - fn get_context(&self) -> Result, EbpfError> { + fn get_context(&self) -> Result, EbpfError> { 
self.invoke_context .try_borrow() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) @@ -1193,7 +1193,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, - ) -> Result> { + ) -> Result> { let ix_c = translate_type::(memory_mapping, addr, self.loader_id)?; check_instruction_size( @@ -1227,7 +1227,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { is_writable: meta_c.is_writable, }) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; Ok(Instruction { program_id: *program_id, @@ -1243,7 +1243,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let invoke_context = self.invoke_context.borrow(); let account_infos = translate_slice::( @@ -1258,7 +1258,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { .map(|account_info| { translate_type::(memory_mapping, account_info.key_addr, self.loader_id) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let translate = |account_info: &SolAccountInfo| { // Translate the account from user space @@ -1335,7 +1335,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { if signers_seeds_len > 0 { let signers_seeds = translate_slice::( memory_mapping, @@ -1371,17 +1371,17 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { self.loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; Pubkey::create_program_address(&seeds_bytes, program_id) .map_err(|err| SyscallError::BadSeeds(err).into()) }) - .collect::, EbpfError>>()?) + .collect::, EbpfError>>()?) 
} else { Ok(vec![]) } } } -impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { +impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { fn call( &mut self, instruction_addr: u64, @@ -1390,7 +1390,7 @@ impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { *result = call( self, @@ -1411,9 +1411,9 @@ fn get_translated_accounts<'a, T, F>( account_infos: &[T], invoke_context: &Ref<&mut dyn InvokeContext>, do_translate: F, -) -> Result, EbpfError> +) -> Result, EbpfError> where - F: Fn(&T) -> Result, EbpfError>, + F: Fn(&T) -> Result, EbpfError>, { let mut accounts = Vec::with_capacity(account_keys.len()); let mut refs = Vec::with_capacity(account_keys.len()); @@ -1466,7 +1466,7 @@ fn check_instruction_size( num_accounts: usize, data_len: usize, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { let size = num_accounts .saturating_mul(size_of::()) .saturating_add(data_len); @@ -1482,7 +1482,7 @@ fn check_instruction_size( fn check_account_infos( len: usize, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if len * size_of::() > invoke_context .get_bpf_compute_budget() @@ -1498,7 +1498,7 @@ fn check_account_infos( fn check_authorized_program( program_id: &Pubkey, instruction_data: &[u8], -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if native_loader::check_id(program_id) || bpf_loader::check_id(program_id) || bpf_loader_deprecated::check_id(program_id) @@ -1514,7 +1514,7 @@ fn get_upgradeable_executable( callee_program_id: &Pubkey, program_account: &RefCell, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result)>, EbpfError> { +) -> Result)>, EbpfError> { if program_account.borrow().owner == bpf_loader_upgradeable::id() { match program_account.borrow().state() { Ok(UpgradeableLoaderState::Program { @@ -1554,7 +1554,7 @@ fn call<'a>( signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, -) -> Result> { +) -> Result> { let (message, executables, accounts, account_refs, caller_privileges) = { let invoke_context = syscall.get_context()?; @@ -1812,7 +1812,7 @@ mod tests { let translated_instruction = translate_type::(&memory_mapping, 96, &bpf_loader::id()).unwrap(); assert_eq!(instruction, *translated_instruction); - memory_mapping.resize_region::(0, 1).unwrap(); + memory_mapping.resize_region::(0, 1).unwrap(); assert!(translate_type::(&memory_mapping, 100, &bpf_loader::id()).is_err()); } @@ -1950,7 +1950,7 @@ mod tests { #[should_panic(expected = "UserError(SyscallError(Abort))")] fn test_syscall_abort() { let memory_mapping = MemoryMapping::new(vec![MemoryRegion::default()], &DEFAULT_CONFIG); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); SyscallAbort::call( &mut SyscallAbort {}, 0, @@ -1988,7 +1988,7 @@ mod tests { compute_meter: Some(compute_meter), loader_id: &bpf_loader::id(), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_panic.call( 100, string.len() as u64, @@ -1999,7 +1999,7 @@ mod tests { &mut result, ); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2009,7 +2009,7 @@ mod tests { compute_meter: None, loader_id: &bpf_loader::id(), }; - let mut result: Result> = Ok(0); + let mut result: 
Result> = Ok(0); syscall_panic.call( 100, string.len() as u64, @@ -2050,7 +2050,7 @@ mod tests { &DEFAULT_CONFIG, ); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2064,7 +2064,7 @@ mod tests { assert_eq!(log.borrow().len(), 1); assert_eq!(log.borrow()[0], "Program log: Gaggablaghblagh!"); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 101, // AccessViolation string.len() as u64, @@ -2075,7 +2075,7 @@ mod tests { &mut result, ); assert_access_violation!(result, 101, string.len() as u64); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64 * 2, // AccessViolation @@ -2086,7 +2086,7 @@ mod tests { &mut result, ); assert_access_violation!(result, 100, string.len() as u64 * 2); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2097,7 +2097,7 @@ mod tests { &mut result, ); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2115,7 +2115,7 @@ mod tests { logger, loader_id: &bpf_loader::id(), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2126,7 +2126,7 @@ mod tests { &mut result, ); result.unwrap(); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2137,7 +2137,7 @@ mod tests { &mut result, ); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2160,7 +2160,7 @@ mod tests { }; let memory_mapping = MemoryMapping::new(vec![], &DEFAULT_CONFIG); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log_u64.call(1, 2, 3, 4, 5, &memory_mapping, &mut result); result.unwrap(); @@ -2195,7 +2195,7 @@ mod tests { &DEFAULT_CONFIG, ); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_pubkey.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); result.unwrap(); assert_eq!(log.borrow().len(), 1); @@ -2203,7 +2203,7 @@ mod tests { log.borrow()[0], "Program log: MoqiU1vryuCGQSxFKA1SZ316JdLEFFhoAu6cKUNk7dN" ); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_pubkey.call( 101, // AccessViolation 32, @@ -2214,10 +2214,10 @@ mod tests { &mut result, ); assert_access_violation!(result, 101, 32); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_pubkey.call(100, 32, 0, 0, 0, &memory_mapping, &mut result); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2235,15 +2235,15 @@ mod tests { ); let mut syscall = SyscallAllocFree { aligned: true, - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); assert_ne!(result.unwrap(), 0); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); 
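The SyscallAllocFree tests in this hunk exercise BpfAllocator, the bump allocator each program gets for its allocation syscall: allocations are carved out of a fixed heap mapped at MM_HEAP_START, nothing is ever freed, and once the heap is exhausted the syscall reports failure as address 0, which is what the assert_eq!(result.unwrap(), 0) checks verify. A simplified sketch of that behavior (the field and method shapes are assumptions based on the struct shown earlier, not the exact BpfAllocator code; the real allocator also owns the heap Vec and handles both aligned and unaligned modes):

// Simplified bump allocation; alignment assumed to be a power of two.
struct Bump {
    start: u64, // virtual address the heap is mapped at (MM_HEAP_START analog)
    len: u64,   // heap size in bytes
    pos: u64,   // bump cursor, only ever moves forward
}

impl Bump {
    fn alloc(&mut self, size: u64, align: u64) -> Option<u64> {
        // Round the cursor up to the requested alignment.
        let aligned = (self.pos + align - 1) & !(align - 1);
        if aligned.saturating_add(size) <= self.len {
            self.pos = aligned + size;
            Some(self.start + aligned)
        } else {
            None // heap exhausted: the syscall surfaces this as address 0
        }
    }
}

fn main() {
    let mut heap = Bump { start: 0x3000_0000, len: 100, pos: 0 };
    assert!(heap.alloc(1, 8).is_some());
    assert!(heap.alloc(100, 8).is_none()); // out of space, as in the tests above
}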
         assert_eq!(result.unwrap(), 0);
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(u64::MAX, 0, 0, 0, 0, &memory_mapping, &mut result);
         assert_eq!(result.unwrap(), 0);
     }
@@ -2256,14 +2256,14 @@ mod tests {
         );
         let mut syscall = SyscallAllocFree {
             aligned: false,
-            allocator: BPFAllocator::new(heap, MM_HEAP_START),
+            allocator: BpfAllocator::new(heap, MM_HEAP_START),
         };
         for _ in 0..100 {
-            let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+            let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
             syscall.call(1, 0, 0, 0, 0, &memory_mapping, &mut result);
             assert_ne!(result.unwrap(), 0);
         }
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result);
         assert_eq!(result.unwrap(), 0);
     }
@@ -2276,14 +2276,14 @@ mod tests {
         );
         let mut syscall = SyscallAllocFree {
             aligned: true,
-            allocator: BPFAllocator::new(heap, MM_HEAP_START),
+            allocator: BpfAllocator::new(heap, MM_HEAP_START),
         };
         for _ in 0..12 {
-            let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+            let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
             syscall.call(1, 0, 0, 0, 0, &memory_mapping, &mut result);
             assert_ne!(result.unwrap(), 0);
         }
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result);
         assert_eq!(result.unwrap(), 0);
     }
@@ -2297,9 +2297,9 @@ mod tests {
         );
         let mut syscall = SyscallAllocFree {
             aligned: true,
-            allocator: BPFAllocator::new(heap, MM_HEAP_START),
+            allocator: BpfAllocator::new(heap, MM_HEAP_START),
         };
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(
             size_of::() as u64,
             0,
@@ -2386,13 +2386,13 @@ mod tests {
             loader_id: &bpf_loader_deprecated::id(),
         };

-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(ro_va, ro_len, rw_va, 0, 0, &memory_mapping, &mut result);
         result.unwrap();

         let hash_local = hashv(&[bytes1.as_ref(), bytes2.as_ref()]).to_bytes();
         assert_eq!(hash_result, hash_local);
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(
             ro_va - 1, // AccessViolation
             ro_len,
@@ -2403,7 +2403,7 @@ mod tests {
             &mut result,
         );
         assert_access_violation!(result, ro_va - 1, ro_len);
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(
             ro_va,
             ro_len + 1, // AccessViolation
@@ -2414,7 +2414,7 @@ mod tests {
             &mut result,
         );
         assert_access_violation!(result, ro_va, ro_len + 1);
-        let mut result: Result<u64, EbpfError<BPFError>> = Ok(0);
+        let mut result: Result<u64, EbpfError<BpfError>> = Ok(0);
         syscall.call(
             ro_va,
             ro_len,
@@ -2428,7 +2428,7 @@ mod tests {
         syscall.call(ro_va, ro_len, rw_va, 0, 0, &memory_mapping, &mut result);

         assert_eq!(
-            Err(EbpfError::UserError(BPFError::SyscallError(
+            Err(EbpfError::UserError(BpfError::SyscallError(
                 SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded)
             ))),
             result
diff --git a/remote-wallet/src/ledger_error.rs b/remote-wallet/src/ledger_error.rs
index 6c714cb9c1..6e5b6e2296 100644
--- a/remote-wallet/src/ledger_error.rs
+++ b/remote-wallet/src/ledger_error.rs
@@ -37,7 +37,7 @@ pub enum LedgerError {
     SdkTimeout = 0x6810,

     #[error("Ledger PIC exception")]
-    SdkExceptionPIC = 0x6811,
+    SdkExceptionPic = 0x6811,

     #[error("Ledger app exit exception")]
     SdkExceptionAppExit = 0x6812,
diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs
index 8affdbf6fe..25b6f83dc2 100644
--- a/runtime/src/accounts.rs
+++ b/runtime/src/accounts.rs
@@ -1,5 +1,5 @@
 use crate::{
-    accounts_db::{AccountsDB, BankHashInfo, ErrorCounters, LoadedAccount, ScanStorageResult},
+    accounts_db::{AccountsDb, BankHashInfo,
ErrorCounters, LoadedAccount, ScanStorageResult}, accounts_index::{AccountIndex, Ancestors, IndexKey}, bank::{ NonceRollbackFull, NonceRollbackInfo, TransactionCheckResult, TransactionExecutionResult, @@ -51,8 +51,8 @@ pub struct Accounts { /// my epoch pub epoch: Epoch, - /// Single global AccountsDB - pub accounts_db: Arc, + /// Single global AccountsDb + pub accounts_db: Arc, /// set of writable accounts which are currently in the pipeline pub(crate) account_locks: Mutex>, @@ -93,7 +93,7 @@ impl Accounts { caching_enabled: bool, ) -> Self { Self { - accounts_db: Arc::new(AccountsDB::new_with_config( + accounts_db: Arc::new(AccountsDb::new_with_config( paths, cluster_type, account_indexes, @@ -117,7 +117,7 @@ impl Accounts { } } - pub(crate) fn new_empty(accounts_db: AccountsDB) -> Self { + pub(crate) fn new_empty(accounts_db: AccountsDb) -> Self { Self { accounts_db: Arc::new(accounts_db), account_locks: Mutex::new(HashSet::new()), @@ -553,14 +553,14 @@ impl Accounts { |total_capitalization: &mut u64, (_pubkey, loaded_account, _slot)| { let lamports = loaded_account.lamports(); if Self::is_loadable(lamports) { - let account_cap = AccountsDB::account_balance_for_capitalization( + let account_cap = AccountsDb::account_balance_for_capitalization( lamports, &loaded_account.owner(), loaded_account.executable(), simple_capitalization_enabled, ); - *total_capitalization = AccountsDB::checked_iterative_sum_for_capitalization( + *total_capitalization = AccountsDb::checked_iterative_sum_for_capitalization( *total_capitalization, account_cap, ); @@ -1624,7 +1624,7 @@ mod tests { let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); - // Load accounts owned by various programs into AccountsDB + // Load accounts owned by various programs into AccountsDb let pubkey0 = solana_sdk::pubkey::new_rand(); let account0 = Account::new(1, 0, &Pubkey::new(&[2; 32])); accounts.store_slow_uncached(0, &pubkey0, &account0); diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 6d9de94c27..da29bc8764 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -205,13 +205,13 @@ impl SnapshotRequestHandler { } #[derive(Default)] -pub struct ABSRequestSender { +pub struct AbsRequestSender { snapshot_request_sender: Option, } -impl ABSRequestSender { +impl AbsRequestSender { pub fn new(snapshot_request_sender: Option) -> Self { - ABSRequestSender { + AbsRequestSender { snapshot_request_sender, } } @@ -232,12 +232,12 @@ impl ABSRequestSender { } } -pub struct ABSRequestHandler { +pub struct AbsRequestHandler { pub snapshot_request_handler: Option, pub pruned_banks_receiver: DroppedSlotsReceiver, } -impl ABSRequestHandler { +impl AbsRequestHandler { // Returns the latest requested snapshot block height, if one exists pub fn handle_snapshot_requests( &self, @@ -275,7 +275,7 @@ impl AccountsBackgroundService { pub fn new( bank_forks: Arc>, exit: &Arc, - request_handler: ABSRequestHandler, + request_handler: AbsRequestHandler, accounts_db_caching_enabled: bool, test_hash_calculation: bool, use_index_hash_calculation: bool, @@ -378,7 +378,7 @@ impl AccountsBackgroundService { fn remove_dead_slots( bank: &Bank, - request_handler: &ABSRequestHandler, + request_handler: &AbsRequestHandler, removed_slots_count: &mut usize, total_remove_slots_time: &mut u64, ) { @@ -411,7 +411,7 @@ mod test { let genesis = create_genesis_config(10); let bank0 = 
Arc::new(Bank::new(&genesis.genesis_config)); let (pruned_banks_sender, pruned_banks_receiver) = unbounded(); - let request_handler = ABSRequestHandler { + let request_handler = AbsRequestHandler { snapshot_request_handler: None, pruned_banks_receiver, }; diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 2e22f677d1..22a57c8261 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -14,7 +14,7 @@ //! //! AppendVec's only store accounts for single slots. To bootstrap the //! index from a persistent store of AppendVec's, the entries include -//! a "write_version". A single global atomic `AccountsDB::write_version` +//! a "write_version". A single global atomic `AccountsDb::write_version` //! tracks the number of commits to the entire data store. So the latest //! commit for each slot entry would be indexed. @@ -53,7 +53,7 @@ use std::{ boxed::Box, collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, convert::TryFrom, - io::{Error as IOError, Result as IOResult}, + io::{Error as IoError, Result as IoResult}, ops::RangeBounds, path::{Path, PathBuf}, sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, @@ -96,7 +96,7 @@ const CACHE_VIRTUAL_STORED_SIZE: usize = 0; type DashMapVersionHash = DashMap; lazy_static! { - // FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDB panic has occurred, + // FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDb panic has occurred, // as |cargo test| cannot observe panics in other threads pub static ref FROZEN_ACCOUNT_PANIC: Arc = Arc::new(AtomicBool::new(false)); } @@ -152,7 +152,7 @@ impl ZeroLamport for AccountInfo { } } -/// An offset into the AccountsDB::storage vector +/// An offset into the AccountsDb::storage vector pub type AppendVecId = usize; pub type SnapshotStorage = Vec>; pub type SnapshotStorages = Vec; @@ -254,10 +254,10 @@ impl<'a> LoadedAccount<'a> { pub fn compute_hash(&self, slot: Slot, cluster_type: &ClusterType, pubkey: &Pubkey) -> Hash { match self { LoadedAccount::Stored(stored_account_meta) => { - AccountsDB::hash_stored_account(slot, &stored_account_meta, cluster_type) + AccountsDb::hash_stored_account(slot, &stored_account_meta, cluster_type) } LoadedAccount::Cached((_, cached_account)) => { - AccountsDB::hash_account(slot, &cached_account.account, pubkey, cluster_type) + AccountsDb::hash_account(slot, &cached_account.account, pubkey, cluster_type) } } } @@ -477,7 +477,7 @@ impl AccountStorageEntry { self.id.load(Ordering::Relaxed) } - pub fn flush(&self) -> Result<(), IOError> { + pub fn flush(&self) -> Result<(), IoError> { self.accounts.flush() } @@ -552,8 +552,8 @@ impl AccountStorageEntry { } } -pub fn get_temp_accounts_paths(count: u32) -> IOResult<(Vec, Vec)> { - let temp_dirs: IOResult> = (0..count).map(|_| TempDir::new()).collect(); +pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec, Vec)> { + let temp_dirs: IoResult> = (0..count).map(|_| TempDir::new()).collect(); let temp_dirs = temp_dirs?; let paths: Vec = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect(); Ok((temp_dirs, paths)) @@ -651,7 +651,7 @@ impl RecycleStores { // This structure handles the load/store of the accounts #[derive(Debug)] -pub struct AccountsDB { +pub struct AccountsDb { /// Keeps tracks of index into AppendVec on a per slot basis pub accounts_index: AccountsIndex, @@ -1003,9 +1003,9 @@ pub fn make_min_priority_thread_pool() -> ThreadPool { } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION))] -impl 
solana_frozen_abi::abi_example::AbiExample for AccountsDB { +impl solana_frozen_abi::abi_example::AbiExample for AccountsDb { fn example() -> Self { - let accounts_db = AccountsDB::new_single(); + let accounts_db = AccountsDb::new_single(); let key = Pubkey::default(); let some_data_len = 5; let some_slot: Slot = 0; @@ -1017,13 +1017,13 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDB { } } -impl Default for AccountsDB { +impl Default for AccountsDb { fn default() -> Self { let num_threads = get_thread_count(); let mut bank_hashes = HashMap::new(); bank_hashes.insert(0, BankHashInfo::default()); - AccountsDB { + AccountsDb { accounts_index: AccountsIndex::default(), storage: AccountStorage::default(), accounts_cache: AccountsCache::default(), @@ -1057,9 +1057,9 @@ impl Default for AccountsDB { } } -impl AccountsDB { +impl AccountsDb { pub fn new(paths: Vec, cluster_type: &ClusterType) -> Self { - AccountsDB::new_with_config(paths, cluster_type, HashSet::new(), false) + AccountsDb::new_with_config(paths, cluster_type, HashSet::new(), false) } pub fn new_with_config( @@ -1112,9 +1112,9 @@ impl AccountsDB { } pub fn new_single() -> Self { - AccountsDB { + AccountsDb { min_num_stores: 0, - ..AccountsDB::new(Vec::new(), &ClusterType::Development) + ..AccountsDb::new(Vec::new(), &ClusterType::Development) } } @@ -1127,7 +1127,7 @@ impl AccountsDB { ) } - // Reclaim older states of rooted accounts for AccountsDB bloat mitigation + // Reclaim older states of rooted accounts for AccountsDb bloat mitigation fn clean_old_rooted_accounts( &self, purges_in_root: Vec, @@ -4550,19 +4550,19 @@ impl AccountsDB { } #[cfg(test)] -impl AccountsDB { +impl AccountsDb { pub fn new_sized(paths: Vec, file_size: u64) -> Self { - AccountsDB { + AccountsDb { file_size, - ..AccountsDB::new(paths, &ClusterType::Development) + ..AccountsDb::new(paths, &ClusterType::Development) } } pub fn new_sized_no_extra_stores(paths: Vec, file_size: u64) -> Self { - AccountsDB { + AccountsDb { file_size, min_num_stores: 0, - ..AccountsDB::new(paths, &ClusterType::Development) + ..AccountsDb::new(paths, &ClusterType::Development) } } @@ -4582,7 +4582,7 @@ impl AccountsDB { /// Legacy shrink functions to support non-cached path. /// Should be able to be deleted after cache path is the only path. -impl AccountsDB { +impl AccountsDb { // Reads all accounts in given slot's AppendVecs and filter only to alive, // then create a minimum AppendVec filled with the alive. 
// v1 path shrinks all stores in the slot @@ -4971,17 +4971,17 @@ pub mod tests { #[should_panic(expected = "assertion failed: bins <= max_plus_1 && bins > 0")] fn test_accountsdb_scan_snapshot_stores_illegal_bins2() { let mut stats = HashStats::default(); - AccountsDB::scan_snapshot_stores(&[], true, &mut stats, 257); + AccountsDb::scan_snapshot_stores(&[], true, &mut stats, 257); } #[test] #[should_panic(expected = "assertion failed: bins <= max_plus_1 && bins > 0")] fn test_accountsdb_scan_snapshot_stores_illegal_bins() { let mut stats = HashStats::default(); - AccountsDB::scan_snapshot_stores(&[], true, &mut stats, 0); + AccountsDb::scan_snapshot_stores(&[], true, &mut stats, 0); } fn sample_storages_and_accounts() -> (SnapshotStorages, Vec) { - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey0 = Pubkey::new(&[0u8; 32]); let pubkey127 = Pubkey::new(&[0x7fu8; 32]); let pubkey128 = Pubkey::new(&[0x80u8; 32]); @@ -5051,11 +5051,11 @@ pub mod tests { let bins = 1; let mut stats = HashStats::default(); - let result = AccountsDB::scan_snapshot_stores(&storages, true, &mut stats, bins); + let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins); assert_eq!(result, vec![vec![raw_expected.clone()]]); let bins = 2; - let result = AccountsDB::scan_snapshot_stores(&storages, true, &mut stats, bins); + let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins); let mut expected = vec![Vec::new(); bins]; expected[0].push(raw_expected[0].clone()); expected[0].push(raw_expected[1].clone()); @@ -5064,7 +5064,7 @@ pub mod tests { assert_eq!(result, vec![expected]); let bins = 4; - let result = AccountsDB::scan_snapshot_stores(&storages, true, &mut stats, bins); + let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins); let mut expected = vec![Vec::new(); bins]; expected[0].push(raw_expected[0].clone()); expected[1].push(raw_expected[1].clone()); @@ -5073,7 +5073,7 @@ pub mod tests { assert_eq!(result, vec![expected]); let bins = 256; - let result = AccountsDB::scan_snapshot_stores(&storages, true, &mut stats, bins); + let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins); let mut expected = vec![Vec::new(); bins]; expected[0].push(raw_expected[0].clone()); expected[127].push(raw_expected[1].clone()); @@ -5094,7 +5094,7 @@ pub mod tests { storages[0].splice(0..0, vec![arc; MAX_ITEMS_PER_CHUNK]); let mut stats = HashStats::default(); - let result = AccountsDB::scan_snapshot_stores(&storages, true, &mut stats, bins); + let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins); assert_eq!(result.len(), 2); // 2 chunks assert_eq!(result[0].len(), 0); // nothing found in first slots assert_eq!(result[1].len(), bins); @@ -5106,7 +5106,7 @@ pub mod tests { solana_logger::setup(); let (storages, _size, _slot_expected) = sample_storage(); - let result = AccountsDB::calculate_accounts_hash_without_index(&storages, true, None); + let result = AccountsDb::calculate_accounts_hash_without_index(&storages, true, None); let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap(); assert_eq!(result, (expected_hash, 0)); } @@ -5121,7 +5121,7 @@ pub mod tests { item.hash }); let sum = raw_expected.iter().map(|item| item.lamports).sum(); - let result = AccountsDB::calculate_accounts_hash_without_index(&storages, true, None); + let result = 
AccountsDb::calculate_accounts_hash_without_index(&storages, true, None); assert_eq!(result, (expected_hash, sum)); } @@ -5166,7 +5166,7 @@ pub mod tests { .append_accounts(&[(sm, &acc)], &[Hash::default()]); let calls = AtomicU64::new(0); - let result = AccountsDB::scan_account_storage_no_bank( + let result = AccountsDb::scan_account_storage_no_bank( &storages, &mut HashStats::default(), |loaded_account: LoadedAccount, accum: &mut Vec, slot: Slot| { @@ -5183,7 +5183,7 @@ pub mod tests { #[test] fn test_accountsdb_add_root() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5196,7 +5196,7 @@ pub mod tests { #[test] fn test_accountsdb_latest_ancestor() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5221,7 +5221,7 @@ pub mod tests { #[test] fn test_accountsdb_latest_ancestor_with_root() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5241,7 +5241,7 @@ pub mod tests { #[test] fn test_accountsdb_root_one_slot() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5282,7 +5282,7 @@ pub mod tests { #[test] fn test_accountsdb_add_root_many() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 100, 0, 0); @@ -5318,7 +5318,7 @@ pub mod tests { #[test] fn test_accountsdb_count_stores() { solana_logger::setup(); - let db = AccountsDB::new_single(); + let db = AccountsDb::new_single(); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0); @@ -5380,7 +5380,7 @@ pub mod tests { let key = Pubkey::default(); // 1 token in the "root", i.e. 
db zero - let db0 = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development); let account0 = Account::new(1, 0, &key); db0.store_uncached(0, &[(&key, &account0)]); @@ -5399,7 +5399,7 @@ pub mod tests { #[test] fn test_remove_unrooted_slot() { let unrooted_slot = 9; - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5440,7 +5440,7 @@ pub mod tests { fn test_remove_unrooted_slot_snapshot() { solana_logger::setup(); let unrooted_slot = 9; - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); let account0 = Account::new(1, 0, &key); db.store_uncached(unrooted_slot, &[(&key, &account0)]); @@ -5466,7 +5466,7 @@ pub mod tests { } fn create_account( - accounts: &AccountsDB, + accounts: &AccountsDb, pubkeys: &mut Vec, slot: Slot, num: usize, @@ -5491,7 +5491,7 @@ pub mod tests { } } - fn update_accounts(accounts: &AccountsDB, pubkeys: &[Pubkey], slot: Slot, range: usize) { + fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) { for _ in 1..1000 { let idx = thread_rng().gen_range(0, range); let ancestors = vec![(slot, 0)].into_iter().collect(); @@ -5512,7 +5512,7 @@ pub mod tests { } } - fn check_storage(accounts: &AccountsDB, slot: Slot, count: usize) -> bool { + fn check_storage(accounts: &AccountsDb, slot: Slot, count: usize) -> bool { assert_eq!( accounts .storage @@ -5546,7 +5546,7 @@ pub mod tests { } fn check_accounts( - accounts: &AccountsDB, + accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, num: usize, @@ -5566,7 +5566,7 @@ pub mod tests { #[allow(clippy::needless_range_loop)] fn modify_accounts( - accounts: &AccountsDB, + accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, num: usize, @@ -5581,7 +5581,7 @@ pub mod tests { #[test] fn test_account_one() { let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap(); - let db = AccountsDB::new(paths, &ClusterType::Development); + let db = AccountsDb::new(paths, &ClusterType::Development); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -5596,7 +5596,7 @@ pub mod tests { #[test] fn test_account_many() { let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap(); - let db = AccountsDB::new(paths, &ClusterType::Development); + let db = AccountsDb::new(paths, &ClusterType::Development); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 100, 0, 0); check_accounts(&db, &pubkeys, 0, 100, 1); @@ -5604,7 +5604,7 @@ pub mod tests { #[test] fn test_account_update() { - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut pubkeys: Vec = vec![]; create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); update_accounts(&accounts, &pubkeys, 0, 99); @@ -5615,7 +5615,7 @@ pub mod tests { fn test_account_grow_many() { let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap(); let size = 4096; - let accounts = AccountsDB::new_sized(paths, size); + let accounts = AccountsDb::new_sized(paths, size); let mut keys = vec![]; for i in 0..9 { let key = solana_sdk::pubkey::new_rand(); @@ -5646,7 +5646,7 @@ pub mod tests { #[test] fn test_account_grow() { - let accounts = AccountsDB::new_single(); 
+ let accounts = AccountsDb::new_single(); let status = [AccountStorageStatus::Available, AccountStorageStatus::Full]; let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -5711,7 +5711,7 @@ pub mod tests { //This test is pedantic //A slot is purged when a non root bank is cleaned up. If a slot is behind root but it is //not root, it means we are retaining dead banks. - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account @@ -5753,7 +5753,7 @@ pub mod tests { assert_eq!(accounts.load_slow(&ancestors, &pubkey), Some((account, 1))); } - impl AccountsDB { + impl AccountsDb { fn all_account_count_in_append_vec(&self, slot: Slot) -> usize { let slot_storage = self.storage.get_slot_stores(slot); if let Some(slot_storage) = slot_storage { @@ -5782,7 +5782,7 @@ pub mod tests { fn test_clean_zero_lamport_and_dead_slot() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 1, &Account::default().owner); @@ -5838,7 +5838,7 @@ pub mod tests { fn test_clean_zero_lamport_and_old_roots() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); let zero_lamport_account = Account::new(0, 0, &Account::default().owner); @@ -5877,7 +5877,7 @@ pub mod tests { fn test_clean_old_with_normal_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account @@ -5905,7 +5905,7 @@ pub mod tests { fn test_clean_old_with_zero_lamport_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let normal_account = Account::new(1, 0, &Account::default().owner); @@ -5939,7 +5939,7 @@ pub mod tests { fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { solana_logger::setup(); - let accounts = AccountsDB::new_with_config( + let accounts = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, spl_token_mint_index_enabled(), @@ -6022,7 +6022,7 @@ pub mod tests { fn test_clean_max_slot_zero_lamport_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); let zero_account = Account::new(0, 0, &Account::default().owner); @@ -6059,7 +6059,7 @@ pub mod tests { fn test_uncleaned_roots_with_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let 
pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account @@ -6079,7 +6079,7 @@ pub mod tests { fn test_uncleaned_roots_with_no_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); @@ -6095,7 +6095,7 @@ pub mod tests { #[test] fn test_accounts_db_serialize1() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut pubkeys: Vec = vec![]; // Create 100 accounts in slot 0 @@ -6197,7 +6197,7 @@ pub mod tests { } fn assert_load_account( - accounts: &AccountsDB, + accounts: &AccountsDb, slot: Slot, pubkey: Pubkey, expected_lamports: u64, @@ -6207,19 +6207,19 @@ pub mod tests { assert_eq!((account.lamports, slot), (expected_lamports, slot)); } - fn assert_not_load_account(accounts: &AccountsDB, slot: Slot, pubkey: Pubkey) { + fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) { let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(accounts.load_slow(&ancestors, &pubkey).is_none()); } - fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDB, slot: Slot) -> AccountsDB { + fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDb, slot: Slot) -> AccountsDb { let daccounts = crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot); daccounts.print_count_and_status("daccounts"); daccounts } - fn assert_no_stores(accounts: &AccountsDB, slot: Slot) { + fn assert_no_stores(accounts: &AccountsDb, slot: Slot) { let slot_stores = accounts.storage.get_slot_stores(slot); let r_slot_stores = slot_stores.as_ref().map(|slot_stores| { let r_slot_stores = slot_stores.read().unwrap(); @@ -6245,7 +6245,7 @@ pub mod tests { let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); accounts.add_root(0); // Step A @@ -6321,7 +6321,7 @@ pub mod tests { let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); accounts.add_root(0); let mut current_slot = 1; @@ -6386,7 +6386,7 @@ pub mod tests { let filler_account = Account::new(some_lamport, no_data, &owner); let filler_account_pubkey = solana_sdk::pubkey::new_rand(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut current_slot = 1; accounts.store_uncached(current_slot, &[(&pubkey, &account)]); @@ -6426,7 +6426,7 @@ pub mod tests { fn with_chained_zero_lamport_accounts(f: F) where - F: Fn(AccountsDB, Slot) -> AccountsDB, + F: Fn(AccountsDb, Slot) -> AccountsDb, { let some_lamport = 223; let zero_lamport = 0; @@ -6446,7 +6446,7 @@ pub mod tests { let dummy_account = Account::new(dummy_lamport, no_data, &owner); let dummy_pubkey = Pubkey::default(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut current_slot = 1; accounts.store_uncached(current_slot, &[(&pubkey, &account)]); @@ -6512,7 +6512,7 @@ pub mod tests { let min_file_bytes = std::mem::size_of::() + std::mem::size_of::(); - let db = Arc::new(AccountsDB::new_sized(Vec::new(), min_file_bytes as u64)); + let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64)); db.add_root(slot); let thread_hdls: Vec<_> = (0..num_threads) 
@@ -6550,7 +6550,7 @@ pub mod tests { #[test] fn test_accountsdb_scan_accounts() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let key0 = solana_sdk::pubkey::new_rand(); let account0 = Account::new(1, 0, &key); @@ -6579,7 +6579,7 @@ pub mod tests { #[test] fn test_cleanup_key_not_removed() { solana_logger::setup(); - let db = AccountsDB::new_single(); + let db = AccountsDb::new_single(); let key = Pubkey::default(); let key0 = solana_sdk::pubkey::new_rand(); @@ -6608,7 +6608,7 @@ pub mod tests { #[test] fn test_store_large_account() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let data_len = DEFAULT_FILE_SIZE as usize + 7; @@ -6625,7 +6625,7 @@ pub mod tests { fn test_hash_frozen_account_data() { let account = Account::new(1, 42, &Pubkey::default()); - let hash = AccountsDB::hash_frozen_account_data(&account); + let hash = AccountsDb::hash_frozen_account_data(&account); assert_ne!(hash, Hash::default()); // Better not be the default Hash // Lamports changes to not affect the hash @@ -6633,7 +6633,7 @@ pub mod tests { account_modified.lamports -= 1; assert_eq!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Rent epoch may changes to not affect the hash @@ -6641,7 +6641,7 @@ pub mod tests { account_modified.rent_epoch += 1; assert_eq!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Account data may not be modified @@ -6649,7 +6649,7 @@ pub mod tests { account_modified.data[0] = 42; assert_ne!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Owner may not be modified @@ -6658,7 +6658,7 @@ pub mod tests { Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); assert_ne!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Executable may not be modified @@ -6666,7 +6666,7 @@ pub mod tests { account_modified.executable = true; assert_ne!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); } @@ -6674,7 +6674,7 @@ pub mod tests { fn test_frozen_account_lamport_increase() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); @@ -6709,7 +6709,7 @@ pub mod tests { fn test_frozen_account_lamport_decrease() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); @@ -6729,7 +6729,7 @@ pub mod tests { fn test_frozen_account_nonexistent() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = 
AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); @@ -6742,7 +6742,7 @@ pub mod tests { fn test_frozen_account_data_modified() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); @@ -6796,12 +6796,12 @@ pub mod tests { Hash::from_str("4StuvYHFd7xuShVXB94uHHvpqGMCaacdZnYB74QQkPA1").unwrap(); assert_eq!( - AccountsDB::hash_stored_account(slot, &stored_account, &ClusterType::Development), + AccountsDb::hash_stored_account(slot, &stored_account, &ClusterType::Development), expected_account_hash, "StoredAccountMeta's data layout might be changed; update hashing if needed." ); assert_eq!( - AccountsDB::hash_account( + AccountsDb::hash_account( slot, &account, &stored_account.meta.pubkey, @@ -6815,7 +6815,7 @@ pub mod tests { #[test] fn test_bank_hash_stats() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let some_data_len = 5; @@ -6843,7 +6843,7 @@ pub mod tests { fn test_verify_bank_hash() { use BankHashVerificationError::*; solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -6885,7 +6885,7 @@ pub mod tests { fn test_verify_bank_capitalization() { use BankHashVerificationError::*; solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -6928,7 +6928,7 @@ pub mod tests { #[test] fn test_verify_bank_hash_no_account() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let some_slot: Slot = 0; let ancestors = vec![(some_slot, 0)].into_iter().collect(); @@ -6949,7 +6949,7 @@ pub mod tests { fn test_verify_bank_hash_bad_account_hash() { use BankHashVerificationError::*; solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let some_data_len = 0; @@ -6958,7 +6958,7 @@ pub mod tests { let ancestors = vec![(some_slot, 0)].into_iter().collect(); let accounts = &[(&key, &account)]; - // update AccountsDB's bank hash but discard real account hashes + // update AccountsDb's bank hash but discard real account hashes db.hash_accounts(some_slot, accounts, &ClusterType::Development); // provide bogus account hashes let some_hash = Hash::new(&[0xca; HASH_BYTES]); @@ -6973,7 +6973,7 @@ pub mod tests { #[test] fn test_storage_finder() { solana_logger::setup(); - let db = AccountsDB::new_sized(Vec::new(), 16 * 1024); + let db = AccountsDb::new_sized(Vec::new(), 16 * 1024); let key = solana_sdk::pubkey::new_rand(); let lamports = 100; let data_len = 8190; @@ -6985,13 +6985,13 @@ pub mod tests { #[test] fn 
test_get_snapshot_storages_empty() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); assert!(db.get_snapshot_storages(0).is_empty()); } #[test] fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7009,7 +7009,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_only_non_empty() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7032,7 +7032,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_only_roots() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7048,7 +7048,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_exclude_empty() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7074,7 +7074,7 @@ pub mod tests { #[test] #[should_panic(expected = "double remove of account in slot: 0/store: 0!!")] fn test_storage_remove_account_double_remove() { - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); accounts.store_uncached(0, &[(&pubkey, &account)]); @@ -7112,7 +7112,7 @@ pub mod tests { let purged_pubkey2 = solana_sdk::pubkey::new_rand(); let mut current_slot = 0; - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); // create intermediate updates to purged_pubkey1 so that // generate_index must add slots as root last at once @@ -7174,9 +7174,9 @@ pub mod tests { let zero_lamport_account = Account::new(zero_lamport, data_size, &owner); let mut current_slot = 0; - let accounts = AccountsDB::new_sized_no_extra_stores(Vec::new(), store_size); + let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size); - // A: Initialize AccountsDB with pubkey1 and pubkey2 + // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; if store1_first { accounts.store_uncached(current_slot, &[(&pubkey1, &account)]); @@ -7302,9 +7302,9 @@ pub mod tests { let dummy_pubkey = solana_sdk::pubkey::new_rand(); let mut current_slot = 0; - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); - // A: Initialize AccountsDB with pubkey1 and pubkey2 + // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; accounts.store_uncached(current_slot, &[(&pubkey1, &account)]); accounts.store_uncached(current_slot, &[(&pubkey2, &account)]); @@ -7388,7 +7388,7 @@ pub mod tests { #[test] fn test_clean_stored_dead_slots_empty() { - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut dead_slots = HashSet::new(); dead_slots.insert(10); accounts.clean_stored_dead_slots(&dead_slots, None); @@ -7396,7 +7396,7 @@ pub mod tests { #[test] fn test_shrink_all_slots_none() { - let accounts = 
AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); for _ in 0..10 { accounts.shrink_candidate_slots(); @@ -7407,7 +7407,7 @@ pub mod tests { #[test] fn test_shrink_next_slots() { - let mut accounts = AccountsDB::new_single(); + let mut accounts = AccountsDb::new_single(); accounts.caching_enabled = false; let mut current_slot = 7; @@ -7446,7 +7446,7 @@ pub mod tests { #[test] fn test_shrink_reset_uncleaned_roots() { - let mut accounts = AccountsDB::new_single(); + let mut accounts = AccountsDb::new_single(); accounts.caching_enabled = false; accounts.reset_uncleaned_roots_v1(); @@ -7483,7 +7483,7 @@ pub mod tests { fn test_shrink_stale_slots_processed() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let pubkey_count = 100; let pubkeys: Vec<_> = (0..pubkey_count) @@ -7551,7 +7551,7 @@ pub mod tests { fn test_shrink_candidate_slots() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let pubkey_count = 30000; let pubkeys: Vec<_> = (0..pubkey_count) @@ -7610,7 +7610,7 @@ pub mod tests { fn test_shrink_stale_slots_skipped() { solana_logger::setup(); - let mut accounts = AccountsDB::new_single(); + let mut accounts = AccountsDb::new_single(); accounts.caching_enabled = false; let pubkey_count = 30000; @@ -7775,7 +7775,7 @@ pub mod tests { store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1]))); store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2]))); store_counts.insert(3, (1, HashSet::from_iter(vec![key2]))); - AccountsDB::calc_delete_dependencies(&purges, &mut store_counts); + AccountsDb::calc_delete_dependencies(&purges, &mut store_counts); let mut stores: Vec<_> = store_counts.keys().cloned().collect(); stores.sort_unstable(); for store in &stores { @@ -7794,12 +7794,12 @@ pub mod tests { fn test_account_balance_for_capitalization_normal() { // system accounts assert_eq!( - AccountsDB::account_balance_for_capitalization(10, &Pubkey::default(), false, true), + AccountsDb::account_balance_for_capitalization(10, &Pubkey::default(), false, true), 10 ); // any random program data accounts assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::pubkey::new_rand(), false, @@ -7808,7 +7808,7 @@ pub mod tests { 10 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::pubkey::new_rand(), false, @@ -7825,7 +7825,7 @@ pub mod tests { 1, ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_sysvar.lamports, &normal_sysvar.owner, normal_sysvar.executable, @@ -7834,7 +7834,7 @@ pub mod tests { 0 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_sysvar.lamports, &normal_sysvar.owner, normal_sysvar.executable, @@ -7845,7 +7845,7 @@ pub mod tests { // currently transactions can send any lamports to sysvars although this is not sensible. 
assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::sysvar::id(), false, @@ -7854,7 +7854,7 @@ pub mod tests { 9 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::sysvar::id(), false, @@ -7868,7 +7868,7 @@ pub mod tests { fn test_account_balance_for_capitalization_native_program() { let normal_native_program = solana_sdk::native_loader::create_loadable_account("foo", 1); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_native_program.lamports, &normal_native_program.owner, normal_native_program.executable, @@ -7877,7 +7877,7 @@ pub mod tests { 0 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_native_program.lamports, &normal_native_program.owner, normal_native_program.executable, @@ -7888,7 +7888,7 @@ pub mod tests { // test maliciously assigned bogus native loader account assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 1, &solana_sdk::native_loader::id(), false, @@ -7897,7 +7897,7 @@ pub mod tests { 1 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 1, &solana_sdk::native_loader::id(), false, @@ -7910,7 +7910,7 @@ pub mod tests { #[test] fn test_checked_sum_for_capitalization_normal() { assert_eq!( - AccountsDB::checked_sum_for_capitalization(vec![1, 2].into_iter()), + AccountsDb::checked_sum_for_capitalization(vec![1, 2].into_iter()), 3 ); } @@ -7919,7 +7919,7 @@ pub mod tests { #[should_panic(expected = "overflow is detected while summing capitalization")] fn test_checked_sum_for_capitalization_overflow() { assert_eq!( - AccountsDB::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()), + AccountsDb::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()), 3 ); } @@ -7927,7 +7927,7 @@ pub mod tests { #[test] fn test_store_overhead() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let account = Account::default(); let pubkey = solana_sdk::pubkey::new_rand(); accounts.store_uncached(0, &[(&pubkey, &account)]); @@ -7943,7 +7943,7 @@ pub mod tests { #[test] fn test_store_reuse() { solana_logger::setup(); - let accounts = AccountsDB::new_sized(vec![], 4096); + let accounts = AccountsDb::new_sized(vec![], 4096); let size = 100; let num_accounts: usize = 100; @@ -7992,7 +7992,7 @@ pub mod tests { #[test] fn test_zero_lamport_new_root_not_cleaned() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let account_key = Pubkey::new_unique(); let zero_lamport_account = Account::new(0, 0, &Account::default().owner); @@ -8016,7 +8016,7 @@ pub mod tests { #[test] fn test_store_load_cached() { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -8044,7 +8044,7 @@ pub mod tests { #[test] fn test_store_flush_load_cached() { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); 
let account0 = Account::new(1, 0, &key); @@ -8069,7 +8069,7 @@ pub mod tests { #[test] fn test_flush_accounts_cache() { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let account0 = Account::new(1, 0, &Pubkey::default()); @@ -8128,7 +8128,7 @@ pub mod tests { } fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let account0 = Account::new(1, 0, &Pubkey::default()); let mut keys = vec![]; @@ -8177,7 +8177,7 @@ pub mod tests { } } - fn slot_stores(db: &AccountsDB, slot: Slot) -> Vec> { + fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec> { db.storage .get_slot_storage_entries(slot) .unwrap_or_default() @@ -8186,7 +8186,7 @@ pub mod tests { #[test] fn test_flush_cache_clean() { let caching_enabled = true; - let db = Arc::new(AccountsDB::new_with_config( + let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8231,7 +8231,7 @@ pub mod tests { } fn setup_scan( - db: Arc, + db: Arc, scan_ancestors: Arc, stall_key: Pubkey, ) -> ScanTracker { @@ -8274,7 +8274,7 @@ pub mod tests { #[test] fn test_scan_flush_accounts_cache_then_clean_drop() { let caching_enabled = true; - let db = Arc::new(AccountsDB::new_with_config( + let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8357,7 +8357,7 @@ pub mod tests { #[test] fn test_alive_bytes() { let caching_enabled = true; - let accounts_db = AccountsDB::new_with_config( + let accounts_db = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8409,9 +8409,9 @@ pub mod tests { fn setup_accounts_db_cache_clean( num_slots: usize, scan_slot: Option, - ) -> (Arc, Vec, Vec, Option) { + ) -> (Arc, Vec, Vec, Option) { let caching_enabled = true; - let accounts_db = Arc::new(AccountsDB::new_with_config( + let accounts_db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8793,7 +8793,7 @@ pub mod tests { // Enable caching so that we use the straightforward implementation // of shrink that will shrink all candidate slots let caching_enabled = true; - let db = AccountsDB::new_with_config( + let db = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::default(), @@ -8876,7 +8876,7 @@ pub mod tests { #[test] fn test_partial_clean() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let account_key1 = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); let account1 = Account::new(1, 0, &Account::default().owner); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ffbfd91e40..0169761f08 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1067,7 +1067,7 @@ impl Bank { new.ancestors.insert(p.slot(), i + 1); }); - // Following code may touch AccountsDB, requiring proper ancestors + // Following code may touch AccountsDb, requiring proper ancestors let parent_epoch = parent.epoch(); if parent_epoch < new.epoch() { new.apply_feature_activations(false); @@ -2169,7 +2169,7 @@ impl Bank { self.capitalization.fetch_sub(account.lamports, Relaxed); - // Resetting account balance to 0 is needed to really purge from 
AccountsDB and + // Resetting account balance to 0 is needed to really purge from AccountsDb and // flush the Stakes cache account.lamports = 0; self.store_account(&program_id, &account); @@ -2189,7 +2189,7 @@ impl Bank { ), Some(account) => { if *name == String::from_utf8_lossy(&account.data) { - // nop; it seems that already AccountsDB is updated. + // nop; it seems that already AccountsDb is updated. return; } // continue to replace account diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 07970f3dec..f01ebabd13 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -1,7 +1,7 @@ //! The `bank_forks` module implements BankForks a DAG of checkpointed Banks use crate::{ - accounts_background_service::{ABSRequestSender, SnapshotRequest}, + accounts_background_service::{AbsRequestSender, SnapshotRequest}, bank::Bank, }; use log::*; @@ -187,7 +187,7 @@ impl BankForks { pub fn set_root( &mut self, root: Slot, - accounts_background_request_sender: &ABSRequestSender, + accounts_background_request_sender: &AbsRequestSender, highest_confirmed_root: Option, ) { let old_epoch = self.root_bank().epoch(); @@ -428,7 +428,7 @@ mod tests { let bank0 = Bank::new(&genesis_config); let mut bank_forks0 = BankForks::new(bank0); - bank_forks0.set_root(0, &ABSRequestSender::default(), None); + bank_forks0.set_root(0, &AbsRequestSender::default(), None); let bank1 = Bank::new(&genesis_config); let mut bank_forks1 = BankForks::new(bank1); @@ -460,7 +460,7 @@ mod tests { // Set root in bank_forks0 to truncate the ancestor history bank_forks0.insert(child1); - bank_forks0.set_root(slot, &ABSRequestSender::default(), None); + bank_forks0.set_root(slot, &AbsRequestSender::default(), None); // Don't set root in bank_forks1 to keep the ancestor history bank_forks1.insert(child2); @@ -514,7 +514,7 @@ mod tests { ); bank_forks.set_root( 2, - &ABSRequestSender::default(), + &AbsRequestSender::default(), None, // highest confirmed root ); banks[2].squash(); @@ -573,7 +573,7 @@ mod tests { ); bank_forks.set_root( 2, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(1), // highest confirmed root ); banks[2].squash(); diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index a221aacff5..d35d33db8a 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -19,7 +19,7 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum UnpackError { #[error("IO error: {0}")] - IO(#[from] std::io::Error), + Io(#[from] std::io::Error), #[error("Archive error: {0}")] Archive(String), } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 799959feb2..ef952fb445 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -1,7 +1,7 @@ use { crate::{ accounts::Accounts, - accounts_db::{AccountStorageEntry, AccountsDB, AppendVecId, BankHashInfo}, + accounts_db::{AccountStorageEntry, AccountsDb, AppendVecId, BankHashInfo}, accounts_index::{AccountIndex, Ancestors}, append_vec::AppendVec, bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins}, @@ -59,7 +59,7 @@ pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages}; #[derive(Copy, Clone, Eq, PartialEq)] pub(crate) enum SerdeStyle { - NEWER, + Newer, } const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; @@ -82,7 +82,7 @@ trait TypeContext<'a> { fn serialize_accounts_db_fields( serializer: S, - serializable_db: &SerializableAccountsDB<'a, Self>, + serializable_db: &SerializableAccountsDb<'a, Self>, ) -> std::result::Result 
where Self: std::marker::Sized; @@ -155,7 +155,7 @@ where }}; } match serde_style { - SerdeStyle::NEWER => INTO!(TypeContextFuture), + SerdeStyle::Newer => INTO!(TypeContextFuture), } .map_err(|err| { warn!("bankrc_from_stream error: {:?}", err); @@ -185,7 +185,7 @@ where }; } match serde_style { - SerdeStyle::NEWER => INTO!(TypeContextFuture), + SerdeStyle::Newer => INTO!(TypeContextFuture), } .map_err(|err| { warn!("bankrc_to_stream error: {:?}", err); @@ -208,14 +208,14 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> { } } -struct SerializableAccountsDB<'a, C> { - accounts_db: &'a AccountsDB, +struct SerializableAccountsDb<'a, C> { + accounts_db: &'a AccountsDb, slot: Slot, account_storage_entries: &'a [SnapshotStorage], phantom: std::marker::PhantomData, } -impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> { +impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDb<'a, C> { fn serialize(&self, serializer: S) -> std::result::Result where S: serde::ser::Serializer, @@ -225,7 +225,7 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> { } #[cfg(RUSTC_WITH_SPECIALIZATION)] -impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {} +impl<'a, C> IgnoreAsHelper for SerializableAccountsDb<'a, C> {} #[allow(clippy::too_many_arguments)] fn reconstruct_bank_from_fields( @@ -273,12 +273,12 @@ fn reconstruct_accountsdb_from_fields( cluster_type: &ClusterType, account_indexes: HashSet, caching_enabled: bool, -) -> Result +) -> Result where E: SerializableStorage, P: AsRef, { - let mut accounts_db = AccountsDB::new_with_config( + let mut accounts_db = AccountsDb::new_with_config( account_paths.to_vec(), cluster_type, account_indexes, diff --git a/runtime/src/serde_snapshot/future.rs b/runtime/src/serde_snapshot/future.rs index e61faf53b1..2bf612fba1 100644 --- a/runtime/src/serde_snapshot/future.rs +++ b/runtime/src/serde_snapshot/future.rs @@ -215,7 +215,7 @@ impl<'a> TypeContext<'a> for Context { { ( SerializableVersionedBank::from(serializable_bank.bank.get_fields_to_serialize()), - SerializableAccountsDB::<'a, Self> { + SerializableAccountsDb::<'a, Self> { accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, @@ -227,7 +227,7 @@ impl<'a> TypeContext<'a> for Context { fn serialize_accounts_db_fields( serializer: S, - serializable_db: &SerializableAccountsDB<'a, Self>, + serializable_db: &SerializableAccountsDb<'a, Self>, ) -> std::result::Result where Self: std::marker::Sized, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 14671d827a..d29d57b7ed 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -21,7 +21,7 @@ use { #[cfg(test)] fn copy_append_vecs>( - accounts_db: &AccountsDB, + accounts_db: &AccountsDb, output_dir: P, ) -> std::io::Result<()> { let storage_entries = accounts_db.get_snapshot_storages(Slot::max_value()); @@ -57,7 +57,7 @@ fn context_accountsdb_from_stream<'a, C, R, P>( stream: &mut BufReader, account_paths: &[PathBuf], stream_append_vecs_path: P, -) -> Result +) -> Result where C: TypeContext<'a>, R: Read, @@ -80,13 +80,13 @@ fn accountsdb_from_stream( stream: &mut BufReader, account_paths: &[PathBuf], stream_append_vecs_path: P, -) -> Result +) -> Result where R: Read, P: AsRef, { match serde_style { - SerdeStyle::NEWER => context_accountsdb_from_stream::( + SerdeStyle::Newer => 
context_accountsdb_from_stream::( stream, account_paths, stream_append_vecs_path, @@ -98,7 +98,7 @@ where fn accountsdb_to_stream( serde_style: SerdeStyle, stream: &mut W, - accounts_db: &AccountsDB, + accounts_db: &AccountsDb, slot: Slot, account_storage_entries: &[SnapshotStorage], ) -> Result<(), Error> @@ -106,9 +106,9 @@ where W: Write, { match serde_style { - SerdeStyle::NEWER => serialize_into( + SerdeStyle::Newer => serialize_into( stream, - &SerializableAccountsDB:: { + &SerializableAccountsDb:: { accounts_db, slot, account_storage_entries, @@ -230,13 +230,13 @@ fn test_bank_serialize_style(serde_style: SerdeStyle) { #[cfg(test)] pub(crate) fn reconstruct_accounts_db_via_serialization( - accounts: &AccountsDB, + accounts: &AccountsDb, slot: Slot, -) -> AccountsDB { +) -> AccountsDb { let mut writer = Cursor::new(vec![]); let snapshot_storages = accounts.get_snapshot_storages(slot); accountsdb_to_stream( - SerdeStyle::NEWER, + SerdeStyle::Newer, &mut writer, &accounts, slot, @@ -249,17 +249,17 @@ pub(crate) fn reconstruct_accounts_db_via_serialization( let copied_accounts = TempDir::new().unwrap(); // Simulate obtaining a copy of the AppendVecs from a tarball copy_append_vecs(&accounts, copied_accounts.path()).unwrap(); - accountsdb_from_stream(SerdeStyle::NEWER, &mut reader, &[], copied_accounts.path()).unwrap() + accountsdb_from_stream(SerdeStyle::Newer, &mut reader, &[], copied_accounts.path()).unwrap() } #[test] fn test_accounts_serialize_newer() { - test_accounts_serialize_style(SerdeStyle::NEWER) + test_accounts_serialize_style(SerdeStyle::Newer) } #[test] fn test_bank_serialize_newer() { - test_bank_serialize_style(SerdeStyle::NEWER) + test_bank_serialize_style(SerdeStyle::Newer) } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION))] @@ -268,7 +268,7 @@ mod test_bank_serialize { // These some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "9CqwEeiVycBp9wVDLz19XUJXRMZ68itGfYVEe29S8JmA")] + #[frozen_abi(digest = "DuRGntVwLGNAv5KooafUSpxk67BPAx2yC7Z8A9c8wr2G")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperFuture { #[serde(serialize_with = "wrapper_future")] diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 57dfc67e13..c4d01ef6a8 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1,5 +1,5 @@ use crate::{ - accounts_db::AccountsDB, + accounts_db::AccountsDb, accounts_index::AccountIndex, bank::{Bank, BankSlotDelta, Builtins}, bank_forks::ArchiveFormat, @@ -25,7 +25,7 @@ use std::{ cmp::Ordering, fmt, fs::{self, File}, - io::{self, BufReader, BufWriter, Error as IOError, ErrorKind, Read, Seek, SeekFrom, Write}, + io::{self, BufReader, BufWriter, Error as IoError, ErrorKind, Read, Seek, SeekFrom, Write}, path::{Path, PathBuf}, process::{self, ExitStatus}, str::FromStr, @@ -108,7 +108,7 @@ pub struct SlotSnapshotPaths { #[derive(Error, Debug)] pub enum SnapshotError { #[error("I/O error: {0}")] - IO(#[from] std::io::Error), + Io(#[from] std::io::Error), #[error("serialization error: {0}")] Serialize(#[from] bincode::Error), @@ -319,7 +319,7 @@ pub fn archive_snapshot_package(snapshot_package: &AccountsPackage) -> Result<() match &mut tar.stdout { None => { - return Err(SnapshotError::IO(IOError::new( + return Err(SnapshotError::Io(IoError::new( ErrorKind::Other, "tar stdout unavailable".to_string(), ))); @@ -521,7 +521,7 @@ pub fn add_snapshot>( let mut bank_serialize = Measure::start("bank-serialize-ms"); let 
bank_snapshot_serializer = move |stream: &mut BufWriter| -> Result<()> { let serde_style = match snapshot_version { - SnapshotVersion::V1_2_0 => SerdeStyle::NEWER, + SnapshotVersion::V1_2_0 => SerdeStyle::Newer, }; bank_to_stream(serde_style, stream.by_ref(), bank, snapshot_storages)?; Ok(()) @@ -797,7 +797,7 @@ where let bank = deserialize_snapshot_data_file(&root_paths.snapshot_file_path, |mut stream| { Ok(match snapshot_version_enum { SnapshotVersion::V1_2_0 => bank_from_stream( - SerdeStyle::NEWER, + SerdeStyle::Newer, &mut stream, &append_vecs_path, account_paths, @@ -841,7 +841,7 @@ fn get_bank_snapshot_dir>(path: P, slot: Slot) -> PathBuf { fn get_io_error(error: &str) -> SnapshotError { warn!("Snapshot Error: {:?}", error); - SnapshotError::IO(IOError::new(ErrorKind::Other, error)) + SnapshotError::Io(IoError::new(ErrorKind::Other, error)) } pub fn verify_snapshot_archive( @@ -968,7 +968,7 @@ pub fn process_accounts_package_pre( let hash = accounts_package.hash; // temporarily remaining here if let Some(expected_hash) = accounts_package.hash_for_testing { - let (hash, lamports) = AccountsDB::calculate_accounts_hash_without_index( + let (hash, lamports) = AccountsDb::calculate_accounts_hash_without_index( &accounts_package.storages, accounts_package.simple_capitalization_testing, thread_pool, @@ -1039,7 +1039,7 @@ mod tests { Ok(()) }, ); - assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize")); + assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize")); } #[test] @@ -1088,7 +1088,7 @@ mod tests { expected_consumed_size - 1, |stream| Ok(deserialize_from::<_, u32>(stream)?), ); - assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize")); + assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize")); } #[test] @@ -1113,7 +1113,7 @@ mod tests { expected_consumed_size * 2, |stream| Ok(deserialize_from::<_, u32>(stream)?), ); - assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("invalid snapshot data file")); + assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("invalid snapshot data file")); } #[test] diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index 5593feae07..7a0b50fcd5 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -1,7 +1,7 @@ use log::*; use rand::{thread_rng, Rng}; use rayon::prelude::*; -use solana_runtime::{accounts_db::AccountsDB, accounts_index::Ancestors}; +use solana_runtime::{accounts_db::AccountsDb, accounts_index::Ancestors}; use solana_sdk::genesis_config::ClusterType; use solana_sdk::{account::Account, clock::Slot, pubkey::Pubkey}; use std::collections::HashSet; @@ -15,7 +15,7 @@ fn test_shrink_and_clean() { // repeat the whole test scenario for _ in 0..5 { - let accounts = Arc::new(AccountsDB::new_single()); + let accounts = Arc::new(AccountsDb::new_single()); let accounts_for_shrink = accounts.clone(); // spawn the slot shrinking background thread @@ -31,7 +31,7 @@ fn test_shrink_and_clean() { let mut alive_accounts = vec![]; let owner = Pubkey::default(); - // populate the AccountsDB with plenty of food for slot shrinking + // populate the AccountsDb with plenty of food for slot shrinking // also 
this simulates realistic some heavy spike account updates in the wild for current_slot in 0..100 { while alive_accounts.len() <= 10 { @@ -66,7 +66,7 @@ fn test_shrink_and_clean() { fn test_bad_bank_hash() { solana_logger::setup(); use solana_sdk::signature::{Keypair, Signer}; - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let some_slot: Slot = 0; let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect(); @@ -113,7 +113,7 @@ fn test_bad_bank_hash() { for (key, account) in &account_refs { assert_eq!( db.load_account_hash(&ancestors, &key), - AccountsDB::hash_account(some_slot, &account, &key, &ClusterType::Development) + AccountsDb::hash_account(some_slot, &account, &key, &ClusterType::Development) ); } existing.clear(); diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 6bbdb9fcf0..926342b876 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -19,7 +19,7 @@ pub type PacketSender = Sender; #[derive(Error, Debug)] pub enum StreamerError { #[error("I/O error")] - IO(#[from] std::io::Error), + Io(#[from] std::io::Error), #[error("receive timeout error")] RecvTimeoutError(#[from] RecvTimeoutError),
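
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): the diff consistently
// renames screaming-case acronyms in identifiers -- IO -> Io, NEWER -> Newer,
// AccountsDB -> AccountsDb, ABSRequestSender -> AbsRequestSender -- to match
// Rust's convention of writing acronyms as single UpperCamelCase words in type
// and variant names. The minimal example below shows the same pattern with the
// `thiserror` crate already used in the patched files; the names DemoError,
// DemoStyle, and open_config are hypothetical and do not appear in the patch.

use std::fs::File;
use thiserror::Error;

/// Error enum in the post-rename style: the acronym variant is `Io`, not `IO`.
#[derive(Error, Debug)]
pub enum DemoError {
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("archive error: {0}")]
    Archive(String),
}

/// Enum variant written as UpperCamelCase (`Newer`) rather than SCREAMING_CASE
/// (`NEWER`), mirroring the SerdeStyle change in the diff.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum DemoStyle {
    Newer,
}

/// `#[from]` generates a `From<std::io::Error>` impl, so the `?` operator
/// converts an io::Error into DemoError::Io automatically.
fn open_config(path: &str) -> Result<File, DemoError> {
    let file = File::open(path)?;
    Ok(file)
}

fn main() {
    match open_config("/nonexistent/path") {
        Ok(_) => println!("opened"),
        // Matching on the renamed variant reads the same as before; only the
        // identifier changed, so call sites update mechanically, which is why
        // the patch is almost entirely one-line substitutions.
        Err(DemoError::Io(e)) => println!("io error as expected: {}", e),
        Err(e) => println!("other error: {}", e),
    }

    assert_eq!(DemoStyle::Newer, DemoStyle::Newer);
}
// ---------------------------------------------------------------------------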