Move accounts-db code to its own crate (#32766)

This commit is contained in:
Pankaj Garg 2023-08-09 13:03:36 -07:00 committed by GitHub
parent b97c451200
commit f4287d70bb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
153 changed files with 1383 additions and 1006 deletions

99
Cargo.lock generated
View File

@ -5151,9 +5151,9 @@ dependencies = [
"clap 2.33.3",
"log",
"rayon",
"solana-accounts-db",
"solana-logger",
"solana-measure",
"solana-runtime",
"solana-sdk",
"solana-version",
]
@ -5167,6 +5167,7 @@ dependencies = [
"rand 0.7.3",
"rayon",
"solana-account-decoder",
"solana-accounts-db",
"solana-clap-utils",
"solana-cli-config",
"solana-client",
@ -5187,6 +5188,84 @@ dependencies = [
"spl-token",
]
[[package]]
name = "solana-accounts-db"
version = "1.17.0"
dependencies = [
"arrayref",
"assert_matches",
"bincode",
"blake3",
"bv",
"bytemuck",
"byteorder",
"bzip2",
"crossbeam-channel",
"dashmap 4.0.2",
"dir-diff",
"ed25519-dalek",
"flate2",
"fnv",
"fs-err",
"im",
"index_list",
"itertools",
"lazy_static",
"libsecp256k1",
"log",
"lru",
"lz4",
"memmap2",
"memoffset 0.9.0",
"modular-bitfield",
"num-derive",
"num-traits",
"num_cpus",
"num_enum 0.6.1",
"once_cell",
"ouroboros",
"percentage",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rayon",
"regex",
"rustc_version 0.4.0",
"serde",
"serde_derive",
"siphasher",
"solana-accounts-db",
"solana-address-lookup-table-program",
"solana-bpf-loader-program",
"solana-bucket-map",
"solana-compute-budget-program",
"solana-config-program",
"solana-cost-model",
"solana-frozen-abi",
"solana-frozen-abi-macro",
"solana-loader-v4-program",
"solana-logger",
"solana-measure",
"solana-metrics",
"solana-perf",
"solana-program-runtime",
"solana-rayon-threadlimit",
"solana-sdk",
"solana-stake-program",
"solana-system-program",
"solana-vote-program",
"solana-zk-token-proof-program",
"solana-zk-token-sdk",
"static_assertions",
"strum",
"strum_macros",
"symlink",
"tar",
"tempfile",
"test-case",
"thiserror",
"zstd",
]
[[package]]
name = "solana-address-lookup-table-program"
version = "1.17.0"
@ -5274,6 +5353,7 @@ dependencies = [
"bincode",
"crossbeam-channel",
"futures 0.3.28",
"solana-accounts-db",
"solana-banks-interface",
"solana-client",
"solana-runtime",
@ -5715,6 +5795,7 @@ dependencies = [
"serde_derive",
"serde_json",
"serial_test",
"solana-accounts-db",
"solana-address-lookup-table-program",
"solana-bloom",
"solana-client",
@ -5935,6 +6016,7 @@ dependencies = [
"serde",
"serde_json",
"serde_yaml 0.9.25",
"solana-accounts-db",
"solana-clap-utils",
"solana-cli-config",
"solana-entry",
@ -5953,9 +6035,9 @@ name = "solana-genesis-utils"
version = "1.17.0"
dependencies = [
"log",
"solana-accounts-db",
"solana-download-utils",
"solana-rpc-client",
"solana-runtime",
"solana-sdk",
]
@ -5981,6 +6063,7 @@ dependencies = [
"libloading",
"log",
"serde_json",
"solana-accounts-db",
"solana-entry",
"solana-geyser-plugin-interface",
"solana-ledger",
@ -6128,6 +6211,7 @@ dependencies = [
"serde_bytes",
"sha2 0.10.7",
"solana-account-decoder",
"solana-accounts-db",
"solana-bpf-loader-program",
"solana-cost-model",
"solana-entry",
@ -6181,6 +6265,7 @@ dependencies = [
"serde_json",
"signal-hook",
"solana-account-decoder",
"solana-accounts-db",
"solana-bpf-loader-program",
"solana-clap-utils",
"solana-cli-output",
@ -6233,6 +6318,7 @@ dependencies = [
"rand 0.7.3",
"rayon",
"serial_test",
"solana-accounts-db",
"solana-client",
"solana-config-program",
"solana-core",
@ -6295,9 +6381,9 @@ version = "1.17.0"
dependencies = [
"clap 2.33.3",
"log",
"solana-accounts-db",
"solana-logger",
"solana-measure",
"solana-runtime",
"solana-sdk",
"solana-version",
]
@ -6530,6 +6616,7 @@ dependencies = [
"crossbeam-channel",
"log",
"serde",
"solana-accounts-db",
"solana-banks-client",
"solana-banks-interface",
"solana-banks-server",
@ -6647,6 +6734,7 @@ dependencies = [
"serial_test",
"soketto",
"solana-account-decoder",
"solana-accounts-db",
"solana-address-lookup-table-program",
"solana-client",
"solana-entry",
@ -6817,6 +6905,7 @@ dependencies = [
"serde",
"serde_derive",
"siphasher",
"solana-accounts-db",
"solana-address-lookup-table-program",
"solana-bpf-loader-program",
"solana-bucket-map",
@ -7019,8 +7108,8 @@ version = "1.17.0"
dependencies = [
"clap 2.33.3",
"log",
"solana-accounts-db",
"solana-logger",
"solana-runtime",
"solana-sdk",
"solana-version",
]
@ -7080,6 +7169,7 @@ dependencies = [
"log",
"serde_derive",
"serde_json",
"solana-accounts-db",
"solana-cli-output",
"solana-client",
"solana-core",
@ -7307,6 +7397,7 @@ dependencies = [
"serde_yaml 0.9.25",
"signal-hook",
"solana-account-decoder",
"solana-accounts-db",
"solana-clap-utils",
"solana-cli-config",
"solana-core",

View File

@ -3,6 +3,7 @@ members = [
"account-decoder",
"accounts-bench",
"accounts-cluster-bench",
"accounts-db",
"banking-bench",
"banks-client",
"banks-interface",
@ -293,6 +294,7 @@ socket2 = "0.5.3"
soketto = "0.7"
solana_rbpf = "=0.6.0"
solana-account-decoder = { path = "account-decoder", version = "=1.17.0" }
solana-accounts-db = { path = "accounts-db", version = "=1.17.0" }
solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=1.17.0" }
solana-banks-client = { path = "banks-client", version = "=1.17.0" }
solana-banks-interface = { path = "banks-interface", version = "=1.17.0" }

View File

@ -12,9 +12,9 @@ edition = { workspace = true }
clap = { workspace = true }
log = { workspace = true }
rayon = { workspace = true }
solana-accounts-db = { workspace = true }
solana-logger = { workspace = true }
solana-measure = { workspace = true }
solana-runtime = { workspace = true }
solana-sdk = { workspace = true }
solana-version = { workspace = true }

View File

@ -5,8 +5,7 @@ extern crate log;
use {
clap::{crate_description, crate_name, value_t, App, Arg},
rayon::prelude::*,
solana_measure::measure::Measure,
solana_runtime::{
solana_accounts_db::{
accounts::Accounts,
accounts_db::{
test_utils::{create_test_accounts, update_accounts_bench},
@ -16,6 +15,7 @@ use {
ancestors::Ancestors,
rent_collector::RentCollector,
},
solana_measure::measure::Measure,
solana_sdk::{
genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule,
},

View File

@ -14,6 +14,7 @@ log = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
solana-account-decoder = { workspace = true }
solana-accounts-db = { workspace = true }
solana-clap-utils = { workspace = true }
solana-cli-config = { workspace = true }
solana-client = { workspace = true }

View File

@ -4,6 +4,7 @@ use {
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_accounts_db::inline_spl_token,
solana_clap_utils::{
hidden_unless_forced, input_parsers::pubkey_of, input_validators::is_url_or_moniker,
},
@ -11,7 +12,6 @@ use {
solana_client::transaction_executor::TransactionExecutor,
solana_gossip::gossip_service::discover,
solana_rpc_client::rpc_client::RpcClient,
solana_runtime::inline_spl_token,
solana_sdk::{
commitment_config::CommitmentConfig,
hash::Hash,
@ -703,6 +703,7 @@ fn main() {
pub mod test {
use {
super::*,
solana_accounts_db::inline_spl_token,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{

101
accounts-db/Cargo.toml Normal file
View File

@ -0,0 +1,101 @@
[package]
name = "solana-accounts-db"
description = "Solana accounts db"
documentation = "https://docs.rs/solana-accounts-db"
version = { workspace = true }
authors = { workspace = true }
repository = { workspace = true }
homepage = { workspace = true }
license = { workspace = true }
edition = { workspace = true }
[dependencies]
arrayref = { workspace = true }
bincode = { workspace = true }
blake3 = { workspace = true }
bv = { workspace = true, features = ["serde"] }
bytemuck = { workspace = true }
byteorder = { workspace = true }
bzip2 = { workspace = true }
crossbeam-channel = { workspace = true }
dashmap = { workspace = true, features = ["rayon", "raw-api"] }
dir-diff = { workspace = true }
flate2 = { workspace = true }
fnv = { workspace = true }
fs-err = { workspace = true }
im = { workspace = true, features = ["rayon", "serde"] }
index_list = { workspace = true }
itertools = { workspace = true }
lazy_static = { workspace = true }
log = { workspace = true }
lru = { workspace = true }
lz4 = { workspace = true }
memmap2 = { workspace = true }
modular-bitfield = { workspace = true }
num-derive = { workspace = true }
num-traits = { workspace = true }
num_cpus = { workspace = true }
num_enum = { workspace = true }
once_cell = { workspace = true }
ouroboros = { workspace = true }
percentage = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
regex = { workspace = true }
serde = { workspace = true, features = ["rc"] }
serde_derive = { workspace = true }
siphasher = { workspace = true }
solana-address-lookup-table-program = { workspace = true }
solana-bpf-loader-program = { workspace = true }
solana-bucket-map = { workspace = true }
solana-compute-budget-program = { workspace = true }
solana-config-program = { workspace = true }
solana-cost-model = { workspace = true }
solana-frozen-abi = { workspace = true }
solana-frozen-abi-macro = { workspace = true }
solana-loader-v4-program = { workspace = true }
solana-measure = { workspace = true }
solana-metrics = { workspace = true }
solana-perf = { workspace = true }
solana-program-runtime = { workspace = true }
solana-rayon-threadlimit = { workspace = true }
solana-sdk = { workspace = true }
solana-stake-program = { workspace = true }
solana-system-program = { workspace = true }
solana-vote-program = { workspace = true }
solana-zk-token-proof-program = { workspace = true }
solana-zk-token-sdk = { workspace = true }
static_assertions = { workspace = true }
strum = { workspace = true, features = ["derive"] }
strum_macros = { workspace = true }
symlink = { workspace = true }
tar = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }
zstd = { workspace = true }
[lib]
crate-type = ["lib"]
name = "solana_accounts_db"
[dev-dependencies]
assert_matches = { workspace = true }
ed25519-dalek = { workspace = true }
libsecp256k1 = { workspace = true }
memoffset = { workspace = true }
rand_chacha = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] }
solana-logger = { workspace = true }
solana-sdk = { workspace = true, features = ["dev-context-only-utils"] }
static_assertions = { workspace = true }
test-case = { workspace = true }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = { workspace = true }
[features]
dev-context-only-utils = []

27
accounts-db/build.rs Normal file
View File

@ -0,0 +1,27 @@
extern crate rustc_version;
use rustc_version::{version_meta, Channel};
fn main() {
    // Emit a cfg flag describing whether the compiling toolchain supports
    // specialization, so the crate can conditionally enable frozen-abi code.
    //
    // Adapted from the rustc-version-rs README example:
    // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example
    // Licensed under Apache-2.0 + MIT
    let channel = version_meta().unwrap().channel;
    match channel {
        // Stable and beta toolchains lack specialization.
        Channel::Stable | Channel::Beta => {
            println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION");
        }
        Channel::Nightly => {
            println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION");
        }
        Channel::Dev => {
            println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION");
            // See https://github.com/solana-labs/solana/issues/11055
            // We may be running the custom `rust-bpf-builder` toolchain,
            // which currently needs `#![feature(proc_macro_hygiene)]` to
            // be applied.
            println!("cargo:rustc-cfg=RUSTC_NEEDS_PROC_MACRO_HYGIENE");
        }
    }
}

View File

@ -10,7 +10,7 @@ use {
};
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum RentState {
pub enum RentState {
/// account.lamports == 0
Uninitialized,
/// 0 < account.lamports < rent-exempt-minimum
@ -23,7 +23,7 @@ pub(crate) enum RentState {
}
impl RentState {
pub(crate) fn from_account(account: &AccountSharedData, rent: &Rent) -> Self {
pub fn from_account(account: &AccountSharedData, rent: &Rent) -> Self {
if account.lamports() == 0 {
Self::Uninitialized
} else if rent.is_exempt(account.lamports(), account.data().len()) {
@ -36,7 +36,7 @@ impl RentState {
}
}
pub(crate) fn transition_allowed_from(&self, pre_rent_state: &RentState) -> bool {
pub fn transition_allowed_from(&self, pre_rent_state: &RentState) -> bool {
match self {
Self::Uninitialized | Self::RentExempt => true,
Self::RentPaying {
@ -73,7 +73,7 @@ pub(crate) fn submit_rent_state_metrics(pre_rent_state: &RentState, post_rent_st
}
}
pub(crate) fn check_rent_state(
pub fn check_rent_state(
pre_rent_state: Option<&RentState>,
post_rent_state: Option<&RentState>,
transaction_context: &TransactionContext,

View File

@ -12,10 +12,10 @@ pub mod meta;
#[derive(Clone, Debug)]
pub struct AccountStorageReference {
/// the single storage for a given slot
pub(crate) storage: Arc<AccountStorageEntry>,
pub storage: Arc<AccountStorageEntry>,
/// id can be read from 'storage', but it is an atomic read.
/// id will never change while a storage is held, so we store it separately here for faster runtime lookup in 'get_account_storage_entry'
pub(crate) id: AppendVecId,
pub id: AppendVecId,
}
pub type AccountStorageMap = DashMap<Slot, AccountStorageReference>;
@ -74,7 +74,7 @@ impl AccountStorage {
/// return the append vec for 'slot' if it exists
/// This is only ever called when shrink is not possibly running and there is a max of 1 append vec per slot.
pub(crate) fn get_slot_storage_entry(&self, slot: Slot) -> Option<Arc<AccountStorageEntry>> {
pub fn get_slot_storage_entry(&self, slot: Slot) -> Option<Arc<AccountStorageEntry>> {
assert!(self.no_shrink_in_progress());
self.get_slot_storage_entry_shrinking_in_progress_ok(slot)
}
@ -100,7 +100,7 @@ impl AccountStorage {
}
/// initialize the storage map to 'all_storages'
pub(crate) fn initialize(&mut self, all_storages: AccountStorageMap) {
pub fn initialize(&mut self, all_storages: AccountStorageMap) {
assert!(self.map.is_empty());
assert!(self.no_shrink_in_progress());
self.map.extend(all_storages.into_iter())
@ -208,7 +208,7 @@ impl<'a> Iterator for AccountStorageIter<'a> {
/// exists while there is a shrink in progress
/// keeps track of the 'new_store' being created and the 'old_store' being replaced.
#[derive(Debug)]
pub(crate) struct ShrinkInProgress<'a> {
pub struct ShrinkInProgress<'a> {
storage: &'a AccountStorage,
/// old store which will be shrunk and replaced
old_store: Arc<AccountStorageEntry>,
@ -244,7 +244,7 @@ impl<'a> Drop for ShrinkInProgress<'a> {
}
impl<'a> ShrinkInProgress<'a> {
pub(crate) fn new_storage(&self) -> &Arc<AccountStorageEntry> {
pub fn new_storage(&self) -> &Arc<AccountStorageEntry> {
&self.new_store
}
pub(crate) fn old_storage(&self) -> &Arc<AccountStorageEntry> {

View File

@ -84,7 +84,7 @@ pub struct AccountLocks {
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub(crate) enum RewardInterval {
pub enum RewardInterval {
/// the slot within the epoch is INSIDE the reward distribution interval
InsideInterval,
/// the slot within the epoch is OUTSIDE the reward distribution interval
@ -216,11 +216,11 @@ impl Accounts {
))
}
pub(crate) fn new_empty(accounts_db: AccountsDb) -> Self {
pub fn new_empty(accounts_db: AccountsDb) -> Self {
Self::new(Arc::new(accounts_db))
}
pub(crate) fn new(accounts_db: Arc<AccountsDb>) -> Self {
pub fn new(accounts_db: Arc<AccountsDb>) -> Self {
Self {
accounts_db,
account_locks: Mutex::new(AccountLocks::default()),
@ -694,7 +694,7 @@ impl Accounts {
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn load_accounts(
pub fn load_accounts(
&self,
ancestors: &Ancestors,
txs: &[SanitizedTransaction],
@ -1306,7 +1306,7 @@ impl Accounts {
/// Store the accounts into the DB
// allow(clippy) needed for various gating flags
#[allow(clippy::too_many_arguments)]
pub(crate) fn store_cached(
pub fn store_cached(
&self,
slot: Slot,
txs: &[SanitizedTransaction],

View File

@ -28,12 +28,12 @@ use {
},
AccountStorage, AccountStorageStatus, ShrinkInProgress,
},
accounts_background_service::{DroppedSlotsSender, SendDroppedBankCallback},
accounts_cache::{AccountsCache, CachedAccount, SlotCache},
accounts_file::{AccountsFile, AccountsFileError},
accounts_hash::{
AccountsDeltaHash, AccountsHash, AccountsHashEnum, AccountsHasher,
CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, IncrementalAccountsHash,
SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash,
ZeroLamportAccounts,
},
accounts_index::{
@ -62,8 +62,6 @@ use {
pubkey_bins::PubkeyBinCalculator24,
read_only_accounts_cache::ReadOnlyAccountsCache,
rent_collector::RentCollector,
serde_snapshot::{SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash},
snapshot_utils::create_accounts_run_and_snapshot_dirs,
sorted_storages::SortedStorages,
storable_accounts::StorableAccounts,
verify_accounts_hash_in_background::VerifyAccountsHashInBackground,
@ -394,7 +392,7 @@ enum LoadZeroLamports {
/// Note that this is non-deterministic if clean is running asynchronously.
/// If a zero lamport account exists in the index, then Some is returned.
/// Once it is cleaned from the index, None is returned.
#[cfg(test)]
#[cfg(feature = "dev-context-only-utils")]
SomeWithZeroLamportAccountForTests,
}
@ -1021,7 +1019,7 @@ pub struct AccountStorageEntry {
pub(crate) slot: AtomicU64,
/// storage holding the accounts
pub(crate) accounts: AccountsFile,
pub accounts: AccountsFile,
/// Keeps track of the number of accounts stored in a specific AppendVec.
/// This is periodically checked to reuse the stores that do not have
@ -1057,7 +1055,7 @@ impl AccountStorageEntry {
}
}
pub(crate) fn new_existing(
pub fn new_existing(
slot: Slot,
id: AppendVecId,
accounts: AccountsFile,
@ -1211,6 +1209,76 @@ impl AccountStorageEntry {
}
}
/// To allow generating a bank snapshot directory with full state information, we need to
/// hardlink account appendvec files from the runtime operation directory to a snapshot
/// hardlink directory. This is to create the run/ and snapshot sub directories for an
/// account_path provided by the user. These two sub directories are on the same file
/// system partition to allow hard-linking.
///
/// Returns the `(run_path, snapshot_path)` pair for `account_dir` on success,
/// creating either directory if it does not already exist.
///
/// # Errors
/// Propagates any I/O error from creating the `run/` or `snapshot/` sub directories.
pub fn create_accounts_run_and_snapshot_dirs(
    account_dir: impl AsRef<Path>,
) -> std::io::Result<(PathBuf, PathBuf)> {
    let run_path = account_dir.as_ref().join("run");
    let snapshot_path = account_dir.as_ref().join("snapshot");
    if (!run_path.is_dir()) || (!snapshot_path.is_dir()) {
        // If the "run/" or "snapshot" sub directories do not exist, the directory may be from
        // an older version for which the appendvec files are at this directory. Clean up
        // them first.
        // This will be done only once when transitioning from an old image without run directory
        // to this new version using run and snapshot directories.
        // The run/ content cleanup will be done at a later point. The snapshot/ content persists
        // across the process boot, and will be purged by the account_background_service.
        // Fall back to best-effort per-entry deletion if the whole-tree removal fails
        // (e.g. due to missing permission on the top-level directory itself).
        if fs_err::remove_dir_all(&account_dir).is_err() {
            delete_contents_of_path(&account_dir);
        }
        fs_err::create_dir_all(&run_path)?;
        fs_err::create_dir_all(&snapshot_path)?;
    }
    Ok((run_path, snapshot_path))
}
/// For all account_paths, create the run/ and snapshot/ sub directories.
/// If an account_path directory does not exist, create it.
/// It returns (account_run_paths, account_snapshot_paths) or error
///
/// The two returned vectors are parallel: index `i` of each corresponds to
/// `account_paths[i]`.
///
/// # Errors
/// Returns the first I/O error encountered while creating any sub directory;
/// directories created before the failure are left in place.
pub fn create_all_accounts_run_and_snapshot_dirs(
    account_paths: &[PathBuf],
) -> std::io::Result<(Vec<PathBuf>, Vec<PathBuf>)> {
    let mut run_dirs = Vec::with_capacity(account_paths.len());
    let mut snapshot_dirs = Vec::with_capacity(account_paths.len());
    for account_path in account_paths {
        // create the run/ and snapshot/ sub directories for each account_path
        let (run_dir, snapshot_dir) = create_accounts_run_and_snapshot_dirs(account_path)?;
        run_dirs.push(run_dir);
        snapshot_dirs.push(snapshot_dir);
    }
    Ok((run_dirs, snapshot_dirs))
}
/// Delete the files and subdirectories in a directory.
/// This is useful when the process does not have permission
/// to delete the top-level directory itself but may still be
/// able to delete the contents of that directory.
///
/// Best-effort: failures to read the directory or to delete any
/// individual entry are logged as warnings and do not abort the
/// remaining deletions.
pub fn delete_contents_of_path(path: impl AsRef<Path>) {
    match fs_err::read_dir(path.as_ref()) {
        Err(err) => {
            warn!("Failed to delete contents: {err}")
        }
        Ok(dir_entries) => {
            for entry in dir_entries.flatten() {
                let sub_path = entry.path();
                // Directories need recursive removal; files need plain removal.
                let result = if sub_path.is_dir() {
                    fs_err::remove_dir_all(&sub_path)
                } else {
                    fs_err::remove_file(&sub_path)
                };
                if let Err(err) = result {
                    warn!("Failed to delete contents: {err}");
                }
            }
        }
    }
}
pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec<TempDir>, Vec<PathBuf>)> {
let temp_dirs: IoResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect();
let temp_dirs = temp_dirs?;
@ -1371,7 +1439,7 @@ pub struct AccountsDb {
/// true iff we want to skip the initial hash calculation on startup
pub skip_initial_hash_calc: bool,
pub(crate) storage: AccountStorage,
pub storage: AccountStorage,
#[allow(dead_code)]
/// from AccountsDbConfig
@ -1392,10 +1460,10 @@ pub struct AccountsDb {
/// Set of shrinkable stores organized by map of slot to append_vec_id
pub shrink_candidate_slots: Mutex<ShrinkCandidates>,
pub(crate) write_version: AtomicU64,
pub write_version: AtomicU64,
/// Set of storage paths to pick from
pub(crate) paths: Vec<PathBuf>,
pub paths: Vec<PathBuf>,
full_accounts_hash_cache_path: PathBuf,
incremental_accounts_hash_cache_path: PathBuf,
@ -1410,7 +1478,7 @@ pub struct AccountsDb {
/// Directory of paths this accounts_db needs to hold/remove
#[allow(dead_code)]
pub(crate) temp_paths: Option<Vec<TempDir>>,
pub temp_paths: Option<Vec<TempDir>>,
/// Starting file size of appendvecs
file_size: u64,
@ -1433,7 +1501,7 @@ pub struct AccountsDb {
// Stats for purges called outside of clean_accounts()
external_purge_slots_stats: PurgeStats,
pub(crate) shrink_stats: ShrinkStats,
pub shrink_stats: ShrinkStats,
pub(crate) shrink_ancient_stats: ShrinkAncientStats,
@ -1485,18 +1553,18 @@ pub struct AccountsDb {
/// number of slots remaining where filler accounts should be added
pub filler_account_slots_remaining: AtomicU64,
pub(crate) verify_accounts_hash_in_bg: VerifyAccountsHashInBackground,
pub verify_accounts_hash_in_bg: VerifyAccountsHashInBackground,
/// Used to disable logging dead slots during removal.
/// allow disabling noisy log
pub(crate) log_dead_slots: AtomicBool,
pub log_dead_slots: AtomicBool,
/// debug feature to scan every append vec and verify refcounts are equal
exhaustively_verify_refcounts: bool,
/// this will live here until the feature for partitioned epoch rewards is activated.
/// At that point, this and other code can be deleted.
pub(crate) partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig,
pub partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig,
/// the full accounts hash calculation as of a predetermined block height 'N'
/// to be included in the bank hash at a predetermined block height 'M'
@ -1534,7 +1602,7 @@ pub struct AccountsStats {
}
#[derive(Debug, Default)]
pub(crate) struct PurgeStats {
pub struct PurgeStats {
last_report: AtomicInterval,
safety_checks_elapsed: AtomicU64,
remove_cache_elapsed: AtomicU64,
@ -1949,7 +2017,7 @@ impl ShrinkStatsSub {
}
#[derive(Debug, Default)]
pub(crate) struct ShrinkStats {
pub struct ShrinkStats {
last_report: AtomicInterval,
num_slots_shrunk: AtomicUsize,
storage_read_elapsed: AtomicU64,
@ -2233,7 +2301,7 @@ pub fn make_min_priority_thread_pool() -> ThreadPool {
.unwrap()
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
fn example() -> Self {
let accounts_db = AccountsDb::new_single_for_tests();
@ -2834,7 +2902,7 @@ impl AccountsDb {
}
#[must_use]
pub(crate) fn purge_keys_exact<'a, C: 'a>(
pub fn purge_keys_exact<'a, C: 'a>(
&'a self,
pubkey_to_slot_set: impl Iterator<Item = &'a (Pubkey, C)>,
) -> (Vec<(Slot, AccountInfo)>, PubkeysRemovedFromAccountsIndex)
@ -3805,7 +3873,7 @@ impl AccountsDb {
/// get all accounts in all the storages passed in
/// for duplicate pubkeys, the account with the highest write_value is returned
pub(crate) fn get_unique_accounts_from_storage<'a>(
pub fn get_unique_accounts_from_storage<'a>(
&self,
store: &'a Arc<AccountStorageEntry>,
) -> GetUniqueAccountsResult<'a> {
@ -4084,7 +4152,7 @@ impl AccountsDb {
/// Drop 'shrink_in_progress', which will cause the old store to be removed from the storage map.
/// For 'shrink_in_progress'.'old_storage' which is not retained, insert in 'dead_storages' and optionally 'dirty_stores'
/// This is the end of the life cycle of `shrink_in_progress`.
pub(crate) fn mark_dirty_dead_stores(
pub fn mark_dirty_dead_stores(
&self,
slot: Slot,
add_dirty_stores: bool,
@ -4112,7 +4180,7 @@ impl AccountsDb {
dead_storages
}
pub(crate) fn drop_or_recycle_stores(
pub fn drop_or_recycle_stores(
&self,
dead_storages: Vec<Arc<AccountStorageEntry>>,
stats: &ShrinkStats,
@ -4142,11 +4210,7 @@ impl AccountsDb {
}
/// return a store that can contain 'aligned_total' bytes
pub(crate) fn get_store_for_shrink(
&self,
slot: Slot,
aligned_total: u64,
) -> ShrinkInProgress<'_> {
pub fn get_store_for_shrink(&self, slot: Slot, aligned_total: u64) -> ShrinkInProgress<'_> {
let shrunken_store = self
.try_recycle_store(slot, aligned_total, aligned_total + 1024)
.unwrap_or_else(|| {
@ -4316,20 +4380,6 @@ impl AccountsDb {
}
}
#[cfg(test)]
pub(crate) fn sizes_of_accounts_in_storage_for_tests(&self, slot: Slot) -> Vec<usize> {
self.storage
.get_slot_storage_entry(slot)
.map(|storage| {
storage
.accounts
.account_iter()
.map(|account| account.stored_size())
.collect()
})
.unwrap_or_default()
}
/// 'accounts' that exist in the current slot we are combining into a different ancient slot
/// 'existing_ancient_pubkeys': pubkeys that exist currently in the ancient append vec slot
/// returns the pubkeys that are in 'accounts' that are already in 'existing_ancient_pubkeys'
@ -4964,7 +5014,7 @@ impl AccountsDb {
/// Insert a default bank hash stats for `slot`
///
/// This fn is called when creating a new bank from parent.
pub(crate) fn insert_default_bank_hash_stats(&self, slot: Slot, parent_slot: Slot) {
pub fn insert_default_bank_hash_stats(&self, slot: Slot, parent_slot: Slot) {
let mut bank_hash_stats = self.bank_hash_stats.lock().unwrap();
if bank_hash_stats.get(&slot).is_some() {
error!("set_hash: already exists; multiple forks with shared slot {slot} as child (parent: {parent_slot})!?");
@ -5613,7 +5663,7 @@ impl AccountsDb {
store
}
pub(crate) fn page_align(size: u64) -> u64 {
pub fn page_align(size: u64) -> u64 {
(size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}
@ -5684,13 +5734,9 @@ impl AccountsDb {
self.storage.insert(slot, store)
}
pub fn create_drop_bank_callback(
&self,
pruned_banks_sender: DroppedSlotsSender,
) -> SendDroppedBankCallback {
pub fn enable_bank_drop_callback(&self) {
self.is_bank_drop_callback_enabled
.store(true, Ordering::Release);
SendDroppedBankCallback::new(pruned_banks_sender)
}
/// This should only be called after the `Bank::drop()` runs in bank.rs, See BANK_DROP_SAFETY
@ -5748,7 +5794,7 @@ impl AccountsDb {
/// Purges every slot in `removed_slots` from both the cache and storage. This includes
/// entries in the accounts index, cache entries, and any backing storage entries.
pub(crate) fn purge_slots_from_cache_and_store<'a>(
pub fn purge_slots_from_cache_and_store<'a>(
&self,
removed_slots: impl Iterator<Item = &'a Slot> + Clone,
purge_stats: &PurgeStats,
@ -6248,8 +6294,9 @@ impl AccountsDb {
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
}
#[cfg(test)]
pub(crate) fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) {
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
pub fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) {
self.flush_slot_cache(slot);
}
@ -6567,7 +6614,7 @@ impl AccountsDb {
/// However, there is a clear path to be able to support this.
/// So, combine all accounts from 'slot_stores' into a new storage and return it.
/// This runs prior to the storages being put in AccountsDb.storage
pub(crate) fn combine_multiple_slots_into_one_at_startup(
pub fn combine_multiple_slots_into_one_at_startup(
path: &Path,
id: AppendVecId,
slot: Slot,
@ -7262,7 +7309,7 @@ impl AccountsDb {
stats.num_dirty_slots = num_dirty_slots;
}
pub(crate) fn calculate_accounts_hash(
pub fn calculate_accounts_hash(
&self,
data_source: CalcAccountsHashDataSource,
slot: Slot,
@ -7781,10 +7828,7 @@ impl AccountsDb {
/// 1. pubkey, hash pairs for the slot
/// 2. us spent scanning
/// 3. Measure started when we began accumulating
pub(crate) fn get_pubkey_hash_for_slot(
&self,
slot: Slot,
) -> (Vec<(Pubkey, Hash)>, u64, Measure) {
pub fn get_pubkey_hash_for_slot(&self, slot: Slot) -> (Vec<(Pubkey, Hash)>, u64, Measure) {
let mut scan = Measure::start("scan");
let scan_result: ScanStorageResult<(Pubkey, Hash), DashMap<Pubkey, Hash>> = self
@ -7821,7 +7865,7 @@ impl AccountsDb {
///
/// As part of calculating the accounts delta hash, get a list of accounts modified this slot
/// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning.
pub(crate) fn calculate_accounts_delta_hash_internal(
pub fn calculate_accounts_delta_hash_internal(
&self,
slot: Slot,
ignore: Option<Pubkey>,
@ -8569,7 +8613,7 @@ impl AccountsDb {
);
}
pub(crate) fn store_accounts_frozen<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>(
pub fn store_accounts_frozen<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>(
&self,
accounts: impl StorableAccounts<'a, T>,
hashes: Option<Vec<impl Borrow<Hash>>>,
@ -9381,7 +9425,7 @@ impl AccountsDb {
timings.storage_size_storages_us = storage_size_storages_time.as_us();
}
pub(crate) fn print_accounts_stats(&self, label: &str) {
pub fn print_accounts_stats(&self, label: &str) {
self.print_index(label);
self.print_count_and_status(label);
info!("recycle_stores:");
@ -9474,10 +9518,6 @@ pub(crate) enum UpdateIndexThreadSelection {
#[cfg(test)]
impl AccountsDb {
pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
Self::new_for_tests(paths, cluster_type)
}
pub fn new_with_config_for_tests(
paths: Vec<PathBuf>,
cluster_type: &ClusterType,
@ -9521,24 +9561,30 @@ impl AccountsDb {
let result = self.accounts_index.get(pubkey, Some(&ancestors), None);
result.map(|(list, index)| list.slot_list()[index].1.store_id())
}
pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize {
self.storage
.get_slot_storage_entry(slot)
.map(|storage| storage.count())
.unwrap_or(0)
.saturating_add(
self.accounts_cache
.slot_cache(slot)
.map(|slot_cache| slot_cache.len())
.unwrap_or_default(),
)
}
}
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
impl AccountsDb {
pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
Self::new_for_tests(paths, cluster_type)
}
pub fn load_without_fixed_root(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
self.do_load(
ancestors,
pubkey,
None,
LoadHint::Unspecified,
// callers of this expect zero lamport accounts that exist in the index to be returned as Some(empty)
LoadZeroLamports::SomeWithZeroLamportAccountForTests,
)
}
pub fn accounts_delta_hashes(&self) -> &Mutex<HashMap<Slot, AccountsDeltaHash>> {
&self.accounts_delta_hashes
}
@ -9558,6 +9604,182 @@ impl AccountsDb {
pub fn set_accounts_hash_for_tests(&self, slot: Slot, accounts_hash: AccountsHash) {
self.set_accounts_hash(slot, (accounts_hash, u64::default()));
}
pub fn assert_load_account(&self, slot: Slot, pubkey: Pubkey, expected_lamports: u64) {
let ancestors = vec![(slot, 0)].into_iter().collect();
let (account, slot) = self.load_without_fixed_root(&ancestors, &pubkey).unwrap();
assert_eq!((account.lamports(), slot), (expected_lamports, slot));
}
    /// Test helper: assert that `pubkey` is NOT loadable at `slot`.
    pub fn assert_not_load_account(&self, slot: Slot, pubkey: Pubkey) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        let load = self.load_without_fixed_root(&ancestors, &pubkey);
        assert!(load.is_none(), "{load:?}");
    }
    /// Test helper: spot-check `num` randomly sampled entries of `pubkeys` at
    /// `slot`, asserting each loads as a zero-data, default-owned account with
    /// `idx + count` lamports — the balance convention used by
    /// `create_account`/`modify_accounts`.
    pub fn check_accounts(&self, pubkeys: &[Pubkey], slot: Slot, num: usize, count: usize) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        for _ in 0..num {
            // `num` draws with replacement from [0, num)
            let idx = thread_rng().gen_range(0, num);
            let account = self.load_without_fixed_root(&ancestors, &pubkeys[idx]);
            let account1 = Some((
                AccountSharedData::new(
                    (idx + count) as u64,
                    0,
                    AccountSharedData::default().owner(),
                ),
                slot,
            ));
            assert_eq!(account, account1);
        }
    }
    /// callers used to call store_uncached. But, this is not allowed anymore.
    /// Stores `accounts` into the write cache at `slot`, using default
    /// reclaim behavior and the pooled index-update thread selection.
    pub fn store_for_tests(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
        self.store(
            (slot, accounts, INCLUDE_SLOT_IN_HASH_TESTS),
            &StoreTo::Cache,
            None,
            StoreReclaims::Default,
            UpdateIndexThreadSelection::PoolWithThreshold,
        );
    }
    /// Test helper: overwrite the first `num` entries of `pubkeys` at `slot`
    /// with zero-data, default-owned accounts holding `idx + count` lamports
    /// (matching the convention `check_accounts` verifies).
    #[allow(clippy::needless_range_loop)]
    pub fn modify_accounts(&self, pubkeys: &[Pubkey], slot: Slot, num: usize, count: usize) {
        for idx in 0..num {
            let account = AccountSharedData::new(
                (idx + count) as u64,
                0,
                AccountSharedData::default().owner(),
            );
            self.store_for_tests(slot, &[(&pubkeys[idx], &account)]);
        }
    }
    /// Test helper: assert that `slot` has a storage entry which is
    /// `Available`, holds exactly `count` alive accounts, and whose
    /// approximate stored count matches the actual number of stored accounts.
    pub fn check_storage(&self, slot: Slot, count: usize) {
        assert!(self.storage.get_slot_storage_entry(slot).is_some());
        let store = self.storage.get_slot_storage_entry(slot).unwrap();
        let total_count = store.count();
        assert_eq!(store.status(), AccountStorageStatus::Available);
        assert_eq!(total_count, count);
        let (expected_store_count, actual_store_count): (usize, usize) =
            (store.approx_stored_count(), store.all_accounts().len());
        assert_eq!(expected_store_count, actual_store_count);
    }
pub fn create_account(
&self,
pubkeys: &mut Vec<Pubkey>,
slot: Slot,
num: usize,
space: usize,
num_vote: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for t in 0..num {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((t + 1) as u64, space, AccountSharedData::default().owner());
pubkeys.push(pubkey);
assert!(self.load_without_fixed_root(&ancestors, &pubkey).is_none());
self.store_for_tests(slot, &[(&pubkey, &account)]);
}
for t in 0..num_vote {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((num + t + 1) as u64, space, &solana_vote_program::id());
pubkeys.push(pubkey);
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(self.load_without_fixed_root(&ancestors, &pubkey).is_none());
self.store_for_tests(slot, &[(&pubkey, &account)]);
}
}
    /// Stored sizes (bytes) of every account in `slot`'s storage, in iteration
    /// order; empty when the slot has no storage entry.
    pub fn sizes_of_accounts_in_storage_for_tests(&self, slot: Slot) -> Vec<usize> {
        self.storage
            .get_slot_storage_entry(slot)
            .map(|storage| {
                storage
                    .accounts
                    .account_iter()
                    .map(|account| account.stored_size())
                    .collect()
            })
            .unwrap_or_default()
    }
    /// Ref count recorded in the accounts index's storage layer for `pubkey`.
    pub fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
        self.accounts_index.ref_count_from_storage(pubkey)
    }
    /// Number of alive accounts for `slot`: the count from the slot's storage
    /// entry (if any) plus entries still sitting in the write cache for that
    /// slot. Either side contributes 0 when absent.
    pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize {
        self.storage
            .get_slot_storage_entry(slot)
            .map(|storage| storage.count())
            .unwrap_or(0)
            .saturating_add(
                self.accounts_cache
                    .slot_cache(slot)
                    .map(|slot_cache| slot_cache.len())
                    .unwrap_or_default(),
            )
    }
    /// useful to adapt tests written prior to introduction of the write cache
    /// to use the write cache
    /// Marks `slot` as a root, then flushes that root out of the write cache.
    pub fn add_root_and_flush_write_cache(&self, slot: Slot) {
        self.add_root(slot);
        self.flush_root_write_cache(slot);
    }
    /// useful to adapt tests written prior to introduction of the write cache
    /// to use the write cache
    /// Panics if `root` is not already tracked as an alive root in the index.
    pub fn flush_root_write_cache(&self, root: Slot) {
        assert!(
            self.accounts_index
                .roots_tracker
                .read()
                .unwrap()
                .alive_roots
                .contains(&root),
            "slot: {root}"
        );
        self.flush_accounts_cache(true, Some(root));
    }
    /// Total number of accounts (alive or not) in `slot`'s append vec; also
    /// asserts the store's approximate stored count agrees with the actual
    /// count. Returns 0 when the slot has no storage entry.
    pub fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
        let store = self.storage.get_slot_storage_entry(slot);
        if let Some(store) = store {
            let count = store.all_accounts().len();
            let stored_count = store.approx_stored_count();
            assert_eq!(stored_count, count);
            count
        } else {
            0
        }
    }
}
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
impl<'a> VerifyAccountsHashAndLamportsConfig<'a> {
    /// Test constructor: enables `test_hash_calculation`; disables mismatch
    /// ignoring, detailed debug info, and the background thread pool.
    pub fn new_for_test(
        ancestors: &'a Ancestors,
        epoch_schedule: &'a EpochSchedule,
        rent_collector: &'a RentCollector,
    ) -> Self {
        Self {
            ancestors,
            test_hash_calculation: true,
            epoch_schedule,
            rent_collector,
            ignore_mismatch: false,
            store_detailed_debug_info: false,
            use_bg_thread_pool: false,
            include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS,
        }
    }
}
/// A set of utility functions used for testing and benchmarking
@ -9649,7 +9871,7 @@ pub mod tests {
},
};
pub fn linear_ancestors(end_slot: u64) -> Ancestors {
fn linear_ancestors(end_slot: u64) -> Ancestors {
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..end_slot {
ancestors.insert(i, (i - 1) as usize);
@ -9688,21 +9910,6 @@ pub mod tests {
)
}
        /// Load `pubkey` as visible from `ancestors`, without requiring a
        /// fixed root; zero-lamport accounts in the index come back as
        /// `Some(empty)`.
        pub fn load_without_fixed_root(
            &self,
            ancestors: &Ancestors,
            pubkey: &Pubkey,
        ) -> Option<(AccountSharedData, Slot)> {
            self.do_load(
                ancestors,
                pubkey,
                None,
                LoadHint::Unspecified,
                // callers of this expect zero lamport accounts that exist in the index to be returned as Some(empty)
                LoadZeroLamports::SomeWithZeroLamportAccountForTests,
            )
        }
fn get_storage_for_slot(&self, slot: Slot) -> Option<Arc<AccountStorageEntry>> {
self.storage.get_slot_storage_entry(slot)
}
@ -9776,25 +9983,6 @@ pub mod tests {
}
}
    impl<'a> VerifyAccountsHashAndLamportsConfig<'a> {
        /// Test constructor: hash-calculation testing on; mismatch ignoring,
        /// detailed debug info, and the background thread pool off.
        pub fn new_for_test(
            ancestors: &'a Ancestors,
            epoch_schedule: &'a EpochSchedule,
            rent_collector: &'a RentCollector,
        ) -> Self {
            Self {
                ancestors,
                test_hash_calculation: true,
                epoch_schedule,
                rent_collector,
                ignore_mismatch: false,
                store_detailed_debug_info: false,
                use_bg_thread_pool: false,
                include_slot_in_hash: INCLUDE_SLOT_IN_HASH_TESTS,
            }
        }
    }
#[test]
fn test_maybe_unref_accounts_already_in_ancient() {
let db = AccountsDb::new_single_for_tests();
@ -11151,7 +11339,7 @@ pub mod tests {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
db.create_account(&mut pubkeys, 0, 100, 0, 0);
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
@ -11193,9 +11381,9 @@ pub mod tests {
let db = AccountsDb::new_single_for_tests();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
db.create_account(&mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
db.add_root_and_flush_write_cache(0);
check_storage(&db, 0, 2);
db.check_storage(0, 2);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
@ -11272,7 +11460,7 @@ pub mod tests {
.accounts_index
.get(&key, Some(&ancestors), None)
.is_some());
assert_load_account(&db, unrooted_slot, key, 1);
db.assert_load_account(unrooted_slot, key, 1);
// Purge the slot
db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
@ -11289,7 +11477,7 @@ pub mod tests {
// Test we can store for the same slot again and get the right information
let account0 = AccountSharedData::new(2, 0, &key);
db.store_for_tests(unrooted_slot, &[(&key, &account0)]);
assert_load_account(&db, unrooted_slot, key, 2);
db.assert_load_account(unrooted_slot, key, 2);
}
#[test]
@ -11302,38 +11490,6 @@ pub mod tests {
run_test_remove_unrooted_slot(false);
}
    /// Test helper: create/store `num` default-owned accounts and `num_vote`
    /// vote-owned accounts at `slot`, pushing each new pubkey to `pubkeys`.
    /// Balances are `t + 1` (and `num + t + 1` for vote accounts).
    pub fn create_account(
        accounts: &AccountsDb,
        pubkeys: &mut Vec<Pubkey>,
        slot: Slot,
        num: usize,
        space: usize,
        num_vote: usize,
    ) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        for t in 0..num {
            let pubkey = solana_sdk::pubkey::new_rand();
            let account =
                AccountSharedData::new((t + 1) as u64, space, AccountSharedData::default().owner());
            pubkeys.push(pubkey);
            // freshly generated keys must not already exist in the index
            assert!(accounts
                .load_without_fixed_root(&ancestors, &pubkey)
                .is_none());
            accounts.store_for_tests(slot, &[(&pubkey, &account)]);
        }
        for t in 0..num_vote {
            let pubkey = solana_sdk::pubkey::new_rand();
            let account =
                AccountSharedData::new((num + t + 1) as u64, space, &solana_vote_program::id());
            pubkeys.push(pubkey);
            let ancestors = vec![(slot, 0)].into_iter().collect();
            assert!(accounts
                .load_without_fixed_root(&ancestors, &pubkey)
                .is_none());
            accounts.store_for_tests(slot, &[(&pubkey, &account)]);
        }
    }
fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) {
for _ in 1..1000 {
let idx = thread_rng().gen_range(0, range);
@ -11359,64 +11515,12 @@ pub mod tests {
}
}
    /// Test helper: assert `slot` has an `Available` storage entry holding
    /// exactly `count` alive accounts, with a consistent stored-count estimate.
    pub fn check_storage(accounts: &AccountsDb, slot: Slot, count: usize) {
        assert!(accounts.storage.get_slot_storage_entry(slot).is_some());
        let store = accounts.storage.get_slot_storage_entry(slot).unwrap();
        let total_count = store.count();
        assert_eq!(store.status(), AccountStorageStatus::Available);
        assert_eq!(total_count, count);
        let (expected_store_count, actual_store_count): (usize, usize) =
            (store.approx_stored_count(), store.all_accounts().len());
        assert_eq!(expected_store_count, actual_store_count);
    }
    /// Test helper: spot-check `num` randomly sampled entries of `pubkeys` at
    /// `slot`, asserting each loads with `idx + count` lamports.
    pub fn check_accounts(
        accounts: &AccountsDb,
        pubkeys: &[Pubkey],
        slot: Slot,
        num: usize,
        count: usize,
    ) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        for _ in 0..num {
            // `num` draws with replacement from [0, num)
            let idx = thread_rng().gen_range(0, num);
            let account = accounts.load_without_fixed_root(&ancestors, &pubkeys[idx]);
            let account1 = Some((
                AccountSharedData::new(
                    (idx + count) as u64,
                    0,
                    AccountSharedData::default().owner(),
                ),
                slot,
            ));
            assert_eq!(account, account1);
        }
    }
    /// Test helper: overwrite the first `num` entries of `pubkeys` at `slot`
    /// with zero-data, default-owned accounts holding `idx + count` lamports.
    #[allow(clippy::needless_range_loop)]
    pub fn modify_accounts(
        accounts: &AccountsDb,
        pubkeys: &[Pubkey],
        slot: Slot,
        num: usize,
        count: usize,
    ) {
        for idx in 0..num {
            let account = AccountSharedData::new(
                (idx + count) as u64,
                0,
                AccountSharedData::default().owner(),
            );
            accounts.store_for_tests(slot, &[(&pubkeys[idx], &account)]);
        }
    }
#[test]
fn test_account_one() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 1, 0, 0);
db.create_account(&mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_without_fixed_root(&ancestors, &pubkeys[0]).unwrap();
let default_account = AccountSharedData::from(Account {
@ -11431,18 +11535,18 @@ pub mod tests {
let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
check_accounts(&db, &pubkeys, 0, 100, 1);
db.create_account(&mut pubkeys, 0, 100, 0, 0);
db.check_accounts(&pubkeys, 0, 100, 1);
}
#[test]
fn test_account_update() {
let accounts = AccountsDb::new_single_for_tests();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
accounts.create_account(&mut pubkeys, 0, 100, 0, 0);
update_accounts(&accounts, &pubkeys, 0, 99);
accounts.add_root_and_flush_write_cache(0);
check_storage(&accounts, 0, 100);
accounts.check_storage(0, 100);
}
#[test]
@ -11613,24 +11717,6 @@ pub mod tests {
);
}
    impl AccountsDb {
        /// Total account count (alive or not) in `slot`'s append vec, with a
        /// consistency assertion on the stored-count estimate; 0 if no storage.
        pub fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
            let store = self.storage.get_slot_storage_entry(slot);
            if let Some(store) = store {
                let count = store.all_accounts().len();
                let stored_count = store.approx_stored_count();
                assert_eq!(stored_count, count);
                count
            } else {
                0
            }
        }

        /// Ref count recorded in the accounts index's storage layer for `pubkey`.
        pub fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
            self.accounts_index.ref_count_from_storage(pubkey)
        }
    }
#[test]
fn test_clean_zero_lamport_and_dead_slot() {
solana_logger::setup();
@ -12075,25 +12161,6 @@ pub mod tests {
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
    /// Test helper: assert `pubkey` is loadable at `slot` with
    /// `expected_lamports`. NOTE(review): the loaded slot shadows the `slot`
    /// parameter, so the slot half of the assertion compares slot to itself
    /// and is vacuous — only the lamports check is meaningful.
    pub fn assert_load_account(
        accounts: &AccountsDb,
        slot: Slot,
        pubkey: Pubkey,
        expected_lamports: u64,
    ) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        let (account, slot) = accounts
            .load_without_fixed_root(&ancestors, &pubkey)
            .unwrap();
        assert_eq!((account.lamports(), slot), (expected_lamports, slot));
    }
    /// Test helper: assert that `pubkey` is NOT loadable at `slot`.
    pub fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        let load = accounts.load_without_fixed_root(&ancestors, &pubkey);
        assert!(load.is_none(), "{load:?}");
    }
fn assert_no_stores(accounts: &AccountsDb, slot: Slot) {
let store = accounts.storage.get_slot_storage_entry(slot);
assert!(store.is_none());
@ -12148,7 +12215,7 @@ pub mod tests {
accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
accounts.assert_load_account(current_slot, pubkey, zero_lamport);
current_slot += 1;
accounts.calculate_accounts_delta_hash(current_slot);
@ -12177,9 +12244,9 @@ pub mod tests {
// storage for slot 1 had 2 accounts, now has 1 after pubkey 1
// was reclaimed
check_storage(&accounts, 1, 1);
accounts.check_storage(1, 1);
// storage for slot 2 had 1 accounts, now has 1
check_storage(&accounts, 2, 1);
accounts.check_storage(2, 1);
}
#[test]
@ -12208,7 +12275,7 @@ pub mod tests {
accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
accounts.assert_load_account(current_slot, pubkey, zero_lamport);
// Otherwise slot 2 will not be removed
current_slot += 1;
@ -15970,39 +16037,6 @@ pub mod tests {
}
impl AccountsDb {
        /// useful to adapt tests written prior to introduction of the write cache
        /// to use the write cache
        /// Marks `slot` as a root, then flushes it from the write cache.
        pub fn add_root_and_flush_write_cache(&self, slot: Slot) {
            self.add_root(slot);
            self.flush_root_write_cache(slot);
        }
        /// useful to adapt tests written prior to introduction of the write cache
        /// to use the write cache
        /// Panics if `root` is not already an alive root in the index.
        pub(crate) fn flush_root_write_cache(&self, root: Slot) {
            assert!(
                self.accounts_index
                    .roots_tracker
                    .read()
                    .unwrap()
                    .alive_roots
                    .contains(&root),
                "slot: {root}"
            );
            self.flush_accounts_cache(true, Some(root));
        }
        /// callers used to call store_uncached. But, this is not allowed anymore.
        /// Stores `accounts` into the write cache at `slot` with default
        /// reclaim and pooled index-update settings.
        pub fn store_for_tests(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
            self.store(
                (slot, accounts, INCLUDE_SLOT_IN_HASH_TESTS),
                &StoreTo::Cache,
                None,
                StoreReclaims::Default,
                UpdateIndexThreadSelection::PoolWithThreshold,
            );
        }
/// helper function to test unref_accounts or clean_dead_slots_from_accounts_index
fn test_unref(
&self,

View File

@ -1148,6 +1148,51 @@ pub struct IncrementalAccountsHash(pub Hash);
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct AccountsDeltaHash(pub Hash);
/// Snapshot serde-safe accounts delta hash
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq, AbiExample)]
pub struct SerdeAccountsDeltaHash(pub Hash);

// Newtype conversions in both directions; the wrapped hash is copied verbatim.
impl From<SerdeAccountsDeltaHash> for AccountsDeltaHash {
    fn from(accounts_delta_hash: SerdeAccountsDeltaHash) -> Self {
        Self(accounts_delta_hash.0)
    }
}
impl From<AccountsDeltaHash> for SerdeAccountsDeltaHash {
    fn from(accounts_delta_hash: AccountsDeltaHash) -> Self {
        Self(accounts_delta_hash.0)
    }
}
/// Snapshot serde-safe accounts hash
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq, AbiExample)]
pub struct SerdeAccountsHash(pub Hash);

// Newtype conversions in both directions; the wrapped hash is copied verbatim.
impl From<SerdeAccountsHash> for AccountsHash {
    fn from(accounts_hash: SerdeAccountsHash) -> Self {
        Self(accounts_hash.0)
    }
}
impl From<AccountsHash> for SerdeAccountsHash {
    fn from(accounts_hash: AccountsHash) -> Self {
        Self(accounts_hash.0)
    }
}
/// Snapshot serde-safe incremental accounts hash
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq, AbiExample)]
pub struct SerdeIncrementalAccountsHash(pub Hash);

// Newtype conversions in both directions; the wrapped hash is copied verbatim.
impl From<SerdeIncrementalAccountsHash> for IncrementalAccountsHash {
    fn from(incremental_accounts_hash: SerdeIncrementalAccountsHash) -> Self {
        Self(incremental_accounts_hash.0)
    }
}
impl From<IncrementalAccountsHash> for SerdeIncrementalAccountsHash {
    fn from(incremental_accounts_hash: IncrementalAccountsHash) -> Self {
        Self(incremental_accounts_hash.0)
    }
}
#[cfg(test)]
pub mod tests {
use {super::*, itertools::Itertools, std::str::FromStr, tempfile::tempdir};

View File

@ -449,7 +449,7 @@ pub struct RootsTracker {
/// Constructed during load from snapshots.
/// Updated every time we add a new root or clean/shrink an append vec into irrelevancy.
/// Range is approximately the last N slots where N is # slots per epoch.
pub(crate) alive_roots: RollingBitField,
pub alive_roots: RollingBitField,
uncleaned_roots: HashSet<Slot>,
previous_uncleaned_roots: HashSet<Slot>,
}
@ -677,7 +677,7 @@ pub struct AccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> {
program_id_index: SecondaryIndex<DashMapSecondaryIndexEntry>,
spl_token_mint_index: SecondaryIndex<DashMapSecondaryIndexEntry>,
spl_token_owner_index: SecondaryIndex<RwLockSecondaryIndexEntry>,
pub(crate) roots_tracker: RwLock<RootsTracker>,
pub roots_tracker: RwLock<RootsTracker>,
ongoing_scan_roots: RwLock<BTreeMap<Slot, u64>>,
// Each scan has some latest slot `S` that is the tip of the fork the scan
// is iterating over. The unique id of that slot `S` is recorded here (note we don't use
@ -1420,7 +1420,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
/// Get an account
/// The latest account that appears in `ancestors` or `roots` is returned.
pub(crate) fn get(
pub fn get(
&self,
pubkey: &Pubkey,
ancestors: Option<&Ancestors>,
@ -1996,7 +1996,8 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
self.roots_tracker.read().unwrap().uncleaned_roots.len()
}
#[cfg(test)]
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
// filter any rooted entries and return them along with a bool that indicates
// if this account has no more entries. Note this does not update the secondary
// indexes!
@ -2010,6 +2011,34 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
}
}
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
impl<T: IndexValue> AccountIndexGetResult<T> {
    /// Option-like unwrap: returns the entry lock and slot-list index from a
    /// `Found` result, panicking on any other variant.
    pub fn unwrap(self) -> (ReadAccountMapEntry<T>, usize) {
        match self {
            AccountIndexGetResult::Found(lock, size) => (lock, size),
            _ => {
                panic!("trying to unwrap AccountIndexGetResult with non-Success result");
            }
        }
    }

    /// True when the result is not `Found`.
    pub fn is_none(&self) -> bool {
        !self.is_some()
    }

    /// True when the result is `Found`.
    pub fn is_some(&self) -> bool {
        matches!(self, AccountIndexGetResult::Found(_lock, _size))
    }

    /// Option-like map over the `Found` payload; other variants become `None`.
    pub fn map<V, F: FnOnce((ReadAccountMapEntry<T>, usize)) -> V>(self, f: F) -> Option<V> {
        match self {
            AccountIndexGetResult::Found(lock, size) => Some(f((lock, size))),
            _ => None,
        }
    }
}
#[cfg(test)]
pub mod tests {
use {
@ -2045,32 +2074,6 @@ pub mod tests {
}
}
impl<T: IndexValue> AccountIndexGetResult<T> {
pub fn unwrap(self) -> (ReadAccountMapEntry<T>, usize) {
match self {
AccountIndexGetResult::Found(lock, size) => (lock, size),
_ => {
panic!("trying to unwrap AccountIndexGetResult with non-Success result");
}
}
}
pub fn is_none(&self) -> bool {
!self.is_some()
}
pub fn is_some(&self) -> bool {
matches!(self, AccountIndexGetResult::Found(_lock, _size))
}
pub fn map<V, F: FnOnce((ReadAccountMapEntry<T>, usize)) -> V>(self, f: F) -> Option<V> {
match self {
AccountIndexGetResult::Found(lock, size) => Some(f((lock, size))),
_ => None,
}
}
}
fn create_dashmap_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) {
{
// Check that we're actually testing the correct variant

View File

@ -14,9 +14,9 @@ use {
// Eager rent collection repeats in cyclic manner.
// Each cycle is composed of <partition_count> number of tiny pubkey subranges
// to scan, which is always multiple of the number of slots in epoch.
pub(crate) type PartitionIndex = u64;
pub type PartitionIndex = u64;
type PartitionsPerCycle = u64;
pub(crate) type Partition = (PartitionIndex, PartitionIndex, PartitionsPerCycle);
pub type Partition = (PartitionIndex, PartitionIndex, PartitionsPerCycle);
type RentCollectionCycleParams = (
Epoch,
SlotCount,
@ -43,7 +43,7 @@ fn partition_index_from_slot_index(
slot_index_in_epoch + epoch_index_in_cycle * slot_count_per_epoch
}
pub(crate) fn get_partition_from_slot_indexes(
pub fn get_partition_from_slot_indexes(
cycle_params: RentCollectionCycleParams,
start_slot_index: SlotIndex,
end_slot_index: SlotIndex,
@ -100,8 +100,9 @@ pub(crate) fn get_partition_from_slot_indexes(
/// used only by filler accounts in debug path
/// previous means slot - 1, not parent
#[cfg(test)]
pub(crate) fn variable_cycle_partition_from_previous_slot(
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
pub fn variable_cycle_partition_from_previous_slot(
epoch_schedule: &EpochSchedule,
slot: Slot,
) -> Partition {
@ -137,7 +138,7 @@ pub(crate) fn variable_cycle_partition_from_previous_slot(
/// 1. 'pubkey_range_from_partition'
/// 2. 'partition_from_pubkey'
/// 3. this function
pub(crate) fn get_partition_end_indexes(partition: &Partition) -> Vec<PartitionIndex> {
pub fn get_partition_end_indexes(partition: &Partition) -> Vec<PartitionIndex> {
if partition.0 == partition.1 && partition.0 == 0 {
// special case for start=end=0. ie. (0, 0, N). This returns [0]
vec![0]
@ -149,7 +150,7 @@ pub(crate) fn get_partition_end_indexes(partition: &Partition) -> Vec<PartitionI
}
}
pub(crate) fn rent_single_epoch_collection_cycle_params(
pub fn rent_single_epoch_collection_cycle_params(
epoch: Epoch,
slot_count_per_epoch: SlotCount,
) -> RentCollectionCycleParams {
@ -163,7 +164,7 @@ pub(crate) fn rent_single_epoch_collection_cycle_params(
)
}
pub(crate) fn rent_multi_epoch_collection_cycle_params(
pub fn rent_multi_epoch_collection_cycle_params(
epoch: Epoch,
slot_count_per_epoch: SlotCount,
first_normal_epoch: Epoch,
@ -180,7 +181,7 @@ pub(crate) fn rent_multi_epoch_collection_cycle_params(
)
}
pub(crate) fn get_partitions(
pub fn get_partitions(
slot: Slot,
parent_slot: Slot,
slot_count_in_two_day: SlotCount,
@ -221,7 +222,7 @@ pub(crate) fn get_partitions(
// start_index..=end_index. But it has some exceptional cases, including
// this important and valid one:
// 0..=0: the first partition in the new epoch when crossing epochs
pub(crate) fn pubkey_range_from_partition(
pub fn pubkey_range_from_partition(
(start_index, end_index, partition_count): Partition,
) -> RangeInclusive<Pubkey> {
assert!(start_index <= end_index);
@ -336,14 +337,14 @@ pub(crate) fn pubkey_range_from_partition(
start_pubkey_final..=end_pubkey_final
}
pub(crate) fn prefix_from_pubkey(pubkey: &Pubkey) -> u64 {
pub fn prefix_from_pubkey(pubkey: &Pubkey) -> u64 {
const PREFIX_SIZE: usize = mem::size_of::<u64>();
u64::from_be_bytes(pubkey.as_ref()[0..PREFIX_SIZE].try_into().unwrap())
}
/// This is the inverse of pubkey_range_from_partition.
/// return the lowest end_index which would contain this pubkey
pub(crate) fn partition_from_pubkey(
pub fn partition_from_pubkey(
pubkey: &Pubkey,
partition_count: PartitionsPerCycle,
) -> PartitionIndex {

View File

@ -89,6 +89,36 @@ impl Ancestors {
self.ancestors.max_exclusive().saturating_sub(1)
}
}
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
impl std::iter::FromIterator<(Slot, usize)> for Ancestors {
    /// Builds an `Ancestors` from `(slot, size)` pairs; the size component is
    /// discarded by the underlying `From<Vec<(Slot, usize)>>` conversion.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = (Slot, usize)>,
    {
        // Idiomatic collect() instead of the original manual Vec + push loop.
        Ancestors::from(iter.into_iter().collect::<Vec<_>>())
    }
}
#[cfg(feature = "dev-context-only-utils")]
impl From<Vec<(Slot, usize)>> for Ancestors {
    /// Builds an `Ancestors` from `(slot, size)` pairs, keeping only the slots.
    fn from(source: Vec<(Slot, usize)>) -> Ancestors {
        Ancestors::from(source.into_iter().map(|(slot, _)| slot).collect::<Vec<_>>())
    }
}
#[cfg(feature = "dev-context-only-utils")]
impl Ancestors {
    /// Test helper: insert `slot`. The `_size` argument exists only to match
    /// the historical HashMap-based call sites and is ignored.
    pub fn insert(&mut self, slot: Slot, _size: usize) {
        self.ancestors.insert(slot);
    }
}
#[cfg(test)]
pub mod tests {
use {
@ -96,30 +126,6 @@ pub mod tests {
std::collections::HashSet,
};
impl std::iter::FromIterator<(Slot, usize)> for Ancestors {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (Slot, usize)>,
{
let mut data = Vec::new();
for i in iter {
data.push(i);
}
Ancestors::from(data)
}
}
impl From<Vec<(Slot, usize)>> for Ancestors {
fn from(source: Vec<(Slot, usize)>) -> Ancestors {
Ancestors::from(source.into_iter().map(|(slot, _)| slot).collect::<Vec<_>>())
}
}
impl Ancestors {
pub fn insert(&mut self, slot: Slot, _size: usize) {
self.ancestors.insert(slot);
}
}
#[test]
fn test_ancestors_permutations() {
solana_logger::setup();

View File

@ -16,7 +16,7 @@ struct HashAge {
}
/// Low memory overhead, so can be cloned for every checkpoint
#[frozen_abi(digest = "J66ssCYGtWdQu5oyJxFKFeZY86nUjThBdBeXQYuRPDvE")]
#[frozen_abi(digest = "8upYCMG37Awf4FGQ5kKtZARHP1QfD2GMpQCPnwCCsxhu")]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, AbiExample)]
pub struct BlockhashQueue {
/// index of last hash to be registered
@ -124,7 +124,7 @@ impl BlockhashQueue {
})
}
pub(crate) fn get_max_age(&self) -> usize {
pub fn get_max_age(&self) -> usize {
self.max_age
}
}

View File

@ -133,7 +133,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> BucketMapHolder<T, U>
}
/// return when the bg threads have reached an 'idle' state
pub(crate) fn wait_for_idle(&self) {
pub fn wait_for_idle(&self) {
assert!(self.get_startup());
if self.disk.is_none() {
return;

View File

@ -23,7 +23,7 @@ pub struct Header {
count: usize,
}
pub(crate) struct CacheHashDataFile {
pub struct CacheHashDataFile {
cell_size: u64,
mmap: MmapMut,
capacity: u64,
@ -31,13 +31,13 @@ pub(crate) struct CacheHashDataFile {
impl CacheHashDataFile {
/// return a slice of a reference to all the cache hash data from the mmapped file
pub(crate) fn get_cache_hash_data(&self) -> &[EntryType] {
pub fn get_cache_hash_data(&self) -> &[EntryType] {
self.get_slice(0)
}
#[cfg(test)]
/// Populate 'accumulator' from entire contents of the cache file.
pub(crate) fn load_all(
pub fn load_all(
&self,
accumulator: &mut SavedType,
start_bin_index: usize,
@ -196,7 +196,7 @@ impl CacheHashData {
#[cfg(test)]
/// load from 'file_name' into 'accumulator'
pub(crate) fn load(
pub fn load(
&self,
file_name: impl AsRef<Path>,
accumulator: &mut SavedType,
@ -213,7 +213,7 @@ impl CacheHashData {
}
/// map 'file_name' into memory
pub(crate) fn load_map(
pub fn load_map(
&self,
file_name: impl AsRef<Path>,
) -> Result<CacheHashDataFile, std::io::Error> {

View File

@ -94,7 +94,7 @@ pub struct InMemAccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<
bucket: Option<Arc<BucketApi<(Slot, U)>>>,
// pubkey ranges that this bin must hold in the cache while the range is present in this vec
pub(crate) cache_ranges_held: CacheRangesHeld,
pub cache_ranges_held: CacheRangesHeld,
// incremented each time stop_evictions is changed
stop_evictions_changes: AtomicU64,
// true while ranges are being manipulated. Used to keep an async flush from removing things while a range is being held.
@ -322,7 +322,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T,
/// lookup 'pubkey' in index (in_mem or disk).
/// call 'callback' whether found or not
pub(crate) fn get_internal<RT>(
pub fn get_internal<RT>(
&self,
pubkey: &K,
// return true if item should be added to in_mem cache
@ -545,7 +545,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T,
/// the new item.
/// if 'other_slot' is some, then also remove any entries in the slot list that are at 'other_slot'
/// return resulting len of slot list
pub(crate) fn lock_and_update_slot_list(
pub fn lock_and_update_slot_list(
current: &AccountMapEntryInner<T>,
new_value: (Slot, T),
other_slot: Option<Slot>,
@ -920,7 +920,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T,
self.stop_evictions_changes.load(Ordering::Acquire)
}
pub(crate) fn flush(&self, can_advance_age: bool) {
pub fn flush(&self, can_advance_age: bool) {
if let Some(flush_guard) = FlushGuard::lock(&self.flushing_active) {
self.flush_internal(&flush_guard, can_advance_age)
}
@ -1089,7 +1089,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T,
/// duplicate pubkeys have a slot list with len > 1
/// These were collected for this bin when we did batch inserts in the bg flush threads.
/// Insert these into the in-mem index, then return the duplicate (Slot, Pubkey)
pub(crate) fn populate_and_retrieve_duplicate_keys_from_startup(&self) -> Vec<(Slot, Pubkey)> {
pub fn populate_and_retrieve_duplicate_keys_from_startup(&self) -> Vec<(Slot, Pubkey)> {
// in order to return accurate and complete duplicates, we must have nothing left remaining to insert
assert!(self.startup_info.insert.lock().unwrap().is_empty());

View File

@ -3,7 +3,7 @@ use solana_sdk::pubkey::{Pubkey, PUBKEY_BYTES};
solana_sdk::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA");
pub(crate) mod program_v3_4_0 {
pub mod program_v3_4_0 {
solana_sdk::declare_id!("NToK4t5AQzxPNpUA84DkxgfXaVDbDQQjpHKCqsbY46B");
}
@ -23,7 +23,7 @@ pub const SPL_TOKEN_ACCOUNT_MINT_OFFSET: usize = 0;
pub const SPL_TOKEN_ACCOUNT_OWNER_OFFSET: usize = 32;
const SPL_TOKEN_ACCOUNT_LENGTH: usize = 165;
pub(crate) trait GenericTokenAccount {
pub trait GenericTokenAccount {
fn valid_account_data(account_data: &[u8]) -> bool;
// Call after account length has already been verified

59
accounts-db/src/lib.rs Normal file
View File

@ -0,0 +1,59 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
#![allow(clippy::integer_arithmetic)]
#[macro_use]
extern crate lazy_static;
pub mod account_info;
pub mod account_overrides;
pub mod account_rent_state;
pub mod account_storage;
pub mod accounts;
pub mod accounts_cache;
pub mod accounts_db;
pub mod accounts_file;
pub mod accounts_hash;
pub mod accounts_index;
pub mod accounts_index_storage;
pub mod accounts_partition;
pub mod accounts_update_notifier_interface;
pub mod active_stats;
pub mod ancestors;
pub mod ancient_append_vecs;
pub mod append_vec;
pub mod blockhash_queue;
pub mod bucket_map_holder;
pub mod bucket_map_holder_stats;
pub mod cache_hash_data;
pub mod cache_hash_data_stats;
pub mod contains;
pub mod epoch_accounts_hash;
pub mod hardened_unpack;
pub mod in_mem_accounts_index;
pub mod inline_spl_token;
pub mod inline_spl_token_2022;
pub mod nonce_info;
pub mod partitioned_rewards;
mod pubkey_bins;
mod read_only_accounts_cache;
pub mod rent_collector;
pub mod rent_debits;
mod rolling_bit_field;
pub mod secondary_index;
pub mod shared_buffer_reader;
pub mod sorted_storages;
pub mod stake_rewards;
pub mod storable_accounts;
pub mod tiered_storage;
pub mod transaction_error_metrics;
pub mod transaction_results;
mod verify_accounts_hash_in_background;
pub mod waitable_condvar;
#[macro_use]
extern crate solana_metrics;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate solana_frozen_abi_macro;

View File

@ -6,22 +6,22 @@ use solana_sdk::clock::Slot;
#[derive(Debug)]
/// Configuration options for partitioned epoch rewards.
/// This struct allows various forms of testing, especially prior to feature activation.
pub(crate) struct PartitionedEpochRewardsConfig {
pub struct PartitionedEpochRewardsConfig {
/// Number of blocks for reward calculation and storing vote accounts.
/// Distributing rewards to stake accounts begins AFTER this many blocks.
/// Normally, this will be 1.
/// if force_one_slot_partitioned_rewards, this will be 0 (ie. we take 0 blocks just for reward calculation)
pub(crate) reward_calculation_num_blocks: Slot,
pub reward_calculation_num_blocks: Slot,
/// number of stake accounts to store in one block during partitioned reward interval
/// normally, this is a number tuned for reasonable performance, such as 4096 accounts/block
/// if force_one_slot_partitioned_rewards, this will usually be u64::MAX so that all stake accounts are written in the first block
pub(crate) stake_account_stores_per_block: Slot,
pub stake_account_stores_per_block: Slot,
/// if true, end of epoch bank rewards will force using partitioned rewards distribution.
/// see `set_test_enable_partitioned_rewards`
pub(crate) test_enable_partitioned_rewards: bool,
pub test_enable_partitioned_rewards: bool,
/// if true, end of epoch non-partitioned bank rewards will test the partitioned rewards distribution vote and stake accounts
/// This has a significant performance impact on the first slot in each new epoch.
pub(crate) test_compare_partitioned_epoch_rewards: bool,
pub test_compare_partitioned_epoch_rewards: bool,
}
impl Default for PartitionedEpochRewardsConfig {
@ -55,7 +55,7 @@ pub enum TestPartitionedEpochRewards {
#[allow(dead_code)]
impl PartitionedEpochRewardsConfig {
pub(crate) fn new(test: TestPartitionedEpochRewards) -> Self {
pub fn new(test: TestPartitionedEpochRewards) -> Self {
match test {
TestPartitionedEpochRewards::None => Self::default(),
TestPartitionedEpochRewards::CompareResults => {

View File

@ -27,7 +27,7 @@ struct ReadOnlyAccountCacheEntry {
}
#[derive(Debug)]
pub(crate) struct ReadOnlyAccountsCache {
pub struct ReadOnlyAccountsCache {
cache: DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry>,
// When an item is first entered into the cache, it is added to the end of
// the queue. Also each time an entry is looked up from the cache it is
@ -44,7 +44,7 @@ pub(crate) struct ReadOnlyAccountsCache {
}
impl ReadOnlyAccountsCache {
pub(crate) fn new(max_data_size: usize) -> Self {
pub fn new(max_data_size: usize) -> Self {
Self {
max_data_size,
cache: DashMap::default(),
@ -102,7 +102,7 @@ impl ReadOnlyAccountsCache {
CACHE_ENTRY_SIZE + account.data().len()
}
pub(crate) fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) {
pub fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) {
let key = (pubkey, slot);
let account_size = self.account_size(&account);
self.data_size.fetch_add(account_size, Ordering::Relaxed);
@ -138,7 +138,7 @@ impl ReadOnlyAccountsCache {
self.evicts.fetch_add(num_evicts, Ordering::Relaxed);
}
pub(crate) fn remove(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> {
pub fn remove(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> {
let (_, entry) = self.cache.remove(&(pubkey, slot))?;
// self.queue should be modified only after removing the entry from the
// cache, so that this is still safe if another thread writes to the
@ -149,11 +149,11 @@ impl ReadOnlyAccountsCache {
Some(entry.account)
}
pub(crate) fn cache_len(&self) -> usize {
pub fn cache_len(&self) -> usize {
self.cache.len()
}
pub(crate) fn data_size(&self) -> usize {
pub fn data_size(&self) -> usize {
self.data_size.load(Ordering::Relaxed)
}

View File

@ -49,7 +49,7 @@ enum RentResult {
}
impl RentCollector {
pub(crate) fn new(
pub fn new(
epoch: Epoch,
epoch_schedule: EpochSchedule,
slots_per_year: f64,
@ -63,7 +63,7 @@ impl RentCollector {
}
}
pub(crate) fn clone_with_epoch(&self, epoch: Epoch) -> Self {
pub fn clone_with_epoch(&self, epoch: Epoch) -> Self {
Self {
epoch,
..self.clone()
@ -71,18 +71,14 @@ impl RentCollector {
}
/// true if it is easy to determine this account should consider having rent collected from it
pub(crate) fn should_collect_rent(
&self,
address: &Pubkey,
account: &impl ReadableAccount,
) -> bool {
pub fn should_collect_rent(&self, address: &Pubkey, account: &impl ReadableAccount) -> bool {
!(account.executable() // executable accounts must be rent-exempt balance
|| *address == incinerator::id())
}
/// given an account that 'should_collect_rent'
/// returns (amount rent due, is_exempt_from_rent)
pub(crate) fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue {
pub fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue {
if self
.rent
.is_exempt(account.lamports(), account.data().len())
@ -111,7 +107,7 @@ impl RentCollector {
// This is NOT thread safe at some level. If we try to collect from the same account in
// parallel, we may collect twice.
#[must_use = "add to Bank::collected_rent"]
pub(crate) fn collect_from_existing_account(
pub fn collect_from_existing_account(
&self,
address: &Pubkey,
account: &mut AccountSharedData,
@ -188,11 +184,11 @@ impl RentCollector {
/// Information computed during rent collection
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub(crate) struct CollectedInfo {
pub struct CollectedInfo {
/// Amount of rent collected from account
pub(crate) rent_amount: u64,
pub rent_amount: u64,
/// Size of data reclaimed from account (happens when account's lamports go to zero)
pub(crate) account_data_len_reclaimed: u64,
pub account_data_len_reclaimed: u64,
}
impl std::ops::Add for CollectedInfo {

View File

@ -1,11 +1,11 @@
use {
crate::bank::RewardInfo,
crate::stake_rewards::RewardInfo,
solana_sdk::{pubkey::Pubkey, reward_type::RewardType},
std::collections::HashMap,
};
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct RentDebit {
pub struct RentDebit {
rent_collected: u64,
post_balance: u64,
}
@ -27,18 +27,24 @@ impl RentDebit {
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct RentDebits(HashMap<Pubkey, RentDebit>);
impl RentDebits {
pub(crate) fn get_account_rent_debit(&self, address: &Pubkey) -> u64 {
pub fn get_account_rent_debit(&self, address: &Pubkey) -> u64 {
self.0
.get(address)
.map(|r| r.rent_collected)
.unwrap_or_default()
}
#[cfg(test)]
pub(crate) fn len(&self) -> usize {
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
pub fn len(&self) -> usize {
self.0.len()
}
#[cfg(feature = "dev-context-only-utils")]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn insert(&mut self, address: &Pubkey, rent_collected: u64, post_balance: u64) {
if rent_collected != 0 {
self.0.insert(

View File

@ -0,0 +1,115 @@
//! Code for stake and vote rewards
use {
crate::{accounts_db::IncludeSlotInHash, storable_accounts::StorableAccounts},
solana_sdk::{
account::AccountSharedData, clock::Slot, pubkey::Pubkey, reward_type::RewardType,
},
};
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, AbiExample, Clone, Copy)]
pub struct RewardInfo {
pub reward_type: RewardType,
/// Reward amount
pub lamports: i64,
/// Account balance in lamports after `lamports` was applied
pub post_balance: u64,
/// Vote account commission when the reward was credited, only present for voting and staking rewards
pub commission: Option<u8>,
}
#[derive(AbiExample, Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct StakeReward {
pub stake_pubkey: Pubkey,
pub stake_reward_info: RewardInfo,
pub stake_account: AccountSharedData,
}
impl StakeReward {
pub fn get_stake_reward(&self) -> i64 {
self.stake_reward_info.lamports
}
}
/// allow [StakeReward] to be passed to `StoreAccounts` directly without copies or vec construction
impl<'a> StorableAccounts<'a, AccountSharedData> for (Slot, &'a [StakeReward], IncludeSlotInHash) {
fn pubkey(&self, index: usize) -> &Pubkey {
&self.1[index].stake_pubkey
}
fn account(&self, index: usize) -> &AccountSharedData {
&self.1[index].stake_account
}
fn slot(&self, _index: usize) -> Slot {
// per-index slot is not unique per slot when per-account slot is not included in the source data
self.target_slot()
}
fn target_slot(&self) -> Slot {
self.0
}
fn len(&self) -> usize {
self.1.len()
}
fn include_slot_in_hash(&self) -> IncludeSlotInHash {
self.2
}
}
#[cfg(feature = "dev-context-only-utils")]
use {
rand::Rng,
solana_sdk::{
account::WritableAccount,
rent::Rent,
signature::{Keypair, Signer},
},
solana_stake_program::stake_state,
solana_vote_program::vote_state,
};
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
impl StakeReward {
pub fn new_random() -> Self {
let mut rng = rand::thread_rng();
let rent = Rent::free();
let validator_pubkey = solana_sdk::pubkey::new_rand();
let validator_stake_lamports = 20;
let validator_staking_keypair = Keypair::new();
let validator_voting_keypair = Keypair::new();
let validator_vote_account = vote_state::create_account(
&validator_voting_keypair.pubkey(),
&validator_pubkey,
10,
validator_stake_lamports,
);
let validator_stake_account = stake_state::create_account(
&validator_staking_keypair.pubkey(),
&validator_voting_keypair.pubkey(),
&validator_vote_account,
&rent,
validator_stake_lamports,
);
Self {
stake_pubkey: Pubkey::new_unique(),
stake_reward_info: RewardInfo {
reward_type: RewardType::Staking,
lamports: rng.gen_range(1, 200),
post_balance: 0, /* unused atm */
commission: None, /* unused atm */
},
stake_account: validator_stake_account,
}
}
pub fn credit(&mut self, amount: u64) {
self.stake_reward_info.lamports = amount as i64;
self.stake_reward_info.post_balance += amount;
self.stake_account.checked_add_lamports(amount).unwrap();
}
}

View File

@ -203,7 +203,7 @@ pub struct StorableAccountsBySlot<'a> {
impl<'a> StorableAccountsBySlot<'a> {
#[allow(dead_code)]
/// each element of slots_and_accounts is (source slot, accounts moving FROM source slot)
pub(crate) fn new(
pub fn new(
target_slot: Slot,
slots_and_accounts: &'a [(Slot, &'a [&'a StoredAccountMeta<'a>])],
include_slot_in_hash: IncludeSlotInHash,

View File

@ -16,16 +16,16 @@ use {
#[derive(PartialEq, Eq, Debug)]
pub struct TieredReadableAccount<'accounts_file, M: TieredAccountMeta> {
/// TieredAccountMeta
pub(crate) meta: &'accounts_file M,
pub meta: &'accounts_file M,
/// The address of the account
pub(crate) address: &'accounts_file Pubkey,
pub address: &'accounts_file Pubkey,
/// The address of the account owner
pub(crate) owner: &'accounts_file Pubkey,
pub owner: &'accounts_file Pubkey,
/// The index for accessing the account inside its belonging AccountsFile
pub(crate) index: usize,
pub index: usize,
/// The account block that contains this account. Note that this account
/// block may be shared with other accounts.
pub(crate) account_block: &'accounts_file [u8],
pub account_block: &'accounts_file [u8],
}
impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, M> {

View File

@ -12,9 +12,9 @@ use {
};
#[derive(Debug)]
pub(crate) struct VerifyAccountsHashInBackground {
pub struct VerifyAccountsHashInBackground {
/// true when verification has completed or never had to run in background
pub(crate) verified: Arc<AtomicBool>,
pub verified: Arc<AtomicBool>,
/// enable waiting for verification to become complete
complete: Arc<WaitableCondvar>,
/// thread doing verification
@ -39,14 +39,14 @@ impl Default for VerifyAccountsHashInBackground {
impl VerifyAccountsHashInBackground {
/// start the bg thread to do the verification
pub(crate) fn start(&self, start: impl FnOnce() -> JoinHandle<bool>) {
pub fn start(&self, start: impl FnOnce() -> JoinHandle<bool>) {
// note that we're not verified before
self.verified.store(false, Ordering::Release);
*self.thread.lock().unwrap() = Some(start());
}
/// notify that the bg process has completed
pub(crate) fn background_finished(&self) {
pub fn background_finished(&self) {
self.complete.notify_all();
self.background_completed.store(true, Ordering::Release);
}
@ -54,7 +54,7 @@ impl VerifyAccountsHashInBackground {
/// notify that verification was completed successfully
/// This can occur because it completed in the background
/// or if the verification was run in the foreground.
pub(crate) fn verification_complete(&self) {
pub fn verification_complete(&self) {
self.verified.store(true, Ordering::Release);
}
@ -76,7 +76,7 @@ impl VerifyAccountsHashInBackground {
/// return true if bg hash verification is complete
/// return false if bg hash verification has not completed yet
/// if hash verification failed, a panic will occur
pub(crate) fn check_complete(&self) -> bool {
pub fn check_complete(&self) -> bool {
if self.verified.load(Ordering::Acquire) {
// already completed
return true;
@ -95,7 +95,7 @@ impl VerifyAccountsHashInBackground {
}
#[cfg(test)]
pub(crate) mod tests {
pub mod tests {
use {super::*, std::thread::Builder};
#[test]

View File

@ -13,6 +13,7 @@ edition = { workspace = true }
bincode = { workspace = true }
crossbeam-channel = { workspace = true }
futures = { workspace = true }
solana-accounts-db = { workspace = true }
solana-banks-interface = { workspace = true }
solana-client = { workspace = true }
solana-runtime = { workspace = true }

View File

@ -2,6 +2,7 @@ use {
bincode::{deserialize, serialize},
crossbeam_channel::{unbounded, Receiver, Sender},
futures::{future, prelude::stream::StreamExt},
solana_accounts_db::transaction_results::TransactionExecutionResult,
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithMetadata,
BanksTransactionResultWithSimulation, TransactionConfirmationStatus, TransactionMetadata,
@ -12,7 +13,6 @@ use {
bank::{Bank, TransactionSimulationResult},
bank_forks::BankForks,
commitment::BlockCommitmentCache,
transaction_results::TransactionExecutionResult,
},
solana_sdk::{
account::Account,

View File

@ -37,6 +37,7 @@ rayon = { workspace = true }
rolling-file = { workspace = true }
serde = { workspace = true }
serde_derive = { workspace = true }
solana-accounts-db = { workspace = true }
solana-address-lookup-table-program = { workspace = true }
solana-bloom = { workspace = true }
solana-client = { workspace = true }

View File

@ -5,14 +5,17 @@
use {
crossbeam_channel::{Receiver, Sender},
solana_gossip::cluster_info::{ClusterInfo, MAX_ACCOUNTS_HASHES},
solana_measure::measure_us,
solana_runtime::{
solana_accounts_db::{
accounts_db::CalcAccountsHashFlavor,
accounts_hash::{
AccountsHash, AccountsHashEnum, CalcAccountsHashConfig, HashStats,
IncrementalAccountsHash,
},
sorted_storages::SortedStorages,
},
solana_gossip::cluster_info::{ClusterInfo, MAX_ACCOUNTS_HASHES},
solana_measure::measure_us,
solana_runtime::{
serde_snapshot::BankIncrementalSnapshotPersistence,
snapshot_config::SnapshotConfig,
snapshot_package::{
@ -20,7 +23,6 @@ use {
SnapshotType,
},
snapshot_utils,
sorted_storages::SortedStorages,
},
solana_sdk::{
clock::{Slot, DEFAULT_MS_PER_SLOT},

View File

@ -1,17 +1,19 @@
use {
super::leader_slot_timing_metrics::LeaderExecuteAndCommitTimings,
itertools::Itertools,
solana_accounts_db::{
accounts::TransactionLoadResult,
transaction_results::{TransactionExecutionResult, TransactionResults},
},
solana_ledger::{
blockstore_processor::TransactionStatusSender, token_balances::collect_token_balances,
},
solana_measure::measure_us,
solana_runtime::{
accounts::TransactionLoadResult,
bank::{Bank, CommitTransactionCounts, TransactionBalancesSet},
bank_utils,
prioritization_fee_cache::PrioritizationFeeCache,
transaction_batch::TransactionBatch,
transaction_results::{TransactionExecutionResult, TransactionResults},
vote_sender_types::ReplayVoteSender,
},
solana_sdk::{pubkey::Pubkey, saturating_add_assign},

View File

@ -9,6 +9,10 @@ use {
BankingStageStats,
},
itertools::Itertools,
solana_accounts_db::{
transaction_error_metrics::TransactionErrorMetrics,
transaction_results::TransactionCheckResult,
},
solana_ledger::token_balances::collect_token_balances,
solana_measure::{measure::Measure, measure_us},
solana_poh::poh_recorder::{
@ -19,8 +23,6 @@ use {
solana_runtime::{
bank::{Bank, LoadAndExecuteTransactionsOutput},
transaction_batch::TransactionBatch,
transaction_error_metrics::TransactionErrorMetrics,
transaction_results::TransactionCheckResult,
},
solana_sdk::{
clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE},

View File

@ -3,8 +3,8 @@ use {
leader_slot_timing_metrics::{LeaderExecuteAndCommitTimings, LeaderSlotTimingMetrics},
unprocessed_transaction_storage::InsertPacketBatchSummary,
},
solana_accounts_db::transaction_error_metrics::*,
solana_poh::poh_recorder::BankStart,
solana_runtime::transaction_error_metrics::*,
solana_sdk::{clock::Slot, saturating_add_assign},
std::time::Instant,
};

View File

@ -978,11 +978,12 @@ mod test {
use {
super::*,
itertools::Itertools,
solana_accounts_db::contains::Contains,
solana_ledger::{
blockstore::{make_chaining_slot_entries, Blockstore},
get_tmp_ledger_path,
},
solana_runtime::{bank::Bank, bank_utils, contains::Contains},
solana_runtime::{bank::Bank, bank_utils},
solana_sdk::hash::Hash,
trees::tr,
};

View File

@ -1,7 +1,7 @@
use {
crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
solana_accounts_db::stake_rewards::RewardInfo,
solana_ledger::blockstore::Blockstore,
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::Slot, pubkey::Pubkey},
solana_transaction_status::Reward,
std::{

View File

@ -31,6 +31,12 @@ use {
crossbeam_channel::{bounded, unbounded, Receiver},
lazy_static::lazy_static,
quinn::Endpoint,
solana_accounts_db::{
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
accounts_index::AccountSecondaryIndexes,
accounts_update_notifier_interface::AccountsUpdateNotifier,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
},
solana_client::connection_cache::{ConnectionCache, Protocol},
solana_entry::poh::compute_hash_time_ns,
solana_geyser_plugin_manager::{
@ -83,13 +89,9 @@ use {
AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver,
PrunedBanksRequestHandler, SnapshotRequestHandler,
},
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
accounts_index::AccountSecondaryIndexes,
accounts_update_notifier_interface::AccountsUpdateNotifier,
bank::Bank,
bank_forks::BankForks,
commitment::BlockCommitmentCache,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
prioritization_fee_cache::PrioritizationFeeCache,
runtime_config::RuntimeConfig,
snapshot_archive_info::SnapshotArchiveInfoGetter,
@ -1917,7 +1919,7 @@ fn maybe_warp_slot(
&root_bank,
&Pubkey::default(),
warp_slot,
solana_runtime::accounts_db::CalcAccountsHashDataSource::Storages,
solana_accounts_db::accounts_db::CalcAccountsHashDataSource::Storages,
));
bank_forks.set_root(
warp_slot,

View File

@ -1,6 +1,14 @@
use {
crate::snapshot_utils::create_tmp_accounts_dir_for_tests,
log::*,
solana_accounts_db::{
accounts_db::{
AccountShrinkThreshold, CalcAccountsHashDataSource, INCLUDE_SLOT_IN_HASH_TESTS,
},
accounts_hash::CalcAccountsHashConfig,
accounts_index::AccountSecondaryIndexes,
epoch_accounts_hash::EpochAccountsHash,
},
solana_core::{
accounts_hash_verifier::AccountsHashVerifier,
snapshot_packager_service::SnapshotPackagerService,
@ -11,14 +19,8 @@ use {
AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver,
PrunedBanksRequestHandler, SnapshotRequestHandler,
},
accounts_db::{
AccountShrinkThreshold, CalcAccountsHashDataSource, INCLUDE_SLOT_IN_HASH_TESTS,
},
accounts_hash::CalcAccountsHashConfig,
accounts_index::AccountSecondaryIndexes,
bank::{epoch_accounts_hash_utils, Bank, BankTestConfig},
bank_forks::BankForks,
epoch_accounts_hash::EpochAccountsHash,
genesis_utils::{self, GenesisConfigInfo},
runtime_config::RuntimeConfig,
snapshot_archive_info::SnapshotArchiveInfoGetter,

View File

@ -6,6 +6,12 @@ use {
fs_extra::dir::CopyOptions,
itertools::Itertools,
log::{info, trace},
solana_accounts_db::{
accounts_db::{self, CalcAccountsHashDataSource, ACCOUNTS_DB_CONFIG_FOR_TESTING},
accounts_hash::AccountsHash,
accounts_index::AccountSecondaryIndexes,
epoch_accounts_hash::EpochAccountsHash,
},
solana_core::{
accounts_hash_verifier::AccountsHashVerifier,
snapshot_packager_service::SnapshotPackagerService,
@ -14,14 +20,10 @@ use {
solana_runtime::{
accounts_background_service::{
AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService,
PrunedBanksRequestHandler, SnapshotRequestHandler,
PrunedBanksRequestHandler, SendDroppedBankCallback, SnapshotRequestHandler,
},
accounts_db::{self, CalcAccountsHashDataSource, ACCOUNTS_DB_CONFIG_FOR_TESTING},
accounts_hash::AccountsHash,
accounts_index::AccountSecondaryIndexes,
bank::Bank,
bank_forks::BankForks,
epoch_accounts_hash::EpochAccountsHash,
genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo},
runtime_config::RuntimeConfig,
snapshot_archive_info::FullSnapshotArchiveInfo,
@ -958,14 +960,15 @@ fn test_snapshots_with_background_services(
let (snapshot_package_sender, snapshot_package_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(snapshot_test_config.bank_forks));
let callback = bank_forks
bank_forks
.read()
.unwrap()
.root_bank()
.rc
.accounts
.accounts_db
.create_drop_bank_callback(pruned_banks_sender);
.enable_bank_drop_callback();
let callback = SendDroppedBankCallback::new(pruned_banks_sender);
for bank in bank_forks.read().unwrap().banks().values() {
bank.set_callback(Some(Box::new(callback.clone())));
}

View File

@ -11,9 +11,9 @@ edition = { workspace = true }
[dependencies]
log = { workspace = true }
solana-accounts-db = { workspace = true }
solana-download-utils = { workspace = true }
solana-rpc-client = { workspace = true }
solana-runtime = { workspace = true }
solana-sdk = { workspace = true }
[lib]

View File

@ -1,8 +1,8 @@
use {
log::*,
solana_accounts_db::hardened_unpack::unpack_genesis_archive,
solana_download_utils::download_genesis_if_missing,
solana_rpc_client::rpc_client::RpcClient,
solana_runtime::hardened_unpack::unpack_genesis_archive,
solana_sdk::{
genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE},
hash::Hash,

View File

@ -17,6 +17,7 @@ itertools = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
solana-accounts-db = { workspace = true }
solana-clap-utils = { workspace = true }
solana-cli-config = { workspace = true }
solana-entry = { workspace = true }

View File

@ -5,6 +5,7 @@ use {
base64::{prelude::BASE64_STANDARD, Engine},
clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg, ArgMatches},
itertools::Itertools,
solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
solana_clap_utils::{
input_parsers::{
cluster_type_of, pubkey_of, pubkeys_of, unix_timestamp_from_rfc3339_datetime,
@ -16,7 +17,6 @@ use {
solana_entry::poh::compute_hashes_per_tick,
solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account},
solana_ledger::{blockstore::create_new_ledger, blockstore_options::LedgerColumnOptions},
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
bpf_loader_upgradeable::UpgradeableLoaderState,

View File

@ -19,6 +19,7 @@ jsonrpc-server-utils = { workspace = true }
libloading = { workspace = true }
log = { workspace = true }
serde_json = { workspace = true }
solana-accounts-db = { workspace = true }
solana-entry = { workspace = true }
solana-geyser-plugin-interface = { workspace = true }
solana-ledger = { workspace = true }

View File

@ -2,15 +2,15 @@
use {
crate::geyser_plugin_manager::GeyserPluginManager,
log::*,
solana_accounts_db::{
account_storage::meta::StoredAccountMeta,
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
},
solana_geyser_plugin_interface::geyser_plugin_interface::{
ReplicaAccountInfoV3, ReplicaAccountInfoVersions,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::{
account_storage::meta::StoredAccountMeta,
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
},
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::Slot,

View File

@ -4,12 +4,12 @@ use {
geyser_plugin_manager::GeyserPluginManager,
},
log::*,
solana_accounts_db::stake_rewards::RewardInfo,
solana_geyser_plugin_interface::geyser_plugin_interface::{
ReplicaBlockInfoV2, ReplicaBlockInfoVersions,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
solana_transaction_status::{Reward, Rewards},
std::sync::{Arc, RwLock},

View File

@ -1,5 +1,5 @@
use {
solana_runtime::bank::RewardInfo,
solana_accounts_db::stake_rewards::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
std::sync::{Arc, RwLock},
};

View File

@ -11,12 +11,12 @@ use {
},
crossbeam_channel::Receiver,
log::*,
solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier,
solana_ledger::entry_notifier_interface::EntryNotifierLock,
solana_rpc::{
optimistically_confirmed_bank_tracker::SlotNotification,
transaction_notifier_interface::TransactionNotifierLock,
},
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
std::{
path::{Path, PathBuf},
sync::{

View File

@ -25,6 +25,7 @@ regex = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
solana-account-decoder = { workspace = true }
solana-accounts-db = { workspace = true }
solana-bpf-loader-program = { workspace = true }
solana-clap-utils = { workspace = true }
solana-cli-output = { workspace = true }

View File

@ -1,6 +1,6 @@
use {
clap::{value_t, values_t_or_exit, ArgMatches},
solana_runtime::{
solana_accounts_db::{
accounts_db::{AccountsDb, AccountsDbConfig, FillerAccountsConfig},
accounts_index::{AccountsIndexConfig, IndexLimitMb},
partitioned_rewards::TestPartitionedEpochRewards,

View File

@ -2,6 +2,7 @@ use {
clap::{value_t, value_t_or_exit, values_t_or_exit, ArgMatches},
crossbeam_channel::unbounded,
log::*,
solana_accounts_db::hardened_unpack::open_genesis_config,
solana_core::{
accounts_hash_verifier::AccountsHashVerifier, validator::BlockVerificationMethod,
},
@ -26,7 +27,6 @@ use {
PrunedBanksRequestHandler, SnapshotRequestHandler,
},
bank_forks::BankForks,
hardened_unpack::open_genesis_config,
snapshot_config::SnapshotConfig,
snapshot_hash::StartingSnapshotHashes,
snapshot_utils::{

View File

@ -16,6 +16,10 @@ use {
},
serde_json::json,
solana_account_decoder::{UiAccount, UiAccountData, UiAccountEncoding},
solana_accounts_db::{
accounts::Accounts, accounts_db::CalcAccountsHashDataSource, accounts_index::ScanConfig,
hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
},
solana_clap_utils::{
hidden_unless_forced,
input_parsers::{cluster_type_of, pubkey_of, pubkeys_of},
@ -46,12 +50,8 @@ use {
},
solana_measure::{measure, measure::Measure},
solana_runtime::{
accounts::Accounts,
accounts_db::CalcAccountsHashDataSource,
accounts_index::ScanConfig,
bank::{Bank, RewardCalculationEvent, TotalAccountsStats},
bank_forks::BankForks,
hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
runtime_config::RuntimeConfig,
snapshot_archive_info::SnapshotArchiveInfoGetter,
snapshot_bank_utils,
@ -2292,7 +2292,7 @@ fn main() {
create_new_ledger(
&output_directory,
&genesis_config,
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
LedgerColumnOptions::default(),
)
.unwrap_or_else(|err| {

View File

@ -37,6 +37,7 @@ serde = { workspace = true }
serde_bytes = { workspace = true }
sha2 = { workspace = true }
solana-account-decoder = { workspace = true }
solana-accounts-db = { workspace = true }
solana-bpf-loader-program = { workspace = true }
solana-cost-model = { workspace = true }
solana-entry = { workspace = true }

View File

@ -10,9 +10,9 @@ use {
use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup},
},
log::*,
solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier,
solana_runtime::{
accounts_background_service::AbsRequestSender,
accounts_update_notifier_interface::AccountsUpdateNotifier,
bank_forks::BankForks,
snapshot_archive_info::{
FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfoGetter,

View File

@ -32,6 +32,9 @@ use {
ThreadPool,
},
rocksdb::{DBRawIterator, LiveFile},
solana_accounts_db::hardened_unpack::{
unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
},
solana_entry::entry::{create_ticks, Entry},
solana_measure::measure::Measure,
solana_metrics::{
@ -39,10 +42,7 @@ use {
poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo},
},
solana_rayon_threadlimit::get_max_thread_count,
solana_runtime::{
bank::Bank,
hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
},
solana_runtime::bank::Bank,
solana_sdk::{
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND},
genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE},

View File

@ -26,7 +26,7 @@ use {
WriteBatch as RWriteBatch, DB,
},
serde::{de::DeserializeOwned, Serialize},
solana_runtime::hardened_unpack::UnpackError,
solana_accounts_db::hardened_unpack::UnpackError,
solana_sdk::{
clock::{Slot, UnixTimestamp},
pubkey::Pubkey,

View File

@ -16,6 +16,16 @@ use {
rand::{seq::SliceRandom, thread_rng},
rayon::{prelude::*, ThreadPool},
scopeguard::defer,
solana_accounts_db::{
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
accounts_index::AccountSecondaryIndexes,
accounts_update_notifier_interface::AccountsUpdateNotifier,
epoch_accounts_hash::EpochAccountsHash,
rent_debits::RentDebits,
transaction_results::{
TransactionExecutionDetails, TransactionExecutionResult, TransactionResults,
},
},
solana_cost_model::cost_model::CostModel,
solana_entry::entry::{
self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers,
@ -26,21 +36,13 @@ use {
solana_rayon_threadlimit::{get_max_thread_count, get_thread_count},
solana_runtime::{
accounts_background_service::{AbsRequestSender, SnapshotRequestType},
accounts_db::{AccountShrinkThreshold, AccountsDbConfig},
accounts_index::AccountSecondaryIndexes,
accounts_update_notifier_interface::AccountsUpdateNotifier,
bank::{Bank, TransactionBalancesSet},
bank_forks::BankForks,
bank_utils,
commitment::VOTE_THRESHOLD_SIZE,
epoch_accounts_hash::EpochAccountsHash,
prioritization_fee_cache::PrioritizationFeeCache,
rent_debits::RentDebits,
runtime_config::RuntimeConfig,
transaction_batch::TransactionBatch,
transaction_results::{
TransactionExecutionDetails, TransactionExecutionResult, TransactionResults,
},
vote_account::VoteAccountsHashMap,
vote_sender_types::ReplayVoteSender,
},

View File

@ -15,6 +15,7 @@ itertools = { workspace = true }
log = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
solana-accounts-db = { workspace = true }
solana-client = { workspace = true }
solana-config-program = { workspace = true }
solana-core = { workspace = true }

View File

@ -6,6 +6,7 @@ use {
},
itertools::izip,
log::*,
solana_accounts_db::accounts_db::create_accounts_run_and_snapshot_dirs,
solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient},
solana_core::{
consensus::tower_storage::FileTowerStorage,
@ -23,7 +24,6 @@ use {
ValidatorVoteKeypairs,
},
snapshot_config::SnapshotConfig,
snapshot_utils::create_accounts_run_and_snapshot_dirs,
},
solana_sdk::{
account::{Account, AccountSharedData},

Some files were not shown because too many files have changed in this diff Show More