Rename AppendVecId to AccountsFileId (#383)

#### Problem
The current AppendVecId actually identifies an accounts file, not an AppendVec specifically, so the name is misleading.

#### Summary of Changes
Rename AppendVecId to AccountsFileId, together with the derived names AtomicAppendVecId, SerializedAppendVecId, and StorageAndNextAppendVecId (which become AtomicAccountsFileId, SerializedAccountsFileId, and StorageAndNextAccountsFileId).
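
The rename is purely mechanical: the ids keep the same `u32` representation, and no behavior or on-disk format changes. A minimal sketch of the core alias change (the aliases are taken from the diff below; the `use` line and comments are added here only to make the snippet self-contained):

```rust
use std::sync::atomic::AtomicU32;

// Before: the id type was named after AppendVec, the original storage-file
// implementation.
pub type AtomicAppendVecId = AtomicU32;
pub type AppendVecId = u32;

// After: the same ids, renamed to reflect that they identify an accounts file,
// which today happens to be an AppendVec but need not always be.
pub type AtomicAccountsFileId = AtomicU32;
pub type AccountsFileId = u32;
```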

#### Test Plan
Build
Yueh-Hsuan Chiang 2024-03-22 11:25:30 -07:00 committed by GitHub
parent 24fe473b46
commit 977b1b836f
13 changed files with 108 additions and 104 deletions

View File

@@ -4,7 +4,7 @@
 //! Note that AccountInfo is saved to disk buckets during runtime, but disk buckets are recreated at startup.
 use {
     crate::{
-        accounts_db::AppendVecId,
+        accounts_db::AccountsFileId,
         accounts_file::ALIGN_BOUNDARY_OFFSET,
         accounts_index::{IsCached, ZeroLamport},
     },
@@ -21,7 +21,7 @@ pub type StoredSize = u32;
 /// specify where account data is located
 #[derive(Debug, PartialEq, Eq)]
 pub enum StorageLocation {
-    AppendVec(AppendVecId, Offset),
+    AppendVec(AccountsFileId, Offset),
     Cached,
 }
@@ -85,7 +85,7 @@ pub struct PackedOffsetAndFlags {
 #[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
 pub struct AccountInfo {
     /// index identifying the append storage
-    store_id: AppendVecId,
+    store_id: AccountsFileId,
     account_offset_and_flags: AccountOffsetAndFlags,
 }
@@ -121,7 +121,7 @@ impl IsCached for StorageLocation {
 }
 /// We have to have SOME value for store_id when we are cached
-const CACHE_VIRTUAL_STORAGE_ID: AppendVecId = AppendVecId::MAX;
+const CACHE_VIRTUAL_STORAGE_ID: AccountsFileId = AccountsFileId::MAX;
 impl AccountInfo {
     pub fn new(storage_location: StorageLocation, lamports: u64) -> Self {
@@ -160,7 +160,7 @@ impl AccountInfo {
         (offset / ALIGN_BOUNDARY_OFFSET) as OffsetReduced
     }
-    pub fn store_id(&self) -> AppendVecId {
+    pub fn store_id(&self) -> AccountsFileId {
         // if the account is in a cached store, the store_id is meaningless
         assert!(!self.is_cached());
         self.store_id

View File

@@ -1,7 +1,7 @@
 //! Manage the map of slot -> append vec
 use {
-    crate::accounts_db::{AccountStorageEntry, AppendVecId},
+    crate::accounts_db::{AccountStorageEntry, AccountsFileId},
     dashmap::DashMap,
     solana_sdk::clock::Slot,
     std::sync::Arc,
@@ -15,7 +15,7 @@ pub struct AccountStorageReference {
     pub storage: Arc<AccountStorageEntry>,
     /// id can be read from 'storage', but it is an atomic read.
     /// id will never change while a storage is held, so we store it separately here for faster runtime lookup in 'get_account_storage_entry'
-    pub id: AppendVecId,
+    pub id: AccountsFileId,
 }
 pub type AccountStorageMap = DashMap<Slot, AccountStorageReference>;
@@ -50,7 +50,7 @@ impl AccountStorage {
     pub(crate) fn get_account_storage_entry(
         &self,
         slot: Slot,
-        store_id: AppendVecId,
+        store_id: AccountsFileId,
     ) -> Option<Arc<AccountStorageEntry>> {
         let lookup_in_map = || {
             self.map
@@ -343,7 +343,7 @@ pub(crate) mod tests {
     }
     impl AccountStorage {
-        fn get_test_storage_with_id(&self, id: AppendVecId) -> Arc<AccountStorageEntry> {
+        fn get_test_storage_with_id(&self, id: AccountsFileId) -> Arc<AccountStorageEntry> {
            let slot = 0;
            // add a map store
            let common_store_path = Path::new("");

View File

@@ -637,7 +637,7 @@ struct StorageSizeAndCount {
     /// number of accounts in the storage including both alive and dead accounts
     pub count: usize,
 }
-type StorageSizeAndCountMap = DashMap<AppendVecId, StorageSizeAndCount>;
+type StorageSizeAndCountMap = DashMap<AccountsFileId, StorageSizeAndCount>;
 impl GenerateIndexTimings {
     pub fn report(&self, startup_stats: &StartupStats) {
@@ -764,8 +764,8 @@ impl<'a> MultiThreadProgress<'a> {
 }
 /// An offset into the AccountsDb::storage vector
-pub type AtomicAppendVecId = AtomicU32;
-pub type AppendVecId = u32;
+pub type AtomicAccountsFileId = AtomicU32;
+pub type AccountsFileId = u32;
 type AccountSlots = HashMap<Pubkey, HashSet<Slot>>;
 type SlotOffsets = HashMap<Slot, HashSet<usize>>;
@@ -1005,7 +1005,7 @@ struct CleanKeyTimings {
 /// Persistent storage structure holding the accounts
 #[derive(Debug)]
 pub struct AccountStorageEntry {
-    pub(crate) id: AppendVecId,
+    pub(crate) id: AccountsFileId,
     pub(crate) slot: Slot,
@@ -1031,7 +1031,7 @@ pub struct AccountStorageEntry {
 }
 impl AccountStorageEntry {
-    pub fn new(path: &Path, slot: Slot, id: AppendVecId, file_size: u64) -> Self {
+    pub fn new(path: &Path, slot: Slot, id: AccountsFileId, file_size: u64) -> Self {
         let tail = AccountsFile::file_name(slot, id);
         let path = Path::new(path).join(tail);
         let accounts = AccountsFile::AppendVec(AppendVec::new(&path, true, file_size as usize));
@@ -1048,7 +1048,7 @@ impl AccountStorageEntry {
     pub fn new_existing(
         slot: Slot,
-        id: AppendVecId,
+        id: AccountsFileId,
         accounts: AccountsFile,
         num_accounts: usize,
     ) -> Self {
@@ -1115,7 +1115,7 @@ impl AccountStorageEntry {
         self.slot
     }
-    pub fn append_vec_id(&self) -> AppendVecId {
+    pub fn append_vec_id(&self) -> AccountsFileId {
         self.id
     }
@@ -1297,7 +1297,7 @@ pub struct AccountsDb {
     read_only_accounts_cache: ReadOnlyAccountsCache,
     /// distribute the accounts across storage lists
-    pub next_id: AtomicAppendVecId,
+    pub next_id: AtomicAccountsFileId,
     /// Set of shrinkable stores organized by map of slot to append_vec_id
     pub shrink_candidate_slots: Mutex<ShrinkCandidates>,
@@ -2336,7 +2336,7 @@ impl AccountsDb {
                 READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
             ),
             uncleaned_pubkeys: DashMap::new(),
-            next_id: AtomicAppendVecId::new(0),
+            next_id: AtomicAccountsFileId::new(0),
             shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()),
             write_cache_limit_bytes: None,
             write_version: AtomicU64::new(0),
@@ -2504,9 +2504,12 @@ impl AccountsDb {
         self.base_working_path.clone()
     }
-    fn next_id(&self) -> AppendVecId {
+    fn next_id(&self) -> AccountsFileId {
         let next_id = self.next_id.fetch_add(1, Ordering::AcqRel);
-        assert!(next_id != AppendVecId::MAX, "We've run out of storage ids!");
+        assert!(
+            next_id != AccountsFileId::MAX,
+            "We've run out of storage ids!"
+        );
         next_id
     }
@@ -6322,9 +6325,9 @@ impl AccountsDb {
     /// This runs prior to the storages being put in AccountsDb.storage
     pub fn combine_multiple_slots_into_one_at_startup(
         path: &Path,
-        id: AppendVecId,
+        id: AccountsFileId,
         slot: Slot,
-        slot_stores: &HashMap<AppendVecId, Arc<AccountStorageEntry>>,
+        slot_stores: &HashMap<AccountsFileId, Arc<AccountStorageEntry>>,
     ) -> Arc<AccountStorageEntry> {
         let size = slot_stores.values().map(|storage| storage.capacity()).sum();
         let storage = AccountStorageEntry::new(path, slot, id, size);
@@ -8641,7 +8644,7 @@ impl AccountsDb {
         &self,
         storage: &Arc<AccountStorageEntry>,
         slot: Slot,
-        store_id: AppendVecId,
+        store_id: AccountsFileId,
         rent_collector: &RentCollector,
         storage_info: &StorageSizeAndCountMap,
     ) -> SlotIndexGenerationInfo {
@@ -9609,7 +9612,7 @@ pub mod tests {
     impl CurrentAncientAppendVec {
         /// note this requires that 'slot_and_append_vec' is Some
-        fn append_vec_id(&self) -> AppendVecId {
+        fn append_vec_id(&self) -> AccountsFileId {
             self.append_vec().append_vec_id()
         }
     }
@@ -10782,7 +10785,7 @@ pub mod tests {
         write_version: StoredMetaWriteVersion,
         slot: Slot,
         pubkey: &Pubkey,
-        id: AppendVecId,
+        id: AccountsFileId,
         mark_alive: bool,
         account_data_size: Option<u64>,
         fill_percentage: u64,
@@ -10808,7 +10811,7 @@ pub mod tests {
         write_version: StoredMetaWriteVersion,
         slot: Slot,
         pubkey: &Pubkey,
-        id: AppendVecId,
+        id: AccountsFileId,
         mark_alive: bool,
         account_data_size: Option<u64>,
     ) -> Arc<AccountStorageEntry> {
@@ -13313,7 +13316,7 @@ pub mod tests {
             AccountSharedData::new(0, 0, AccountSharedData::default().owner());
         // set 'next' id to the max possible value
-        db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        db.next_id.store(AccountsFileId::MAX, Ordering::Release);
         let slots = 3;
         let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
         // write unique keys to successive slots
@@ -13340,7 +13343,7 @@ pub mod tests {
             AccountSharedData::new(0, 0, AccountSharedData::default().owner());
         // set 'next' id to the max possible value
-        db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        db.next_id.store(AccountsFileId::MAX, Ordering::Release);
         let slots = 3;
         let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
         // write unique keys to successive slots
@@ -13350,7 +13353,7 @@ pub mod tests {
             db.calculate_accounts_delta_hash(slot);
             db.add_root_and_flush_write_cache(slot);
             // reset next_id to what it was previously to cause us to re-use the same id
-            db.next_id.store(AppendVecId::MAX, Ordering::Release);
+            db.next_id.store(AccountsFileId::MAX, Ordering::Release);
         });
         let ancestors = Ancestors::default();
         keys.iter().for_each(|key| {
@@ -17236,7 +17239,7 @@ pub mod tests {
             .max()
             .unwrap_or(999);
         for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) {
-            let id = starting_id + (i as AppendVecId);
+            let id = starting_id + (i as AccountsFileId);
             let pubkey1 = solana_sdk::pubkey::new_rand();
             let storage = sample_storage_with_entries_id_fill_percentage(
                 tf,
@@ -17285,7 +17288,7 @@ pub mod tests {
             .max()
             .unwrap_or(999);
         for i in 0..num_slots {
-            let id = starting_id + (i as AppendVecId);
+            let id = starting_id + (i as AccountsFileId);
             let pubkey1 = solana_sdk::pubkey::new_rand();
             let storage = sample_storage_with_entries_id(
                 tf,

View File

@@ -3,7 +3,7 @@ use {
     account_storage::meta::{
         StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta,
     },
-    accounts_db::AppendVecId,
+    accounts_db::AccountsFileId,
     accounts_hash::AccountHash,
     append_vec::{AppendVec, AppendVecError},
     storable_accounts::StorableAccounts,
@@ -104,7 +104,7 @@ impl AccountsFile {
         }
     }
-    pub fn file_name(slot: Slot, id: AppendVecId) -> String {
+    pub fn file_name(slot: Slot, id: AccountsFileId) -> String {
         format!("{slot}.{id}")
     }

View File

@@ -195,7 +195,7 @@ mod tests {
     use {
         super::*,
         crate::{
-            accounts_db::{AccountStorageEntry, AppendVecId},
+            accounts_db::{AccountStorageEntry, AccountsFileId},
            accounts_file::AccountsFile,
            append_vec::AppendVec,
        },
@@ -297,7 +297,7 @@ mod tests {
         assert!(
             (slot != 2 && slot != 4)
                 ^ storage
-                    .map(|storage| storage.append_vec_id() == (slot as AppendVecId))
+                    .map(|storage| storage.append_vec_id() == (slot as AccountsFileId))
                    .unwrap_or(false),
             "slot: {slot}, storage: {storage:?}"
         );
@@ -440,7 +440,7 @@ mod tests {
         );
     }
-    fn create_sample_store(id: AppendVecId) -> Arc<AccountStorageEntry> {
+    fn create_sample_store(id: AccountsFileId) -> Arc<AccountStorageEntry> {
         let tf = crate::append_vec::test_utils::get_append_vec_path("create_sample_store");
         let (_temp_dirs, paths) = crate::accounts_db::get_temp_accounts_paths(1).unwrap();
         let size: usize = 123;

View File

@@ -19,11 +19,11 @@ The underlying memory for an AppendVec is a memory-mapped file. Memory-mapped fi
 The account index is designed to support a single index for all the currently forked Accounts.
 
 ```text
-type AppendVecId = usize;
+type AccountsFileId = usize;
 
 type Fork = u64;
 
-struct AccountMap(Hashmap<Fork, (AppendVecId, u64)>);
+struct AccountMap(Hashmap<Fork, (AccountsFileId, u64)>);
 
 type AccountIndex = HashMap<Pubkey, AccountMap>;
 ```
@@ -39,7 +39,7 @@ The index is a map of account Pubkeys to a map of Forks and the location of the
 pub fn load_slow(&self, id: Fork, pubkey: &Pubkey) -> Option<&Account>
 ```
 
-The read is satisfied by pointing to a memory-mapped location in the `AppendVecId` at the stored offset. A reference can be returned without a copy.
+The read is satisfied by pointing to a memory-mapped location in the `AccountsFileId` at the stored offset. A reference can be returned without a copy.
 
 ### Root Forks

View File

@@ -14,7 +14,7 @@ mod tests {
            snapshot_bank_utils,
            snapshot_utils::{
                self, create_tmp_accounts_dir_for_tests, get_storages_to_serialize, ArchiveFormat,
-                StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION,
+                StorageAndNextAccountsFileId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION,
            },
            status_cache::StatusCache,
        },
@@ -23,7 +23,7 @@ mod tests {
            account_storage::{AccountStorageMap, AccountStorageReference},
            accounts_db::{
                get_temp_accounts_paths, AccountShrinkThreshold, AccountStorageEntry, AccountsDb,
-                AtomicAppendVecId,
+                AtomicAccountsFileId,
            },
            accounts_file::{AccountsFile, AccountsFileError},
            accounts_hash::{AccountsDeltaHash, AccountsHash},
@@ -53,7 +53,7 @@ mod tests {
     fn copy_append_vecs<P: AsRef<Path>>(
         accounts_db: &AccountsDb,
         output_dir: P,
-    ) -> Result<StorageAndNextAppendVecId, AccountsFileError> {
+    ) -> Result<StorageAndNextAccountsFileId, AccountsFileError> {
         let storage_entries = accounts_db.get_snapshot_storages(RangeFull).0;
         let storage: AccountStorageMap = AccountStorageMap::with_capacity(storage_entries.len());
         let mut next_append_vec_id = 0;
@@ -84,9 +84,9 @@ mod tests {
             );
         }
-        Ok(StorageAndNextAppendVecId {
+        Ok(StorageAndNextAccountsFileId {
             storage,
-            next_append_vec_id: AtomicAppendVecId::new(next_append_vec_id + 1),
+            next_append_vec_id: AtomicAccountsFileId::new(next_append_vec_id + 1),
         })
     }

View File

@@ -6,7 +6,7 @@ use {
         epoch_stakes::EpochStakes,
         serde_snapshot::storage::SerializableAccountStorageEntry,
         snapshot_utils::{
-            self, SnapshotError, StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION,
+            self, SnapshotError, StorageAndNextAccountsFileId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION,
         },
         stakes::Stakes,
     },
@@ -17,8 +17,8 @@ use {
         account_storage::meta::StoredMetaWriteVersion,
         accounts::Accounts,
         accounts_db::{
-            AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, AppendVecId,
-            AtomicAppendVecId, BankHashStats, IndexGenerationInfo,
+            AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig,
+            AccountsFileId, AtomicAccountsFileId, BankHashStats, IndexGenerationInfo,
         },
         accounts_file::AccountsFile,
         accounts_hash::AccountsHash,
@@ -64,7 +64,7 @@ pub(crate) use {
     solana_accounts_db::accounts_hash::{
         SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash,
     },
-    storage::SerializedAppendVecId,
+    storage::SerializedAccountsFileId,
 };
 #[derive(Copy, Clone, Eq, PartialEq)]
@@ -286,7 +286,7 @@ pub(crate) fn compare_two_serialized_banks(
 /// Get snapshot storage lengths from accounts_db_fields
 pub(crate) fn snapshot_storage_lengths_from_fields(
     accounts_db_fields: &AccountsDbFields<SerializableAccountStorageEntry>,
-) -> HashMap<Slot, HashMap<SerializedAppendVecId, usize>> {
+) -> HashMap<Slot, HashMap<SerializedAccountsFileId, usize>> {
     let AccountsDbFields(snapshot_storage, ..) = &accounts_db_fields;
     snapshot_storage
         .iter()
@@ -353,7 +353,7 @@ pub(crate) fn bank_from_streams<R>(
     serde_style: SerdeStyle,
     snapshot_streams: &mut SnapshotStreams<R>,
     account_paths: &[PathBuf],
-    storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     genesis_config: &GenesisConfig,
     runtime_config: &RuntimeConfig,
     debug_keys: Option<Arc<HashSet<Pubkey>>>,
@@ -582,7 +582,7 @@ fn reconstruct_bank_from_fields<E>(
     genesis_config: &GenesisConfig,
     runtime_config: &RuntimeConfig,
     account_paths: &[PathBuf],
-    storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     debug_keys: Option<Arc<HashSet<Pubkey>>>,
     additional_builtins: Option<&[BuiltinPrototype]>,
     account_secondary_indexes: AccountSecondaryIndexes,
@@ -646,7 +646,7 @@ pub(crate) fn reconstruct_single_storage(
     slot: &Slot,
     append_vec_path: &Path,
     current_len: usize,
-    append_vec_id: AppendVecId,
+    append_vec_id: AccountsFileId,
 ) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
     let (accounts_file, num_accounts) = AccountsFile::new_from_file(append_vec_path, current_len)?;
     Ok(Arc::new(AccountStorageEntry::new_existing(
@@ -662,11 +662,11 @@ pub(crate) fn reconstruct_single_storage(
 // nodes
 pub(crate) fn remap_append_vec_file(
     slot: Slot,
-    old_append_vec_id: SerializedAppendVecId,
+    old_append_vec_id: SerializedAccountsFileId,
     append_vec_path: &Path,
-    next_append_vec_id: &AtomicAppendVecId,
+    next_append_vec_id: &AtomicAccountsFileId,
     num_collisions: &AtomicUsize,
-) -> io::Result<(AppendVecId, PathBuf)> {
+) -> io::Result<(AccountsFileId, PathBuf)> {
     #[cfg(target_os = "linux")]
     let append_vec_path_cstr = cstring_from_path(append_vec_path)?;
@@ -681,7 +681,7 @@ pub(crate) fn remap_append_vec_file(
         let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);
         // this can only happen in the first iteration of the loop
-        if old_append_vec_id == remapped_append_vec_id as SerializedAppendVecId {
+        if old_append_vec_id == remapped_append_vec_id as SerializedAccountsFileId {
             break (remapped_append_vec_id, remapped_append_vec_path);
         }
@@ -717,7 +717,7 @@ pub(crate) fn remap_append_vec_file(
     // Only rename the file if the new ID is actually different from the original. In the target_os
     // = linux case, we have already renamed if necessary.
     #[cfg(not(target_os = "linux"))]
-    if old_append_vec_id != remapped_append_vec_id as SerializedAppendVecId {
+    if old_append_vec_id != remapped_append_vec_id as SerializedAccountsFileId {
         std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
     }
@@ -726,10 +726,10 @@
 pub(crate) fn remap_and_reconstruct_single_storage(
     slot: Slot,
-    old_append_vec_id: SerializedAppendVecId,
+    old_append_vec_id: SerializedAccountsFileId,
     current_len: usize,
     append_vec_path: &Path,
-    next_append_vec_id: &AtomicAppendVecId,
+    next_append_vec_id: &AtomicAccountsFileId,
     num_collisions: &AtomicUsize,
 ) -> Result<Arc<AccountStorageEntry>, SnapshotError> {
     let (remapped_append_vec_id, remapped_append_vec_path) = remap_append_vec_file(
@@ -758,7 +758,7 @@ struct ReconstructedAccountsDbInfo {
 fn reconstruct_accountsdb_from_fields<E>(
     snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
     account_paths: &[PathBuf],
-    storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     genesis_config: &GenesisConfig,
     account_secondary_indexes: AccountSecondaryIndexes,
     limit_load_slot_count_from_snapshot: Option<usize>,
@@ -905,7 +905,7 @@ where
             .unwrap_or_else(|err| panic!("Failed to create directory {}: {}", path.display(), err));
     }
-    let StorageAndNextAppendVecId {
+    let StorageAndNextAccountsFileId {
         storage,
         next_append_vec_id,
     } = storage_and_next_append_vec_id;
@@ -918,7 +918,7 @@ where
     let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire);
     let max_append_vec_id = next_append_vec_id - 1;
     assert!(
-        max_append_vec_id <= AppendVecId::MAX / 2,
+        max_append_vec_id <= AccountsFileId::MAX / 2,
         "Storage id {max_append_vec_id} larger than allowed max"
     );

View File

@@ -3,23 +3,23 @@ use {
     solana_accounts_db::accounts_db::AccountStorageEntry,
 };
-/// The serialized AppendVecId type is fixed as usize
-pub(crate) type SerializedAppendVecId = usize;
+/// The serialized AccountsFileId type is fixed as usize
+pub(crate) type SerializedAccountsFileId = usize;
 // Serializable version of AccountStorageEntry for snapshot format
 #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
 pub struct SerializableAccountStorageEntry {
-    id: SerializedAppendVecId,
+    id: SerializedAccountsFileId,
     accounts_current_len: usize,
 }
 pub(super) trait SerializableStorage {
-    fn id(&self) -> SerializedAppendVecId;
+    fn id(&self) -> SerializedAccountsFileId;
     fn current_len(&self) -> usize;
 }
 impl SerializableStorage for SerializableAccountStorageEntry {
-    fn id(&self) -> SerializedAppendVecId {
+    fn id(&self) -> SerializedAccountsFileId {
         self.id
     }
     fn current_len(&self) -> usize {
@@ -30,7 +30,7 @@ impl SerializableStorage for SerializableAccountStorageEntry {
 impl From<&AccountStorageEntry> for SerializableAccountStorageEntry {
     fn from(rhs: &AccountStorageEntry) -> Self {
         Self {
-            id: rhs.append_vec_id() as SerializedAppendVecId,
+            id: rhs.append_vec_id() as SerializedAccountsFileId,
             accounts_current_len: rhs.accounts.len(),
         }
     }

View File

@@ -6,7 +6,7 @@ mod serde_snapshot_tests {
                newer, reconstruct_accountsdb_from_fields, remap_append_vec_file, SerdeStyle,
                SerializableAccountsDb, SnapshotAccountsDbFields, TypeContext,
            },
-            snapshot_utils::{get_storages_to_serialize, StorageAndNextAppendVecId},
+            snapshot_utils::{get_storages_to_serialize, StorageAndNextAccountsFileId},
        },
        bincode::{serialize_into, Error},
        log::info,
@@ -16,7 +16,7 @@ mod serde_snapshot_tests {
            accounts::Accounts,
            accounts_db::{
                get_temp_accounts_paths, test_utils::create_test_accounts, AccountShrinkThreshold,
-                AccountStorageEntry, AccountsDb, AtomicAppendVecId,
+                AccountStorageEntry, AccountsDb, AtomicAccountsFileId,
                VerifyAccountsHashAndLamportsConfig,
            },
            accounts_file::{AccountsFile, AccountsFileError},
@@ -58,7 +58,7 @@ mod serde_snapshot_tests {
     fn context_accountsdb_from_stream<'a, C, R>(
         stream: &mut BufReader<R>,
         account_paths: &[PathBuf],
-        storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+        storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     ) -> Result<AccountsDb, Error>
     where
         C: TypeContext<'a>,
@@ -96,7 +96,7 @@ mod serde_snapshot_tests {
         serde_style: SerdeStyle,
         stream: &mut BufReader<R>,
         account_paths: &[PathBuf],
-        storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+        storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     ) -> Result<AccountsDb, Error>
     where
         R: Read,
@@ -137,7 +137,7 @@ mod serde_snapshot_tests {
     fn copy_append_vecs<P: AsRef<Path>>(
         accounts_db: &AccountsDb,
         output_dir: P,
-    ) -> Result<StorageAndNextAppendVecId, AccountsFileError> {
+    ) -> Result<StorageAndNextAccountsFileId, AccountsFileError> {
         let storage_entries = accounts_db.get_snapshot_storages(RangeFull).0;
         let storage: AccountStorageMap = AccountStorageMap::with_capacity(storage_entries.len());
         let mut next_append_vec_id = 0;
@@ -168,9 +168,9 @@ mod serde_snapshot_tests {
             );
         }
-        Ok(StorageAndNextAppendVecId {
+        Ok(StorageAndNextAccountsFileId {
             storage,
-            next_append_vec_id: AtomicAppendVecId::new(next_append_vec_id + 1),
+            next_append_vec_id: AtomicAccountsFileId::new(next_append_vec_id + 1),
         })
     }
@@ -873,7 +873,7 @@ mod serde_snapshot_tests {
         become_ungovernable(tmp.path());
-        let next_append_vec_id = AtomicAppendVecId::new(next_id as u32);
+        let next_append_vec_id = AtomicAccountsFileId::new(next_id as u32);
         let num_collisions = AtomicUsize::new(0);
         let (remapped_id, remapped_path) =
             remap_append_vec_file(123, old_id, &old_path, &next_append_vec_id, &num_collisions)
@@ -891,7 +891,7 @@ mod serde_snapshot_tests {
         // In remap_append_vec() we want to handle EEXIST (collisions), but we want to return all
         // other errors
-        let next_append_vec_id = AtomicAppendVecId::new(457);
+        let next_append_vec_id = AtomicAccountsFileId::new(457);
         let num_collisions = AtomicUsize::new(0);
         remap_append_vec_file(
             123,

View File

@@ -18,7 +18,7 @@ use {
        rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file,
        verify_and_unarchive_snapshots, verify_unpacked_snapshots_dir_and_version,
        AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotKind, SnapshotError,
-        SnapshotRootPaths, SnapshotVersion, StorageAndNextAppendVecId,
+        SnapshotRootPaths, SnapshotVersion, StorageAndNextAccountsFileId,
        UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError,
    },
    status_cache,
@@ -27,7 +27,7 @@ use {
    log::*,
    solana_accounts_db::{
        accounts_db::{
-            AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, AtomicAppendVecId,
+            AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId,
            CalcAccountsHashDataSource,
        },
        accounts_hash::AccountsHash,
@@ -308,7 +308,7 @@ pub fn bank_from_snapshot_archives(
         storage.extend(incremental_snapshot_storages);
     }
-    let storage_and_next_append_vec_id = StorageAndNextAppendVecId {
+    let storage_and_next_append_vec_id = StorageAndNextAccountsFileId {
         storage,
         next_append_vec_id,
     };
@@ -501,7 +501,7 @@ pub fn bank_from_snapshot_dir(
         delete_contents_of_path(path);
     }
-    let next_append_vec_id = Arc::new(AtomicAppendVecId::new(0));
+    let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0));
     let (storage, measure_rebuild_storages) = measure!(
         rebuild_storages_from_snapshot_dir(
@@ -515,7 +515,7 @@ pub fn bank_from_snapshot_dir(
     let next_append_vec_id =
         Arc::try_unwrap(next_append_vec_id).expect("this is the only strong reference");
-    let storage_and_next_append_vec_id = StorageAndNextAppendVecId {
+    let storage_and_next_append_vec_id = StorageAndNextAccountsFileId {
         storage,
         next_append_vec_id,
     };
@@ -685,7 +685,7 @@ fn rebuild_bank_from_unarchived_snapshots(
         &UnpackedSnapshotsDirAndVersion,
     >,
     account_paths: &[PathBuf],
-    storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     genesis_config: &GenesisConfig,
     runtime_config: &RuntimeConfig,
     debug_keys: Option<Arc<HashSet<Pubkey>>>,
@@ -781,7 +781,7 @@ fn rebuild_bank_from_unarchived_snapshots(
 fn rebuild_bank_from_snapshot(
     bank_snapshot: &BankSnapshotInfo,
     account_paths: &[PathBuf],
-    storage_and_next_append_vec_id: StorageAndNextAppendVecId,
+    storage_and_next_append_vec_id: StorageAndNextAccountsFileId,
     genesis_config: &GenesisConfig,
     runtime_config: &RuntimeConfig,
     debug_keys: Option<Arc<HashSet<Pubkey>>>,

View File

@@ -18,7 +18,7 @@ use {
     regex::Regex,
     solana_accounts_db::{
         account_storage::AccountStorageMap,
-        accounts_db::{AccountStorageEntry, AtomicAppendVecId},
+        accounts_db::{AccountStorageEntry, AtomicAccountsFileId},
         accounts_file::AccountsFileError,
         append_vec::AppendVec,
         hardened_unpack::{self, ParallelSelector, UnpackError},
@@ -281,9 +281,9 @@ pub struct UnpackedSnapshotsDirAndVersion {
 /// Helper type for passing around account storage map and next append vec id
 /// for reconstructing accounts from a snapshot
-pub(crate) struct StorageAndNextAppendVecId {
+pub(crate) struct StorageAndNextAccountsFileId {
     pub storage: AccountStorageMap,
-    pub next_append_vec_id: AtomicAppendVecId,
+    pub next_append_vec_id: AtomicAccountsFileId,
 }
 #[derive(Error, Debug)]
@@ -1228,7 +1228,7 @@ pub fn verify_and_unarchive_snapshots(
 ) -> Result<(
     UnarchivedSnapshot,
     Option<UnarchivedSnapshot>,
-    AtomicAppendVecId,
+    AtomicAccountsFileId,
 )> {
     check_are_snapshots_compatible(
         full_snapshot_archive_info,
@@ -1237,7 +1237,7 @@ pub fn verify_and_unarchive_snapshots(
     let parallel_divisions = (num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT);
-    let next_append_vec_id = Arc::new(AtomicAppendVecId::new(0));
+    let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0));
     let unarchived_full_snapshot = unarchive_snapshot(
         &bank_snapshots_dir,
         TMP_SNAPSHOT_ARCHIVE_PREFIX,
@@ -1384,7 +1384,7 @@ fn unarchive_snapshot(
     account_paths: &[PathBuf],
     archive_format: ArchiveFormat,
     parallel_divisions: usize,
-    next_append_vec_id: Arc<AtomicAppendVecId>,
+    next_append_vec_id: Arc<AtomicAccountsFileId>,
 ) -> Result<UnarchivedSnapshot> {
     let unpack_dir = tempfile::Builder::new()
         .prefix(unpacked_snapshots_dir_prefix)
@@ -1459,7 +1459,7 @@ fn streaming_snapshot_dir_files(
 pub fn rebuild_storages_from_snapshot_dir(
     snapshot_info: &BankSnapshotInfo,
     account_paths: &[PathBuf],
-    next_append_vec_id: Arc<AtomicAppendVecId>,
+    next_append_vec_id: Arc<AtomicAccountsFileId>,
 ) -> Result<AccountStorageMap> {
     let bank_snapshot_dir = &snapshot_info.snapshot_dir;
     let accounts_hardlinks = bank_snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS);

View File

@@ -4,7 +4,7 @@ use {
     super::{snapshot_version_from_file, SnapshotError, SnapshotFrom, SnapshotVersion},
     crate::serde_snapshot::{
         self, reconstruct_single_storage, remap_and_reconstruct_single_storage,
-        snapshot_storage_lengths_from_fields, SerdeStyle, SerializedAppendVecId,
+        snapshot_storage_lengths_from_fields, SerdeStyle, SerializedAccountsFileId,
     },
     crossbeam_channel::{select, unbounded, Receiver, Sender},
     dashmap::DashMap,
@@ -16,7 +16,7 @@ use {
     regex::Regex,
     solana_accounts_db::{
         account_storage::{AccountStorageMap, AccountStorageReference},
-        accounts_db::{AccountStorageEntry, AccountsDb, AppendVecId, AtomicAppendVecId},
+        accounts_db::{AccountStorageEntry, AccountsDb, AccountsFileId, AtomicAccountsFileId},
         append_vec::AppendVec,
     },
     solana_sdk::clock::Slot,
@@ -55,16 +55,16 @@ pub(crate) struct SnapshotStorageRebuilder {
     /// Number of threads to rebuild with
     num_threads: usize,
     /// Snapshot storage lengths - from the snapshot file
-    snapshot_storage_lengths: HashMap<Slot, HashMap<SerializedAppendVecId, usize>>,
+    snapshot_storage_lengths: HashMap<Slot, HashMap<SerializedAccountsFileId, usize>>,
     /// Container for storing snapshot file paths
     storage_paths: DashMap<Slot, Mutex<Vec<PathBuf>>>,
     /// Container for storing rebuilt snapshot storages
     storage: AccountStorageMap,
     /// Tracks next append_vec_id
-    next_append_vec_id: Arc<AtomicAppendVecId>,
+    next_append_vec_id: Arc<AtomicAccountsFileId>,
     /// Tracker for number of processed slots
     processed_slot_count: AtomicUsize,
-    /// Tracks the number of collisions in AppendVecId
+    /// Tracks the number of collisions in AccountsFileId
     num_collisions: AtomicUsize,
     /// Rebuild from the snapshot files or archives
     snapshot_from: SnapshotFrom,
@@ -75,7 +75,7 @@ impl SnapshotStorageRebuilder {
     pub(crate) fn rebuild_storage(
         file_receiver: Receiver<PathBuf>,
         num_threads: usize,
-        next_append_vec_id: Arc<AtomicAppendVecId>,
+        next_append_vec_id: Arc<AtomicAccountsFileId>,
         snapshot_from: SnapshotFrom,
     ) -> Result<RebuiltSnapshotStorage, SnapshotError> {
         let (snapshot_version_path, snapshot_file_path, append_vec_files) =
@@ -109,7 +109,7 @@ impl SnapshotStorageRebuilder {
     fn new(
         file_receiver: Receiver<PathBuf>,
         num_threads: usize,
-        next_append_vec_id: Arc<AtomicAppendVecId>,
+        next_append_vec_id: Arc<AtomicAccountsFileId>,
         snapshot_storage_lengths: HashMap<Slot, HashMap<usize, usize>>,
         snapshot_from: SnapshotFrom,
     ) -> Self {
@@ -199,7 +199,7 @@ impl SnapshotStorageRebuilder {
     fn spawn_rebuilder_threads(
         file_receiver: Receiver<PathBuf>,
         num_threads: usize,
-        next_append_vec_id: Arc<AtomicAppendVecId>,
+        next_append_vec_id: Arc<AtomicAccountsFileId>,
         snapshot_storage_lengths: HashMap<Slot, HashMap<usize, usize>>,
         append_vec_files: Vec<PathBuf>,
         snapshot_from: SnapshotFrom,
@@ -274,7 +274,7 @@ impl SnapshotStorageRebuilder {
             // dir. When loading from a snapshot archive, the max of the appendvec IDs is
             // updated in remap_append_vec_file(), which is not in the from_dir route.
             self.next_append_vec_id
-                .fetch_max((append_vec_id + 1) as AppendVecId, Ordering::Relaxed);
+                .fetch_max((append_vec_id + 1) as AccountsFileId, Ordering::Relaxed);
         }
         let slot_storage_count = self.insert_storage_file(&slot, path);
         if slot_storage_count == self.snapshot_storage_lengths.get(&slot).unwrap().len() {
@@ -324,13 +324,14 @@ impl SnapshotStorageRebuilder {
                     &slot,
                     path.as_path(),
                     current_len,
-                    old_append_vec_id as AppendVecId,
+                    old_append_vec_id as AccountsFileId,
                 )?,
             };
             Ok((storage_entry.append_vec_id(), storage_entry))
         })
-        .collect::<Result<HashMap<AppendVecId, Arc<AccountStorageEntry>>, SnapshotError>>()?;
+        .collect::<Result<HashMap<AccountsFileId, Arc<AccountStorageEntry>>, SnapshotError>>(
+        )?;
         let storage = if slot_stores.len() > 1 {
             let remapped_append_vec_folder = lock.first().unwrap().parent().unwrap();
@@ -365,10 +366,10 @@ impl SnapshotStorageRebuilder {
     /// increment `next_append_vec_id` until there is no file in `parent_folder` with this id and slot
     /// return the id
     fn get_unique_append_vec_id(
-        next_append_vec_id: &Arc<AtomicAppendVecId>,
+        next_append_vec_id: &Arc<AtomicAccountsFileId>,
         parent_folder: &Path,
         slot: Slot,
-    ) -> AppendVecId {
+    ) -> AccountsFileId {
         loop {
             let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);
             let remapped_file_name = AppendVec::file_name(slot, remapped_append_vec_id);