AcctIdx: appendvecid: u32 (#21842)

This commit is contained in:
Jeff Washington (jwash) 2022-01-03 10:35:17 -06:00 committed by GitHub
parent 2a00382d71
commit fb62407232
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 58 additions and 6 deletions

View File

@ -75,7 +75,7 @@ use {
path::{Path, PathBuf},
str::FromStr,
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering},
Arc, Condvar, Mutex, MutexGuard, RwLock,
},
thread::Builder,
@ -353,8 +353,8 @@ impl<'a> MultiThreadProgress<'a> {
}
/// An offset into the AccountsDb::storage vector
pub type AtomicAppendVecId = AtomicUsize;
pub type AppendVecId = usize;
pub type AtomicAppendVecId = AtomicU32;
pub type AppendVecId = u32;
pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
pub type SnapshotStorages = Vec<SnapshotStorage>;
@ -11255,6 +11255,58 @@ pub mod tests {
}
}
#[test]
#[should_panic(expected = "We've run out of storage ids!")]
fn test_wrapping_append_vec_id() {
    let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
    let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());

    // Set the 'next' id to the max possible value.
    db.next_id.store(AppendVecId::MAX, Ordering::Release);

    let slots = 3;
    let pubkeys: Vec<_> = (0..slots).map(|_| Pubkey::new_unique()).collect();

    // Write a unique key into each successive slot, rooting each slot as we go.
    for (slot, pubkey) in pubkeys.iter().enumerate() {
        let slot = slot as Slot;
        db.store_uncached(slot, &[(pubkey, &account)]);
        db.get_accounts_delta_hash(slot);
        db.add_root(slot);
    }

    assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire));

    // Every stored key should still be loadable afterwards.
    let ancestors = Ancestors::default();
    for pubkey in &pubkeys {
        assert!(db.load_without_fixed_root(&ancestors, pubkey).is_some());
    }
}
#[test]
#[should_panic(expected = "We've run out of storage ids!")]
fn test_reuse_append_vec_id() {
    solana_logger::setup();
    let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
    let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());

    // Set the 'next' id to the max possible value.
    db.next_id.store(AppendVecId::MAX, Ordering::Release);

    let slots = 3;
    let pubkeys: Vec<_> = (0..slots).map(|_| Pubkey::new_unique()).collect();

    // Write a unique key into each successive slot, rooting each slot as we
    // go. After each store, reset next_id to what it was previously so the
    // same id gets re-used.
    for (slot, pubkey) in pubkeys.iter().enumerate() {
        let slot = slot as Slot;
        db.store_uncached(slot, &[(pubkey, &account)]);
        db.get_accounts_delta_hash(slot);
        db.add_root(slot);
        db.next_id.store(AppendVecId::MAX, Ordering::Release);
    }

    // Every stored key should still be loadable afterwards.
    let ancestors = Ancestors::default();
    for pubkey in &pubkeys {
        assert!(db.load_without_fixed_root(&ancestors, pubkey).is_some());
    }
}
#[test]
fn test_zero_lamport_new_root_not_cleaned() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);

View File

@ -16,7 +16,7 @@ use {
epoch_stakes::EpochStakes,
hardened_unpack::UnpackedAppendVecMap,
rent_collector::RentCollector,
serde_snapshot::future::SerializableStorage,
serde_snapshot::future::{AppendVecIdSerialized, SerializableStorage},
stakes::Stakes,
},
bincode::{self, config::Options, Error},
@ -468,7 +468,7 @@ where
// rename the file to this new path.
// **DEVELOPER NOTE:** Keep this check last so that it can short-circuit if
// possible.
if storage_entry.id() == remapped_append_vec_id
if storage_entry.id() == remapped_append_vec_id as AppendVecIdSerialized
|| std::fs::metadata(&remapped_append_vec_path).is_err()
{
break (remapped_append_vec_id, remapped_append_vec_path);
@ -479,7 +479,7 @@ where
num_collisions.fetch_add(1, Ordering::Relaxed);
};
// Only rename the file if the new ID is actually different from the original.
if storage_entry.id() != remapped_append_vec_id {
if storage_entry.id() != remapped_append_vec_id as AppendVecIdSerialized {
std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
}