AcctIdx: appendvecid: u32 (#21842)
commit fb62407232 · parent 2a00382d71
@@ -75,7 +75,7 @@ use {
     path::{Path, PathBuf},
     str::FromStr,
     sync::{
-        atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
+        atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering},
         Arc, Condvar, Mutex, MutexGuard, RwLock,
     },
     thread::Builder,
@@ -353,8 +353,8 @@ impl<'a> MultiThreadProgress<'a> {
 }
 
 /// An offset into the AccountsDb::storage vector
-pub type AtomicAppendVecId = AtomicUsize;
-pub type AppendVecId = usize;
+pub type AtomicAppendVecId = AtomicU32;
+pub type AppendVecId = u32;
 pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
 pub type SnapshotStorages = Vec<SnapshotStorage>;
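Note: with the id type narrowed from usize to u32, running out of storage ids becomes a reachable failure mode rather than a practical impossibility, which is what the new tests below exercise. A minimal sketch of a guarded allocator consistent with the panic message those tests expect (the struct and field names here are illustrative, not necessarily AccountsDb's actual layout):

    use std::sync::atomic::{AtomicU32, Ordering};

    pub type AppendVecId = u32;
    pub type AtomicAppendVecId = AtomicU32;

    struct IdAllocator {
        next_id: AtomicAppendVecId,
    }

    impl IdAllocator {
        /// Hand out sequential storage ids; panic once the u32 space is exhausted.
        fn next_id(&self) -> AppendVecId {
            // fetch_add returns the previous value and wraps silently on
            // overflow, so treat MAX itself as the exhaustion sentinel.
            let id = self.next_id.fetch_add(1, Ordering::AcqRel);
            assert!(id != AppendVecId::MAX, "We've run out of storage ids!");
            id
        }
    }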
@@ -11255,6 +11255,58 @@ pub mod tests {
         }
     }
 
+    #[test]
+    #[should_panic(expected = "We've run out of storage ids!")]
+    fn test_wrapping_append_vec_id() {
+        let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
+        let zero_lamport_account =
+            AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+
+        // set 'next' id to the max possible value
+        db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        let slots = 3;
+        let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
+        // write unique keys to successive slots
+        keys.iter().enumerate().for_each(|(slot, key)| {
+            let slot = slot as Slot;
+            db.store_uncached(slot, &[(key, &zero_lamport_account)]);
+            db.get_accounts_delta_hash(slot);
+            db.add_root(slot);
+        });
+        assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire));
+        let ancestors = Ancestors::default();
+        keys.iter().for_each(|key| {
+            assert!(db.load_without_fixed_root(&ancestors, key).is_some());
+        });
+    }
+
+    #[test]
+    #[should_panic(expected = "We've run out of storage ids!")]
+    fn test_reuse_append_vec_id() {
+        solana_logger::setup();
+        let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
+        let zero_lamport_account =
+            AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+
+        // set 'next' id to the max possible value
+        db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        let slots = 3;
+        let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
+        // write unique keys to successive slots
+        keys.iter().enumerate().for_each(|(slot, key)| {
+            let slot = slot as Slot;
+            db.store_uncached(slot, &[(key, &zero_lamport_account)]);
+            db.get_accounts_delta_hash(slot);
+            db.add_root(slot);
+            // reset next_id to what it was previously to cause us to re-use the same id
+            db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        });
+        let ancestors = Ancestors::default();
+        keys.iter().for_each(|key| {
+            assert!(db.load_without_fixed_root(&ancestors, key).is_some());
+        });
+    }
+
     #[test]
     fn test_zero_lamport_new_root_not_cleaned() {
         let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
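Both tests pre-load next_id with AppendVecId::MAX so that the very next allocation trips the exhaustion guard, hence the should_panic attributes. The silent wraparound they defend against is ordinary AtomicU32 behavior, as this standalone check illustrates:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let next_id = AtomicU32::new(u32::MAX);
        // fetch_add returns the previous value and wraps on overflow
        // without any error, which is why an explicit assert is needed.
        assert_eq!(next_id.fetch_add(1, Ordering::AcqRel), u32::MAX);
        assert_eq!(next_id.load(Ordering::Acquire), 0); // wrapped to 0
    }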
@@ -16,7 +16,7 @@ use {
         epoch_stakes::EpochStakes,
         hardened_unpack::UnpackedAppendVecMap,
         rent_collector::RentCollector,
-        serde_snapshot::future::SerializableStorage,
+        serde_snapshot::future::{AppendVecIdSerialized, SerializableStorage},
         stakes::Stakes,
     },
     bincode::{self, config::Options, Error},
@@ -468,7 +468,7 @@ where
         // rename the file to this new path.
         // **DEVELOPER NOTE:** Keep this check last so that it can short-circuit if
         // possible.
-        if storage_entry.id() == remapped_append_vec_id
+        if storage_entry.id() == remapped_append_vec_id as AppendVecIdSerialized
             || std::fs::metadata(&remapped_append_vec_path).is_err()
         {
             break (remapped_append_vec_id, remapped_append_vec_path);
@@ -479,7 +479,7 @@ where
             num_collisions.fetch_add(1, Ordering::Relaxed);
         };
         // Only rename the file if the new ID is actually different from the original.
-        if storage_entry.id() != remapped_append_vec_id {
+        if storage_entry.id() != remapped_append_vec_id as AppendVecIdSerialized {
             std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
         }
 
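The casts in the last two hunks reflect that the snapshot wire format keeps its original integer width for append vec ids while the in-memory AppendVecId is now u32; AppendVecIdSerialized names that on-disk width so the widening comparison stays explicit. A minimal sketch, assuming a wider serialized type (the u64 here is an illustrative guess; the real alias lives in serde_snapshot::future):

    pub type AppendVecId = u32;
    // Hypothetical width for illustration; see serde_snapshot::future
    // for the actual definition.
    pub type AppendVecIdSerialized = u64;

    fn ids_match(stored: AppendVecIdSerialized, runtime: AppendVecId) -> bool {
        // Widening the runtime id is lossless, so comparing in the
        // serialized width can never produce a false mismatch.
        stored == runtime as AppendVecIdSerialized
    }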