Start saving/loading prior_roots(_with_hash) to snapshot (#23844)

* Start saving/loading prior_roots(_with_hash) to snapshot

* Update runtime/src/accounts_index.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* Update runtime/src/accounts_index.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* update comment

Co-authored-by: Michael Vines <mvines@gmail.com>
Jeff Washington (jwash) authored 2022-03-24 10:06:24 -05:00; committed by GitHub
parent b22165ad69
commit 396b49a7c1
5 changed files with 40 additions and 7 deletions
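
For context, here is a minimal, self-contained sketch (not code from this commit) of the backward-compatibility pattern the diff below relies on: trailing tuple-struct fields marked with #[serde(deserialize_with = "default_on_eof")] fall back to their Default value when an older serialized stream ends before reaching them. The FieldsV1/FieldsV2 names and the simplified default_on_eof are illustrative only; the sketch assumes serde (with derive) and bincode 1.x.

use serde::{Deserialize, Deserializer, Serialize};

// Simplified stand-in for solana_sdk::deserialize_utils::default_on_eof.
fn default_on_eof<'de, T, D>(d: D) -> Result<T, D::Error>
where
    T: Deserialize<'de> + Default,
    D: Deserializer<'de>,
{
    match T::deserialize(d) {
        Err(err) if err.to_string() == "io error: unexpected end of file" => Ok(T::default()),
        Err(err) if err.to_string() == "io error: failed to fill whole buffer" => Ok(T::default()),
        result => result,
    }
}

// Old on-disk layout: two fields.
#[derive(Serialize)]
struct FieldsV1(u64, String);

// New layout: a trailing field that old data simply does not contain.
#[derive(Deserialize, Debug)]
struct FieldsV2(
    u64,
    String,
    #[serde(deserialize_with = "default_on_eof")] Vec<u64>,
);

fn main() {
    let old_bytes = bincode::serialize(&FieldsV1(42, "bank".to_string())).unwrap();
    // The stream ends before the Vec<u64>, so default_on_eof yields an empty vec
    // instead of an "unexpected end of file" / "failed to fill whole buffer" error.
    let upgraded: FieldsV2 = bincode::deserialize(&old_bytes).unwrap();
    println!("{:?}", upgraded); // FieldsV2(42, "bank", [])
}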


@@ -652,7 +652,7 @@ impl RollingBitField {
 #[derive(Debug)]
 pub struct RootsTracker {
-    roots: RollingBitField,
+    pub(crate) roots: RollingBitField,
     uncleaned_roots: HashSet<Slot>,
     previous_uncleaned_roots: HashSet<Slot>,
 }
@@ -869,7 +869,7 @@ pub struct AccountsIndex<T: IndexValue> {
     program_id_index: SecondaryIndex<DashMapSecondaryIndexEntry>,
     spl_token_mint_index: SecondaryIndex<DashMapSecondaryIndexEntry>,
     spl_token_owner_index: SecondaryIndex<RwLockSecondaryIndexEntry>,
-    roots_tracker: RwLock<RootsTracker>,
+    pub(crate) roots_tracker: RwLock<RootsTracker>,
     ongoing_scan_roots: RwLock<BTreeMap<Slot, u64>>,
     // Each scan has some latest slot `S` that is the tip of the fork the scan
     // is iterating over. The unique id of that slot `S` is recorded here (note we don't use


@@ -23,6 +23,7 @@ use {
     solana_measure::measure::Measure,
     solana_sdk::{
         clock::{Epoch, Slot, UnixTimestamp},
+        deserialize_utils::default_on_eof,
         epoch_schedule::EpochSchedule,
         fee_calculator::{FeeCalculator, FeeRateGovernor},
         genesis_config::GenesisConfig,
@@ -67,6 +68,12 @@ struct AccountsDbFields<T>(
     StoredMetaWriteVersion,
     Slot,
     BankHashInfo,
+    /// all slots that were roots within the last epoch
+    #[serde(deserialize_with = "default_on_eof")]
+    Vec<Slot>,
+    /// slots that were roots within the last epoch for which we care about the hash value
+    #[serde(deserialize_with = "default_on_eof")]
+    Vec<(Slot, Hash)>,
 );

 /// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a
@@ -86,9 +93,9 @@ struct SnapshotAccountsDbFields<T> {
 impl<T> SnapshotAccountsDbFields<T> {
     /// Collapse the SnapshotAccountsDbFields into a single AccountsDbFields. If there is no
-    /// incremental snapshot, this returns the AccountsDbFields from the full snapshot. Otherwise
-    /// this uses the version, slot, and bank hash info from the incremental snapshot, then the
-    /// combination of the storages from both the full and incremental snapshots.
+    /// incremental snapshot, this returns the AccountsDbFields from the full snapshot.
+    /// Otherwise, use the AccountsDbFields from the incremental snapshot, and a combination
+    /// of the storages from both the full and incremental snapshots.
     fn collapse_into(self) -> Result<AccountsDbFields<T>, Error> {
         match self.incremental_snapshot_accounts_db_fields {
             None => Ok(self.full_snapshot_accounts_db_fields),
@@ -97,6 +104,8 @@ impl<T> SnapshotAccountsDbFields<T> {
                incremental_snapshot_version,
                incremental_snapshot_slot,
                incremental_snapshot_bank_hash_info,
+               incremental_snapshot_prior_roots,
+               incremental_snapshot_prior_roots_with_hash,
            )) => {
                let full_snapshot_storages = self.full_snapshot_accounts_db_fields.0;
                let full_snapshot_slot = self.full_snapshot_accounts_db_fields.2;
@@ -119,6 +128,8 @@ impl<T> SnapshotAccountsDbFields<T> {
                    incremental_snapshot_version,
                    incremental_snapshot_slot,
                    incremental_snapshot_bank_hash_info,
+                   incremental_snapshot_prior_roots,
+                   incremental_snapshot_prior_roots_with_hash,
                ))
            }
        }
@@ -418,6 +429,8 @@ where
        snapshot_version,
        snapshot_slot,
        snapshot_bank_hash_info,
+       _snapshot_prior_roots,
+       _snapshot_prior_roots_with_hash,
    ) = snapshot_accounts_db_fields.collapse_into()?;
    let snapshot_storages = snapshot_storages.into_iter().collect::<Vec<_>>();


@@ -238,8 +238,27 @@ impl<'a> TypeContext<'a> for Context {
            .unwrap_or_else(|| panic!("No bank_hashes entry for slot {}", serializable_db.slot))
            .clone();

+       // for now, prior_roots is the same as 'roots' and is redundant with the storages we persist in the snapshot
+       let prior_roots = serializable_db
+           .accounts_db
+           .accounts_index
+           .roots_tracker
+           .read()
+           .unwrap()
+           .roots
+           .get_all();
+       let prior_roots_with_hash = Vec::<(Slot, Hash)>::default();
+
        let mut serialize_account_storage_timer = Measure::start("serialize_account_storage_ms");
-       let result = (entries, version, slot, hash).serialize(serializer);
+       let result = (
+           entries,
+           version,
+           slot,
+           hash,
+           prior_roots,
+           prior_roots_with_hash,
+       )
+           .serialize(serializer);
        serialize_account_storage_timer.stop();
        datapoint_info!(
            "serialize_account_storage_ms",

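A note on the serialized layout (a sketch under the assumption of bincode 1.x, not code from this commit): a tuple serializes as the concatenation of its fields in order, so appending prior_roots and prior_roots_with_hash after hash only adds bytes at the end of this section, and an old snapshot is a byte-for-byte prefix of the new layout here. That prefix property is what lets the default_on_eof guards above treat the missing tail as empty vectors.

fn main() {
    // Two fields versus the same two fields plus an appended Vec.
    let old = bincode::serialize(&(7u64, 3u64)).unwrap();
    let new = bincode::serialize(&(7u64, 3u64, vec![1u64, 2, 3])).unwrap();
    // The old encoding is a prefix of the new one; nothing before the new fields moves.
    assert_eq!(&new[..old.len()], &old[..]);
    println!("old = {} bytes, new = {} bytes", old.len(), new.len()); // old = 16 bytes, new = 48 bytes
}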

@@ -305,7 +305,7 @@ mod test_bank_serialize {
     // This some what long test harness is required to freeze the ABI of
     // Bank's serialization due to versioned nature
-    #[frozen_abi(digest = "ERbJJzaQD39td9tiE4FPAud374S2Hvk6pvsxejm6quWf")]
+    #[frozen_abi(digest = "H2XtVdhokwLMTbjXh4Lh3Mw8m7PYQDMh4Ha5ojuxip9Z")]
     #[derive(Serialize, AbiExample)]
     pub struct BankAbiTestWrapperNewer {
         #[serde(serialize_with = "wrapper_newer")]


@@ -12,6 +12,7 @@ where
    let result = T::deserialize(d);
    match result {
        Err(err) if err.to_string() == "io error: unexpected end of file" => Ok(T::default()),
+       Err(err) if err.to_string() == "io error: failed to fill whole buffer" => Ok(T::default()),
        result => result,
    }
 }
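
Why the second arm: the exact wording of the EOF error depends on the reader the deserializer wraps. A truncated in-memory buffer surfaces std's read_exact message, "failed to fill whole buffer", while other readers can report "unexpected end of file" for the same condition. A minimal sketch of the first case, assuming bincode 1.x (not code from this commit):

use std::io::Cursor;

fn main() {
    // Only 2 bytes are available, but a u64 needs 8, so the read_exact inside bincode fails.
    let err = bincode::deserialize_from::<_, u64>(Cursor::new(vec![1u8, 2])).unwrap_err();
    // Expected output: "io error: failed to fill whole buffer" -- the string matched
    // by the newly added arm above.
    println!("{}", err);
}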