Makes AccountsHashVerifier aware of Incremental Accounts Hash (#30820)

Brooks 2023-03-22 10:20:16 -04:00 committed by GitHub
parent 4285cb2b84
commit 35437b8dad
4 changed files with 118 additions and 72 deletions


@@ -7,12 +7,14 @@
 use {
     crossbeam_channel::{Receiver, Sender},
     solana_gossip::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES},
-    solana_measure::{measure::Measure, measure_us},
+    solana_measure::measure_us,
     solana_runtime::{
         accounts_db::CalcAccountsHashFlavor,
         accounts_hash::{
-            AccountsHashEnum, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash,
+            AccountsHash, AccountsHashEnum, CalcAccountsHashConfig, HashStats,
+            IncrementalAccountsHash,
         },
         bank::BankIncrementalSnapshotPersistence,
         snapshot_config::SnapshotConfig,
         snapshot_package::{
             self, retain_max_n_elements, AccountsPackage, AccountsPackageType, SnapshotPackage,
@@ -214,7 +216,6 @@ impl AccountsHashVerifier {
     /// returns calculated accounts hash
     fn calculate_and_verify_accounts_hash(accounts_package: &AccountsPackage) -> AccountsHashEnum {
-        let slot = accounts_package.slot;
         let accounts_hash_calculation_flavor = match accounts_package.package_type {
             AccountsPackageType::AccountsHashVerifier => CalcAccountsHashFlavor::Full,
             AccountsPackageType::EpochAccountsHash => CalcAccountsHashFlavor::Full,
@@ -230,13 +231,70 @@
             },
         };
-        let mut measure_hash = Measure::start("hash");
-        let mut sort_time = Measure::start("sort_storages");
-        let sorted_storages = SortedStorages::new(&accounts_package.snapshot_storages);
-        sort_time.stop();
+        let (
+            accounts_hash_enum,
+            accounts_hash_for_reserialize,
+            bank_incremental_snapshot_persistence,
+        ) = match accounts_hash_calculation_flavor {
+            CalcAccountsHashFlavor::Full => {
+                let (accounts_hash, _capitalization) =
+                    Self::_calculate_full_accounts_hash(accounts_package);
+                (accounts_hash.into(), accounts_hash, None)
+            }
+            CalcAccountsHashFlavor::Incremental => {
+                let AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(base_slot)) = accounts_package.package_type else {
+                    panic!("Calculating incremental accounts hash requires a base slot");
+                };
+                let (base_accounts_hash, base_capitalization) = accounts_package
+                    .accounts
+                    .accounts_db
+                    .get_accounts_hash(base_slot)
+                    .expect("incremental snapshot requires accounts hash and capitalization from the full snapshot it is based on");
+                let (incremental_accounts_hash, incremental_capitalization) =
+                    Self::_calculate_incremental_accounts_hash(accounts_package, base_slot);
+                let bank_incremental_snapshot_persistence = BankIncrementalSnapshotPersistence {
+                    full_slot: base_slot,
+                    full_hash: base_accounts_hash.into(),
+                    full_capitalization: base_capitalization,
+                    incremental_hash: incremental_accounts_hash.into(),
+                    incremental_capitalization,
+                };
+                (
+                    incremental_accounts_hash.into(),
+                    AccountsHash(Hash::default()), // value does not matter; not used for incremental snapshots
+                    Some(bank_incremental_snapshot_persistence),
+                )
+            }
+        };
+        if let Some(snapshot_info) = &accounts_package.snapshot_info {
+            solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash(
+                snapshot_info.snapshot_links.path(),
+                accounts_package.slot,
+                &accounts_hash_for_reserialize,
+                bank_incremental_snapshot_persistence.as_ref(),
+            );
+        }
+        if accounts_package.package_type
+            == AccountsPackageType::Snapshot(SnapshotType::FullSnapshot)
+        {
+            accounts_package
+                .accounts
+                .accounts_db
+                .purge_old_accounts_hashes(accounts_package.slot);
+        }
+        accounts_hash_enum
+    }
+    fn _calculate_full_accounts_hash(
+        accounts_package: &AccountsPackage,
+    ) -> (AccountsHash, /*capitalization*/ u64) {
+        let (sorted_storages, storage_sort_us) =
+            measure_us!(SortedStorages::new(&accounts_package.snapshot_storages));
         let mut timings = HashStats {
-            storage_sort_us: sort_time.as_us(),
+            storage_sort_us,
             ..HashStats::default()
         };
         timings.calc_storage_size_quartiles(&accounts_package.snapshot_storages);
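
A note on the timing refactor running through these hunks: `measure_us!` from `solana_measure` evaluates an expression and hands back both its result and the elapsed microseconds, which is what lets the separate `Measure::start`/`stop` pairs be dropped. Below is a minimal sketch of the calling pattern only, using a hypothetical local macro rather than the real `solana_measure` implementation:

    use std::time::Instant;

    // Hypothetical stand-in for solana_measure::measure_us!: evaluate an expression and
    // return (result, elapsed_microseconds). Only the calling shape mirrors the real macro.
    macro_rules! measure_us_sketch {
        ($expr:expr) => {{
            let start = Instant::now();
            let result = $expr;
            (result, start.elapsed().as_micros() as u64)
        }};
    }

    fn main() {
        // Same shape as `let (sorted_storages, storage_sort_us) = measure_us!(...)` above.
        let (sum, elapsed_us) = measure_us_sketch!((0u64..1_000_000).sum::<u64>());
        println!("sum = {sum}, took {elapsed_us} us");
    }
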
@@ -250,7 +308,7 @@ impl AccountsHashVerifier {
             store_detailed_debug_info_on_failure: false,
         };
-        let (accounts_hash, lamports) = accounts_package
+        let ((accounts_hash, lamports), measure_hash_us) = measure_us!(accounts_package
             .accounts
             .accounts_db
             .calculate_accounts_hash_from_storages(
@@ -258,30 +316,15 @@ impl AccountsHashVerifier {
                 &sorted_storages,
                 timings,
             )
-            .unwrap(); // unwrap here will never fail since check_hash = false
+            .unwrap()); // unwrap here will never fail since check_hash = false
-        match accounts_hash_calculation_flavor {
-            CalcAccountsHashFlavor::Full => {
-                let old_accounts_hash = accounts_package
-                    .accounts
-                    .accounts_db
-                    .set_accounts_hash(slot, (accounts_hash, lamports));
-                if let Some(old_accounts_hash) = old_accounts_hash {
-                    warn!("Accounts hash was already set for slot {slot}! old: {old_accounts_hash:?}, new: {accounts_hash:?}");
-                }
-            }
-            CalcAccountsHashFlavor::Incremental => {
-                // Once we calculate incremental accounts hashes, we can use the calculation result
-                // directly. Until then, convert the full accounts hash into an incremental.
-                let incremental_accounts_hash = IncrementalAccountsHash(accounts_hash.0);
-                let old_incremental_accounts_hash = accounts_package
-                    .accounts
-                    .accounts_db
-                    .set_incremental_accounts_hash(slot, (incremental_accounts_hash, lamports));
-                if let Some(old_incremental_accounts_hash) = old_incremental_accounts_hash {
-                    warn!("Incremental accounts hash was already set for slot {slot}! old: {old_incremental_accounts_hash:?}, new: {incremental_accounts_hash:?}");
-                }
-            }
+        let slot = accounts_package.slot;
+        let old_accounts_hash = accounts_package
+            .accounts
+            .accounts_db
+            .set_accounts_hash(slot, (accounts_hash, lamports));
+        if let Some(old_accounts_hash) = old_accounts_hash {
+            warn!("Accounts hash was already set for slot {slot}! old: {old_accounts_hash:?}, new: {accounts_hash:?}");
         }
         if accounts_package.expected_capitalization != lamports {
@@ -328,31 +371,56 @@
                 &accounts_package.epoch_schedule,
             );
-        measure_hash.stop();
+        datapoint_info!(
+            "accounts_hash_verifier",
+            ("calculate_hash", measure_hash_us, i64),
+        );
-        if let Some(snapshot_info) = &accounts_package.snapshot_info {
-            solana_runtime::serde_snapshot::reserialize_bank_with_new_accounts_hash(
-                snapshot_info.snapshot_links.path(),
-                slot,
-                &accounts_hash,
-                None,
-            );
-        }
+        (accounts_hash, lamports)
+    }
-        if accounts_package.package_type
-            == AccountsPackageType::Snapshot(SnapshotType::FullSnapshot)
-        {
+    fn _calculate_incremental_accounts_hash(
+        accounts_package: &AccountsPackage,
+        base_slot: Slot,
+    ) -> (IncrementalAccountsHash, /*capitalization*/ u64) {
+        let incremental_storages =
+            accounts_package
+                .snapshot_storages
+                .iter()
+                .filter_map(|storage| {
+                    let storage_slot = storage.slot();
+                    (storage_slot > base_slot).then_some((storage, storage_slot))
+                });
+        let sorted_storages = SortedStorages::new_with_slots(incremental_storages, None, None);
+        let calculate_accounts_hash_config = CalcAccountsHashConfig {
+            use_bg_thread_pool: true,
+            check_hash: false,
+            ancestors: None,
+            epoch_schedule: &accounts_package.epoch_schedule,
+            rent_collector: &accounts_package.rent_collector,
+            store_detailed_debug_info_on_failure: false,
+        };
+        let (incremental_accounts_hash, measure_hash_us) = measure_us!(
            accounts_package
                .accounts
                .accounts_db
-                .purge_old_accounts_hashes(slot);
-        }
+                .update_incremental_accounts_hash(
+                    &calculate_accounts_hash_config,
+                    &sorted_storages,
+                    accounts_package.slot,
+                    HashStats::default(),
+                )
+                .unwrap() // unwrap here will never fail since check_hash = false
+        );
         datapoint_info!(
             "accounts_hash_verifier",
-            ("calculate_hash", measure_hash.as_us(), i64),
+            ("calculate_incremental_accounts_hash", measure_hash_us, i64),
         );
-        accounts_hash.into()
+        incremental_accounts_hash
     }
     fn save_epoch_accounts_hash(
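
To make the new `_calculate_incremental_accounts_hash` above easier to follow: only storages strictly newer than the base (full snapshot) slot feed the incremental hash, because accounts at or before the base slot are already covered by the full accounts hash fetched for that slot. A simplified, self-contained sketch of that selection, with a plain struct standing in for the real storage and SortedStorages types:

    // Hypothetical stand-in for an account storage; only its slot matters for the selection.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct StorageSketch {
        slot: u64,
    }

    // Mirrors the `(storage_slot > base_slot).then_some(..)` filter above: keep only
    // storages strictly newer than the base slot.
    fn select_incremental_storages(storages: &[StorageSketch], base_slot: u64) -> Vec<StorageSketch> {
        storages
            .iter()
            .copied()
            .filter(|storage| storage.slot > base_slot)
            .collect()
    }

    fn main() {
        let storages = [
            StorageSketch { slot: 90 },
            StorageSketch { slot: 100 }, // base slot: already covered by the full accounts hash
            StorageSketch { slot: 110 },
            StorageSketch { slot: 120 },
        ];
        let incremental = select_incremental_storages(&storages, 100);
        assert_eq!(
            incremental,
            vec![StorageSketch { slot: 110 }, StorageSketch { slot: 120 }]
        );
        println!("{incremental:?}");
    }
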


@@ -37,7 +37,6 @@ use {
     },
     solana_sdk::{
         clock::Slot,
-        feature_set,
         genesis_config::{
             ClusterType::{self, Development, Devnet, MainnetBeta, Testnet},
             GenesisConfig,
@@ -82,7 +81,6 @@ impl SnapshotTestConfig {
         accounts_hash_interval_slots: Slot,
         full_snapshot_archive_interval_slots: Slot,
         incremental_snapshot_archive_interval_slots: Slot,
-        is_incremental_snapshot_only_incremental_hash_calculation_enabled: bool,
     ) -> SnapshotTestConfig {
         let (_accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
         let bank_snapshots_dir = TempDir::new().unwrap();
@@ -98,12 +96,6 @@
             1, // validator_stake_lamports
         );
         genesis_config_info.genesis_config.cluster_type = cluster_type;
-        if !is_incremental_snapshot_only_incremental_hash_calculation_enabled {
-            genesis_config_info
-                .genesis_config
-                .accounts
-                .remove(&feature_set::incremental_snapshot_only_incremental_hash_calculation::id());
-        }
         let bank0 = Bank::new_with_paths_for_tests(
             &genesis_config_info.genesis_config,
             Arc::<RuntimeConfig>::default(),
@@ -216,7 +208,6 @@ fn run_bank_forks_snapshot_n<F>(
         set_root_interval,
         set_root_interval,
         Slot::MAX,
-        true,
     );
     let bank_forks = &mut snapshot_test_config.bank_forks;
@@ -340,7 +331,7 @@ fn test_concurrent_snapshot_packaging(
     // Set up snapshotting config
     let mut snapshot_test_config =
-        SnapshotTestConfig::new(snapshot_version, cluster_type, 1, 1, Slot::MAX, true);
+        SnapshotTestConfig::new(snapshot_version, cluster_type, 1, 1, Slot::MAX);
     let bank_forks = &mut snapshot_test_config.bank_forks;
     let snapshot_config = &snapshot_test_config.snapshot_config;
@@ -592,7 +583,6 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust
         (*add_root_interval * num_set_roots * 2) as Slot,
         (*add_root_interval * num_set_roots * 2) as Slot,
         Slot::MAX,
-        true,
     );
     let mut current_bank = snapshot_test_config.bank_forks[0].clone();
     let request_sender = AbsRequestSender::new(snapshot_sender);
@@ -711,7 +701,6 @@ fn test_bank_forks_incremental_snapshot(
         SET_ROOT_INTERVAL,
         FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
         INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
-        true,
     );
     trace!("SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}",
         snapshot_test_config.accounts_dir.display(), snapshot_test_config.bank_snapshots_dir.path().display(), snapshot_test_config.full_snapshot_archives_dir.path().display(), snapshot_test_config.incremental_snapshot_archives_dir.path().display());
@@ -951,8 +940,6 @@ fn test_snapshots_with_background_services(
         BANK_SNAPSHOT_INTERVAL_SLOTS,
         FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
         INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
-        // disable incremental accounts hash feature since AccountsHashVerifier does not support it yet
-        false,
     );
     let node_keypair = Arc::new(Keypair::new());


@@ -31,7 +31,6 @@ use {
         clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
         commitment_config::CommitmentConfig,
         epoch_schedule::EpochSchedule,
-        feature_set,
         genesis_config::{ClusterType, GenesisConfig},
         message::Message,
         poh_config::PohConfig,
@@ -264,11 +263,6 @@ impl LocalCluster {
             ),
         );
-        // disable incremental accounts hash feature since AccountsHashVerifier does not support it yet
-        genesis_config
-            .accounts
-            .remove(&feature_set::incremental_snapshot_only_incremental_hash_calculation::id());
         let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
         let leader_contact_info = leader_node.info.clone();
         let mut leader_config = safe_clone_config(&config.validator_configs[0]);
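
The deleted block above, like the test-config parameter removed in the previous file, existed only to opt tests out of `incremental_snapshot_only_incremental_hash_calculation` while AccountsHashVerifier could not handle it: a feature is left inactive in a test genesis by removing its account. A rough sketch of that idea, with a map standing in for `GenesisConfig::accounts` (the real key is the feature's Pubkey and the value is an Account):

    use std::collections::HashMap;

    // Hypothetical stand-in for GenesisConfig; feature ids in the real code come from
    // `feature_set::<feature>::id()`.
    struct GenesisConfigSketch {
        accounts: HashMap<String, u64 /* lamports */>,
    }

    impl GenesisConfigSketch {
        // With no feature account present at genesis, the feature starts out inactive,
        // which is what the removed test/local-cluster code relied on before this commit.
        fn deactivate_feature(&mut self, feature_id: &str) {
            self.accounts.remove(feature_id);
        }
    }

    fn main() {
        let feature_id = "incremental_snapshot_only_incremental_hash_calculation";
        let mut genesis = GenesisConfigSketch {
            accounts: HashMap::from([(feature_id.to_string(), 1)]),
        };
        genesis.deactivate_feature(feature_id);
        assert!(!genesis.accounts.contains_key(feature_id));
    }
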


@@ -59,12 +59,9 @@ mod utils;
 // a number of test cases in accounts_db use this
 #[cfg(test)]
 pub(crate) use tests::reconstruct_accounts_db_via_serialization;
-// NOTE: AHV currently needs `SerdeIncrementalAccountsHash`, which is why this `use` is not
-// `pub(crate)`. Once AHV calculates incremental accounts hashes, this can be reverted.
-pub use types::SerdeIncrementalAccountsHash;
 pub(crate) use {
     storage::SerializedAppendVecId,
-    types::{SerdeAccountsDeltaHash, SerdeAccountsHash},
+    types::{SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash},
 };
 #[derive(Copy, Clone, Eq, PartialEq)]